repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
---|---|---|---|---|---|
AdmiralNemo/ansible-modules-extras
|
system/alternatives.py
|
8
|
5401
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage symbolic link alternatives.
(c) 2014, Gabe Mulley <[email protected]>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: alternatives
short_description: Manages alternative programs for common commands
description:
- Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems.
- Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
version_added: "1.6"
options:
name:
description:
- The generic name of the link.
required: true
path:
description:
- The path to the real executable that the link should point to.
required: true
link:
description:
- The path to the symbolic link that should point to the real executable.
required: false
requirements: [ update-alternatives ]
'''
EXAMPLES = '''
- name: correct java version selected
alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
- name: alternatives link created
alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
'''
# Priority passed to `update-alternatives --install` when the requested
# path is not yet registered as an alternative.
DEFAULT_LINK_PRIORITY = 50
def main():
    """Manage symbolic-link alternatives via the update-alternatives tool.

    Queries the current alternative for ``name``; if it differs from the
    requested ``path``, installs the path as an alternative when necessary
    (Debian tooling only, requires ``link``) and then selects it.
    Exits through AnsibleModule.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            path=dict(required=True),
            link=dict(required=False),
        ),
        supports_check_mode=True,
    )

    params = module.params
    name = params['name']
    path = params['path']
    link = params['link']

    UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives', True)

    current_path = None
    all_alternatives = []
    os_family = None

    (rc, query_output, query_error) = module.run_command(
        [UPDATE_ALTERNATIVES, '--query', name]
    )

    # Gather the current setting and all alternatives from the query output.
    # Debian's --query emits "Key: value" lines, e.g.:
    #   Link: /usr/bin/java
    #   Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java
    #   Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
    if rc == 0:
        os_family = "Debian"
        for line in query_output.splitlines():
            split_line = line.split(':')
            if len(split_line) == 2:
                key = split_line[0]
                value = split_line[1].strip()
                if key == 'Value':
                    current_path = value
                elif key == 'Alternative':
                    all_alternatives.append(value)
                elif key == 'Link' and not link:
                    # Fall back to the link the system already knows about.
                    link = value
    elif rc == 2:
        os_family = "RedHat"
        # This is the version of update-alternatives that is shipped with
        # chkconfig on RedHat-based systems. Try again with the right options.
        (rc, query_output, query_error) = module.run_command(
            [UPDATE_ALTERNATIVES, '--list']
        )
        for line in query_output.splitlines():
            line_name, line_mode, line_path = line.strip().split("\t")
            if line_name != name:
                continue
            current_path = line_path
            break

    if current_path != path:
        if module.check_mode:
            module.exit_json(changed=True, current_path=current_path)
        try:
            # Install the requested path if necessary
            # (unsupported on the RedHat version).
            if path not in all_alternatives and os_family == "Debian":
                if link:
                    module.run_command(
                        [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
                        check_rc=True
                    )
                else:
                    # BUG FIX: fail_json() requires the 'msg' keyword argument;
                    # also fixes the "missking" typo in the message.
                    module.fail_json(msg="Needed to install the alternative, but unable to do so, as we are missing the link")

            # Select the requested path.
            module.run_command(
                [UPDATE_ALTERNATIVES, '--set', name, path],
                check_rc=True
            )

            module.exit_json(changed=True)
        except subprocess.CalledProcessError as cpe:
            # Python 3 compatible syntax (was "except ..., cpe"); report the
            # error itself instead of the meaningless str(dir(cpe)).
            module.fail_json(msg=str(cpe))
    else:
        module.exit_json(changed=False)
# import module snippets
from ansible.module_utils.basic import *

# Guard the entry point so importing this file (e.g. for tests or docs
# generation) does not execute the module.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
gregdek/ansible
|
lib/ansible/modules/network/aci/aci_epg.py
|
12
|
10462
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Support metadata consumed by Ansible's documentation and CI tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'certified',
}
DOCUMENTATION = r'''
---
module: aci_epg
short_description: Manage End Point Groups (EPG) objects (fv:AEPg)
description:
- Manage End Point Groups (EPG) on Cisco ACI fabrics.
notes:
- The C(tenant) and C(app_profile) used must exist before using this module in your playbook.
The M(aci_tenant) and M(aci_ap) modules can be used for this.
seealso:
- module: aci_tenant
- module: aci_ap
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:AEPg).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Swetha Chunduri (@schunduri)
version_added: '2.4'
options:
tenant:
description:
- Name of an existing tenant.
type: str
aliases: [ tenant_name ]
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
type: str
required: yes
aliases: [ app_profile, app_profile_name ]
epg:
description:
- Name of the end point group.
type: str
required: yes
aliases: [ epg_name, name ]
bd:
description:
- Name of the bridge domain being associated with the EPG.
type: str
aliases: [ bd_name, bridge_domain ]
priority:
description:
- The QoS class.
- The APIC defaults to C(unspecified) when unset during creation.
type: str
choices: [ level1, level2, level3, unspecified ]
intra_epg_isolation:
description:
- The Intra EPG Isolation.
- The APIC defaults to C(unenforced) when unset during creation.
type: str
choices: [ enforced, unenforced ]
description:
description:
- Description for the EPG.
type: str
aliases: [ descr ]
fwd_control:
description:
- The forwarding control used by the EPG.
- The APIC defaults to C(none) when unset during creation.
type: str
choices: [ none, proxy-arp ]
preferred_group:
description:
    - Whether or not the EPG is part of the Preferred Group and can communicate without contracts.
- This is very convenient for migration scenarios, or when ACI is used for network automation but not for policy.
- The APIC defaults to C(no) when unset during creation.
type: bool
version_added: '2.5'
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new EPG
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: intranet
epg: web_epg
description: Web Intranet EPG
bd: prod_bd
preferred_group: yes
state: present
delegate_to: localhost
- aci_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: ticketing
epg: "{{ item.epg }}"
description: Ticketing EPG
bd: "{{ item.bd }}"
priority: unspecified
intra_epg_isolation: unenforced
state: present
delegate_to: localhost
with_items:
- epg: web
bd: web_bd
- epg: database
bd: database_bd
- name: Remove an EPG
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
validate_certs: no
tenant: production
app_profile: intranet
epg: web_epg
state: absent
delegate_to: localhost
- name: Query an EPG
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
ap: ticketing
epg: web_epg
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs with a Specific Name
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
validate_certs: no
epg: web_epg
state: query
delegate_to: localhost
register: query_result
- name: Query all EPGs of an App Profile
aci_epg:
host: apic
username: admin
password: SomeSecretPassword
validate_certs: no
ap: ticketing
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Entry point: create, update, delete or query an EPG (fv:AEPg) on the APIC."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        epg=dict(type='str', aliases=['epg_name', 'name']),  # Not required for querying all objects
        bd=dict(type='str', aliases=['bd_name', 'bridge_domain']),
        ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),  # Not required for querying all objects
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        priority=dict(type='str', choices=['level1', 'level2', 'level3', 'unspecified']),
        # Consistency fix: declare type='str' like every other string option
        # (matches the documented "type: str" for intra_epg_isolation).
        intra_epg_isolation=dict(type='str', choices=['enforced', 'unenforced']),
        fwd_control=dict(type='str', choices=['none', 'proxy-arp']),
        preferred_group=dict(type='bool'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # tenant/ap/epg identify a single object; only 'query' may omit them.
        required_if=[
            ['state', 'absent', ['ap', 'epg', 'tenant']],
            ['state', 'present', ['ap', 'epg', 'tenant']],
        ],
    )

    aci = ACIModule(module)

    epg = module.params['epg']
    bd = module.params['bd']
    description = module.params['description']
    priority = module.params['priority']
    intra_epg_isolation = module.params['intra_epg_isolation']
    fwd_control = module.params['fwd_control']
    # Map the boolean onto the APIC's include/exclude vocabulary.
    preferred_group = aci.boolean(module.params['preferred_group'], 'include', 'exclude')
    state = module.params['state']
    tenant = module.params['tenant']
    ap = module.params['ap']

    # Build the REST URL for tenant -> app profile -> EPG, pulling in the
    # bridge-domain binding (fvRsBd) as a child object.
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvAp',
            aci_rn='ap-{0}'.format(ap),
            module_object=ap,
            target_filter={'name': ap},
        ),
        subclass_2=dict(
            aci_class='fvAEPg',
            aci_rn='epg-{0}'.format(epg),
            module_object=epg,
            target_filter={'name': epg},
        ),
        child_classes=['fvRsBd'],
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fvAEPg',
            class_config=dict(
                name=epg,
                descr=description,
                prio=priority,
                pcEnfPref=intra_epg_isolation,
                fwdCtrl=fwd_control,
                prefGrMemb=preferred_group,
            ),
            child_configs=[dict(
                fvRsBd=dict(
                    attributes=dict(
                        tnFvBDName=bd,
                    ),
                ),
            )],
        )

        # Only push a request when the proposed config differs from what exists.
        aci.get_diff(aci_class='fvAEPg')
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()
# Standard Ansible entry point: run only when executed directly.
if __name__ == "__main__":
    main()
|
gpl-3.0
|
guozhongxin/shadowsocks
|
shadowsocks/local.py
|
1015
|
2248
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
# Allow running this file straight from a source checkout by putting the
# project root on sys.path before importing the package.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
    """Start the shadowsocks local (client-side) relay.

    Loads configuration, optionally daemonizes, wires the TCP relay, UDP
    relay and async DNS resolver into one event loop, installs signal
    handlers for graceful/immediate shutdown, and runs until stopped.
    """
    shell.check_python()

    # fix py2exe: chdir next to the frozen executable so relative paths work
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        p = os.path.dirname(os.path.abspath(sys.executable))
        os.chdir(p)

    config = shell.get_config(True)

    daemon.daemon_exec(config)

    try:
        # Lazy %-style args instead of eager string formatting.
        logging.info("starting local at %s:%d",
                     config['local_address'], config['local_port'])

        dns_resolver = asyncdns.DNSResolver()
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        dns_resolver.add_to_loop(loop)
        tcp_server.add_to_loop(loop)
        udp_server.add_to_loop(loop)

        def handler(signum, _):
            # Graceful shutdown: stop accepting, drain in-flight traffic.
            # logging.warning replaces the deprecated logging.warn alias.
            logging.warning('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)
        # SIGQUIT is unavailable on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        # Drop privileges only after the listening sockets are bound.
        daemon.set_user(config.get('user', None))
        loop.run()
    except Exception as e:
        shell.print_exception(e)
        sys.exit(1)
# Run the local relay when invoked as a script.
if __name__ == '__main__':
    main()
|
apache-2.0
|
cheza/ActivityDynamics
|
lib/network.py
|
1
|
23860
|
from __future__ import division
from time import sleep
from lib.util import *
from config import config
import math
import random
import numpy as np
from numpy.lib.type_check import real, imag
import datetime
from graph_tool.all import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from random import shuffle, sample
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
import sys
class Network:
# Create Network Object with default values
# @params:
# directed = Is graph directed?
# graph_name = Name of graph, mainly used for storing plots and figures
# run = Number of random initializations for activity weights
# percentage = Percentage of nodes to randomly select for increasing/decreasing ratio
# converge_at = Minimum difference that has to be reached between two iterations to declare convergence
# ratios = The ratio for the activity dynamics.
# deltatau = The time that each iteration "represents".
# debug_level = The level of debug messages to be displayed.
# store_iterations = The interval for storing iterations (1 = all, 2 = every other, etc.)
    def __init__(self, directed, graph_name, run=1, converge_at=1e-16, deltatau=0.01, runs = 1,
                 deltapsi=0.0001, debug_level=1, store_iterations=1, ratios=[], ratio_index = 0, tau_in_days=30,
                 num_nodes=None):
        """Create a Network wrapper around an (initially empty) graph-tool Graph.

        Args:
            directed: whether the underlying Graph is directed.
            graph_name: name of the graph, used for storing plots and figures.
            run: index of the current random weight initialization.
            converge_at: minimum squared weight-delta to declare convergence.
            deltatau: the amount of time each iteration "represents".
            runs: total number of runs.
            deltapsi: empirical rescaling factor (recomputed in get_empirical_input).
            debug_level: minimum level of debug messages to display.
            store_iterations: interval for storing iterations (1 = all, 2 = every other, ...).
            ratios: ratios for the activity dynamics.
                NOTE(review): mutable default argument -- the default list is
                shared across instances; confirm callers always pass their own.
            ratio_index: index into *ratios* currently in use.
            tau_in_days: number of days one tau corresponds to.
            num_nodes: optional fixed number of nodes.
        """
        # default variables
        self.name_to_id = {}
        self.graph = Graph(directed=directed)
        # variables used to store and load files
        self.graph_name = graph_name
        self.run = run
        self.runs = runs
        self.num_nodes = num_nodes
        # variables used for activity dynamics modeling process
        self.cur_iteration = 0
        self.ratio_ones = [1.0] * self.graph.num_vertices()
        self.deltatau = deltatau
        self.deltapsi = deltapsi
        self.tau_in_days = tau_in_days
        self.converge_at = float(converge_at)
        self.store_iterations = store_iterations
        self.ratio = None
        # variables used to specifically increase the ratio for certain nodes
        self.random_nodes = []
        # variable needed for adding and removing edges from graph
        self.edge_list = None
        # variable storing the eigenvalues for the network
        self.top_eigenvalues = None
        self.debug_level = debug_level
        # empirical network variables
        self.ratio_index = ratio_index
        self.ratios = ratios
        self.minimized_error_ratios = []
        # synthetic network helper variables
        self.converged = False
        self.diverged = False
    def calc_acs(self, ac_per_taus=None, min_ac=None):
        """Compute the critical-activity values (a_c) per tau.

        With ac_per_taus=None a single global value -- mean replies plus mean
        posts per vertex, floored at *min_ac* -- is replicated for all taus.
        Otherwise a sliding window of *ac_per_taus* taus is used.

        NOTE(review): max(x, None) relies on Python 2 ordering (None sorts
        below numbers); it raises TypeError on Python 3.
        """
        if ac_per_taus is None:
            self.a_cs = [max((np.mean(self.replies) + np.mean(self.posts)) / self.num_vertices, min_ac)] * (len(self.replies)-1)
        else:
            for i in xrange(len(self.replies)-ac_per_taus):
                j = i + ac_per_taus
                curr_ac = (np.mean(self.replies[i:j]) + np.mean(self.posts[i:j])) / self.num_vertices
                # NOTE(review): appends i+ac_per_taus copies per window, so
                # a_cs grows superlinearly -- confirm this is intended.
                for k in xrange(i+ac_per_taus):
                    self.a_cs.append(curr_ac)
        # activate the first window's value
        self.set_ac(0)
def set_ac(self, index):
self.a_c = self.a_cs[index]
#def calc_ac(self, start_tau=0, end_tau=None, min_ac=40):
# replies = self.replies[start_tau:end_tau]
# posts = self.posts[start_tau:end_tau]
# return max((np.mean(replies) + np.mean(posts)) / self.num_vertices, min_ac)
def calc_max_posts_per_day(self, start_tau=0, end_tau=None):
return max(self.posts_per_user_per_day[start_tau:end_tau])
def calc_g_per_month(self):
return self.max_posts_per_day / (math.sqrt(self.a_c ** 2 + self.max_posts_per_day ** 2))
def calc_max_q(self):
return (self.max_posts_per_day * self.tau_in_days * self.num_vertices) / (2 * self.num_edges * self.g_per_month)
    def get_empirical_input(self, path, start_tau=0, end_tau=None, ac_per_taus=None):
        """Load empirical activity data from a tab-separated file and derive
        the model parameters (a_c, g, max_q, mu, deltapsi).

        Expected columns per data line (one header line is skipped):
        index, dx, activity-per-month, posts, replies, num_users, init_users.

        NOTE(review): the final debug line reads self.k1, which is never
        assigned in this file -- confirm it is set externally before calling.
        """
        self.dx = []
        self.apm = []
        self.posts = []
        self.replies = []
        self.num_users = []
        self.init_users = []
        self.posts_per_user_per_day = []
        self.a_cs = []
        # Python 2 style: binary mode but line-wise string processing.
        f = open(path, "rb")
        for ldx, line in enumerate(f):
            # skip the header line
            if ldx < 1:
                continue
            el = line.strip().split("\t")
            try:
                self.dx.append(float(el[1]))
            except:
                # a malformed/short row terminates the data section
                break
            self.apm.append(float(el[2]))
            self.posts.append(float(el[3]))
            self.replies.append(float(el[4]))
            try:
                self.init_users.append(el[6].split(","))
            except:
                # column 6 is optional; fall back to a placeholder user
                self.init_users.append(["dummy"])
            # +1 avoids a zero divisor for months without users
            num_users = float(el[5]) + 1
            self.num_users.append(num_users)
            self.posts_per_user_per_day.append(float(el[3])/num_users/30.0)
        f.close()
        self.calc_acs(ac_per_taus)
        self.max_posts_per_day = self.calc_max_posts_per_day(start_tau, end_tau)
        self.g_per_month = self.calc_g_per_month()
        self.max_q = self.calc_max_q()
        self.mu = self.max_q / self.a_c
        self.deltapsi = self.mu
        self.debug_msg("max_q: {}".format(self.max_q), level=1)
        self.debug_msg("deltapsi: {}".format(self.deltapsi), level=1)
        self.debug_msg("max_posts_per_day: {}".format(self.max_posts_per_day), level=1)
        self.debug_msg("a_c: {}".format(self.a_c), level=1)
        self.debug_msg("kappa_1: {}".format(self.k1), level=1)
# Creating all necessary folders for storing results, plots and figures
def create_folders(self):
folders = [config.graph_source_dir+"weights/"+self.graph_name+"/",
config.plot_dir + "weights_over_time/" + self.graph_name + "/",
config.plot_dir + "average_weights_over_tau/" + self.graph_name + "/",
config.plot_dir + "ratios_over_time/" + self.graph_name + "/"]
try:
for folder in folders:
if not os.path.exists(folder):
self.debug_msg("Creating folder: {}".format(folder))
os.makedirs(folder)
except Exception, e:
self.debug_msg("\x1b[41mERROR:: {}\x1b[00m".format(e))
def get_binary_filename(self, source_name, bin_type="GT", run=0):
if bin_type == "GT":
return config.graph_binary_dir+"GT/"+source_name+"/"+source_name+"_run_"+str(run)+".gt"
elif bin_type == "GML":
return config.graph_binary_dir+"GML/"+source_name+"/"+source_name+"_run_"+str(run)+".gml"
# Methods to manage the files to store the weights over all iterations
def open_weights_files(self):
folder = config.graph_source_dir + "weights/" + self.graph_name + "/"
wname = self.graph_name + "_" + str(self.store_iterations) +"_"+\
str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
str(self.run) + "_weights.txt"
#iname = self.graph_name + "_" + str(self.store_iterations) +"_"+\
# str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
# str(self.run) + "_intrinsic.txt"
#ename = self.graph_name + "_" + str(self.store_iterations) +"_"+\
# str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
# str(self.run) + "_extrinsic.txt"
self.weights_file_path = folder+wname
#self.intrinsic_file_path = folder+iname
#self.extrinsic_file_path = folder+ename
self.weights_file = open(self.weights_file_path, "wb")
#self.intrinsic_file = open(self.intrinsic_file_path, "wb")
#self.extrinsic_file = open(self.extrinsic_file_path, "wb")
def write_weights_to_file(self):
self.weights_file.write(("\t").join(["%.8f" % float(x) for x in self.get_node_weights("activity")]) + "\n")
def write_summed_weights_to_file(self):
self.weights_file.write(str(sum(self.get_node_weights("activity"))) + "\n")
#self.intrinsic_file.write("0"+"\n")
#self.extrinsic_file.write("0"+"\n")
def close_weights_files(self):
self.weights_file.close()
#self.intrinsic_file.close()
#self.extrinsic_file.close()
def reduce_to_largest_component(self):
fl = label_largest_component(self.graph)
self.graph = GraphView(self.graph, vfilt=fl)
def set_graph_property(self, type, property, name):
a = self.graph.new_graph_property(type, property)
self.graph.graph_properties[name] = a
# Add calculated graph_properties to graph object
def add_graph_properties(self):
self.set_graph_property("object", self.deltatau, "deltatau")
self.set_graph_property("object", self.deltapsi, "deltapsi")
self.set_graph_property("float", self.cur_iteration, "cur_iteration")
self.set_graph_property("string", self.graph_name, "graph_name")
self.set_graph_property("int", self.store_iterations, "store_iterations")
self.set_graph_property("object", self.top_eigenvalues, "top_eigenvalues")
self.set_graph_property("object", self.ratios, "ratios")
self.set_graph_property("int", self.runs, "runs")
try:
self.set_graph_property("object", self.apm, "activity_per_month")
self.set_graph_property("object", self.dx, "delta_activity_per_month")
self.set_graph_property("object", self.posts, "posts")
self.set_graph_property("object", self.replies, "replies")
self.set_graph_property("float", self.a_c, "a_c")
self.set_graph_property("object", self.a_cs, "a_cs")
self.set_graph_property("object", self.max_q, "max_q")
self.set_graph_property("object", self.max_posts_per_day, "max_posts_per_day")
self.set_graph_property("object", self.g_per_month, "g_per_month")
except:
self.debug_msg(" -> INFO: Could not store empirical activities! ", level=1)
# Reset attributes between iterations / runs
def reset_attributes(self, ratio, temp_weights):
self.graph.vertex_properties["activity"].a = temp_weights
self.ratio = ratio
self.converged = False
self.diverged = False
self.cur_iteration = 0
# node weights getter
def get_node_weights(self, name):
return np.array(self.graph.vp[name].a)
# init empirical weight as average over all nodes
    def init_empirical_activity(self):
        """Seed the empirical activity: zero everywhere, then give each vertex
        listed in init_users[0] the same small starting activity.

        NOTE(review): contrary to the comment above this method in the
        original source, activity is NOT averaged over all nodes -- only the
        initial users receive a non-zero value.
        """
        # per-seed share of the first month's activity, normalised by a_c
        initial_empirical_activity = self.apm[0]/(self.graph.num_edges()*2)/self.num_users[0]/self.a_c
        init_nodes = self.init_users[0]
        # reset activity!
        for v in self.graph.vertices():
            self.graph.vp["activity"][v] = 0.0
        # randomly initiate minimal activity
        for v_id in init_nodes:
            n = self.graph.vertex(v_id)
            self.graph.vp["activity"][n] = initial_empirical_activity
# node weights setter
def set_node_weights(self, name, weights):
self.graph.vertex_properties[name].a = weights
def update_node_weights(self, name, added_weight):
self.graph.vertex_properties[name].a += added_weight
def clear_all_filters(self):
self.graph.clear_filters()
self.num_vertices = self.graph.num_vertices()
self.ratio_ones = [1.0] * self.num_vertices
# creating random node weights
    def add_node_weights(self, min=0.0, max=0.1, distribution=[1,0,0]):
        """Assign uniform-random activity weights in [min, max] to all vertices.

        NOTE(review): *distribution* is unused here and is a mutable default
        argument -- confirm with callers before removing it.
        """
        self.debug_msg("Adding random weights between {} and {} to nodes.".format(min, max), level=0)
        num_nodes = int(self.graph.num_vertices())
        weights = self.graph.new_vertex_property("double")
        weights_list = [random.uniform(min, max) for x in xrange(num_nodes)]
        # shuffle is redundant for i.i.d. uniform draws but kept for parity
        random.shuffle(weights_list)
        for ndx, n in enumerate(self.graph.vertices()):
            weights[n] = weights_list[ndx]
        self.graph.vertex_properties["activity"] = weights
# creating random edge weights
def add_edge_weights(self, min=0.0, max=0.1):
self.debug_msg("Adding random weights between {} and {} to edges.".format(min, max), level=0)
for edge in self.graph.edges():
self.graph.edge_properties['activity'][edge] = random.uniform(min, max)
# eigenvalues getter
def get_eigenvalues(self):
return np.asarray(self.graph.graph_properties['top_eigenvalues'])
# store graph to gt
def store_graph(self, run, postfix=""):
self.debug_msg("Storing Graph")
path = config.graph_binary_dir + "/GT/{}/".format(self.graph_name)
try:
if not os.path.exists(path):
self.debug_msg("Created folder: {}".format(path))
os.makedirs(path)
except Exception as e:
self.debug_msg("\x1b[41mERROR:: {}\x1b[00m".format(e))
self.graph.save(path + "{}_run_{}{}.gt".format(self.graph_name, run, postfix))
# sample calculation of g(x)
def gx(self, q, a, ac):
return (q*a)/math.sqrt(ac**2+a**2)
def fx(self, x, ratio):
return -x*ratio
# plot g(x) function for multiple values
    def plot_gx(self, min, max):
        """Plot g(a) for four (q, a_c) combinations over [min, max) and save
        the figure to <plot_dir>/functions/<graph_name>_gx.png."""
        x = []
        y = []
        y2 = []
        y3 = []
        y4 = []
        # sample each curve at 0.01 resolution
        for weight in np.arange(min, max, 0.01):
            y.append(self.gx(1.0, weight, 0.5))
            y2.append(self.gx(1.0, weight, 2.0))
            y3.append(self.gx(2.0, weight, 0.5))
            y4.append(self.gx(2.0, weight, 2.0))
            x.append(weight)
        plt.figure()
        plt.plot(x, y, alpha=1, label="$a_c=0.5$, $q=1.0$")
        plt.plot(x, y2, alpha=1, label="$a_c=2.0$, $q=1.0$")
        plt.plot(x, y3, alpha=1, label="$a_c=0.5$, $q=2.0$")
        plt.plot(x, y4, alpha=1, label="$a_c=2.0$, $q=2.0$")
        ax = plt.axes()
        plt.xlabel("Node Activity ($a$)")
        plt.ylabel("Values for $g(a)$")
        # axes and guide lines marking the asymptotes q and thresholds a_c
        plt.plot([-6, 6], [0, 0], 'k-', lw=0.5, alpha=0.8)
        plt.plot([0.5, 0.5], [0, 3], 'k--', lw=0.5)
        plt.plot([2.0, 2.0], [0, 3], 'k--', lw=0.5)
        plt.plot([0.0, 6], [1.0, 1.0], 'k--', lw=0.5)
        plt.plot([0.0, 6], [2.0, 2.0], 'k--', lw=0.5)
        plt.text(-0.95, 0.95, "$q=1.0$", size=12)
        plt.text(-0.95, 1.95, "$q=2.0$", size=12)
        plt.text(0.1, -0.2, "$a_c=0.5$", size=12)
        plt.text(1.6, -0.2, "$a_c=2.0$", size=12)
        plt.plot([0, 0], [-3, 3], 'k-', lw=0.5, alpha=0.8)
        plt.title("Values for $g(a)$ with weights from ${}$ to ${}$".format(min, max))
        ax.grid(color="gray")
        plt.ylim(-3, 3)
        plt.legend(loc="upper left")
        plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_gx.png")
        plt.close("all")
def get_fx_weights(self, min, max, lam):
x = []
y = []
for weight in np.arange(min, max+0.1, 0.1):
y.append(self.fx(weight, lam))
x.append(weight)
return x, y
# plot f(x)
    def plot_fx(self, min, max, k=1):
        """Plot the decay function f(a) for several lambda values over
        [min, max] and save it to <plot_dir>/functions/<graph_name>_fx.png.

        NOTE(review): the *k* parameter is unused in this method.
        """
        plt.figure()
        x,y = self.get_fx_weights(min, max, 1.0)
        plt.plot(x, y, alpha=1, label="$\lambda$=$1.0$")
        x,y = self.get_fx_weights(min, max, 0.5)
        plt.plot(x, y, alpha=1, label="$\lambda$=$0.5$")
        x,y = self.get_fx_weights(min, max, 0.1)
        plt.plot(x, y, alpha=1, label="$\lambda$=$0.1$")
        x,y = self.get_fx_weights(min, max, 1.5)
        plt.plot(x, y, alpha=1, label="$\lambda$=$1.5$")
        ax = plt.axes()
        plt.xlabel("Node Activity (a)")
        plt.ylabel("Values for f(a)")
        plt.title("Values for f(a) with weights from ${}$ to ${}$".format(min, max))
        ax.grid(color="gray")
        # axis guide lines through the origin
        plt.plot([-1, 1], [0, 0], 'k-', lw=0.5, alpha=0.8)
        plt.plot([0, 0], [-1.5, 1.5], 'k-', lw=0.5, alpha=0.8)
        plt.legend()
        plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_fx.png")
        plt.close("all")
# plot f(x)
    def plot_fx_weight(self, min, max, k=0.5):
        """Plot the geometric decay of a weight of 10 over 10 time steps with
        factor *k* and save it to <plot_dir>/functions/<graph_name>_fx_weight.png.

        NOTE(review): *min*/*max* are unused, and the title hard-codes the
        values 10 and 0.5 regardless of *k*.
        """
        x = []
        prev_val = 10
        y = [prev_val]
        # repeatedly apply the decay factor k
        for i in xrange(10):
            prev_val *= k
            y.append(prev_val)
            x.append(i)
        x.append(10)
        plt.figure()
        plt.plot(x, y, alpha=1)
        ax = plt.axes()
        plt.xlabel("Time $t$")
        plt.ylabel("Values for f(a)")
        plt.title("Values for f(a) with weight=${}$ and $\lambda$=${}$".format(10, 0.5))
        ax.grid(color="gray")
        plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_fx_weight.png")
        plt.close("all")
# getter for laplacian matrix (not needed)
def get_laplacian(self):
return laplacian(self.graph)
# calculate eigenvector centrality
def calc_ev_centrality(self, max_iter, selector):
try:
return self.graph.vertex_properties[selector]
except:
ev, ev_centrality = eigenvector(self.graph, weight=None, max_iter = max_iter)
return ev_centrality
    def calculate_ratios(self):
        """Derive one activity-loss ratio per tau from the empirical series.

        NOTE(review): relies on self.k1 (kappa_1), which is never assigned in
        this file -- confirm it is set externally before calling.
        """
        for i in xrange(len(self.apm)-1):
            activity_current = self.apm[i]
            activity_next = activity_current-self.dx[i]
            self.ratio = self.k1 - math.log(activity_next/activity_current) / self.deltapsi
            # empirical correction proportional to the current activity level
            self.ratio -= 0.03 * activity_current / (self.a_c * self.num_vertices)
            self.ratios.append(self.ratio)
        self.debug_msg("ratios ({}): {}".format(len(self.ratios), self.ratios), level=1)
def set_ratio(self, index):
self.ratio_index = index
self.ratio = self.ratios[index]
def activity_dynamics(self, store_weights=False, empirical=False):
# Collect required input
activity_weight = np.asarray(self.get_node_weights("activity"))
# Calculate deltax
ratio_ones = (self.ratio * np.asarray(self.ones_ratio))
intrinsic_decay = self.activity_decay(activity_weight, ratio_ones)
extrinsic_influence = self.peer_influence(activity_weight)
activity_delta = (intrinsic_decay + extrinsic_influence) * self.deltatau
t = 1.0
# Check if already converged/diverged
if self.cur_iteration % self.store_iterations == 0:
t = np.dot(activity_delta, activity_delta)
# Debug output & convergence/divergence criteria check
if t < self.converge_at and not empirical:
self.debug_msg(" \x1b[32m>>>\x1b[00m Simulation for \x1b[32m'{}'\x1b[00m with \x1b[34mratio={}\x1b[00m and "
"\x1b[34mdtau={}\x1b[00m \x1b[34mconverged\x1b[00m at \x1b[34m{}\x1b[00m with "
"\x1b[34m{}\x1b[00m".format(self.graph_name, self.ratio, self.deltatau, self.cur_iteration+1,
t), level=1)
self.converged = True
if (t == float("Inf") or t == float("NaN")) and not empirical:
self.debug_msg(" \x1b[31m>>>\x1b[00m Simulation for \x1b[32m'{}'\x1b[00m with \x1b[34mratio={}\x1b[00m and "
"\x1b[34mdtau={}\x1b[00m \x1b[31mdiverged\x1b[00m at \x1b[34m{}\x1b[00m with "
"\x1b[34m{}\x1b[00m".format(self.graph_name, self.ratio, self.deltatau, self.cur_iteration+1,
t), level=1)
self.diverged = True
# Set new weights
self.update_node_weights("activity", activity_delta)
# Store weights to file
if ((store_weights and self.cur_iteration % self.store_iterations == 0) and not empirical) or ((self.converged or self.diverged)
and not empirical):
self.weights_file.write(("\t").join(["%.8f" % x for x in self.get_node_weights("activity")]) + "\n")
#self.intrinsic_file.write(("\t").join(["%.8f" % x for x in intrinsic_decay + activity_weight]) + "\n")
#self.extrinsic_file.write(("\t").join(["%.8f" % x for x in extrinsic_influence + activity_weight]) + "\n")
elif ((store_weights and self.cur_iteration % self.store_iterations == 0) and empirical) or ((self.converged or self.diverged)
and empirical):
self.weights_file.write(str(sum(activity_weight + activity_delta)) + "\n")
#self.intrinsic_file.write(str(abs(sum(intrinsic_decay))*self.deltatau) + "\n")
#self.extrinsic_file.write(str(abs(sum(extrinsic_influence))*self.deltatau) + "\n")
# Increment current iteration counter
self.cur_iteration += 1
def peer_influence(self, x):
pi = ((1.0 * x)/(np.sqrt(1.0+x**2)))
return pi * self.A
def activity_decay(self, x, ratio):
return -x*ratio
    def debug_msg(self, msg, level=0):
        # Print a timestamped, colorized (ANSI escapes) debug line tagged
        # with this run's id.  A message is shown only when the instance's
        # threshold admits it: the guard is `debug_level <= level`, so a
        # HIGHER message level means MORE likely to be printed.
        # NOTE: Python 2 print statement -- this file is Python 2 only.
        if self.debug_level <= level:
            print " \x1b[35m-NWK-\x1b[00m [\x1b[36m{}\x1b[00m][\x1b[32m{}\x1b[00m] \x1b[33m{}\x1b[00m".format(
                datetime.datetime.now().strftime("%H:%M:%S"), self.run, msg)
    def update_binary_graph(self, rand_iter, save_specific=True):
        # Persist the graph, retrying after a delay when storing fails
        # (e.g. concurrent writers).
        # Method needed to avoid race condition!
        # NOTE(review): the first attempt hard-codes save_specific=True and
        # ignores the parameter, while the recursive retry passes it through
        # -- confirm that is intended.  Also note the retry recurses without
        # a depth limit, so a persistent failure recurses indefinitely.
        try:
            self.store_graph(rand_iter, save_specific=True)
        except Exception as e:
            # e.message is Python-2 only.
            self.debug_msg(e.message, level=0)
            self.debug_msg(" ### Sleeping for 100 seconds before trying to store again!", level=0)
            sleep(100)
            self.update_binary_graph(rand_iter, save_specific)
    def debug_gt(self):
        # Dump every graph-, vertex- and edge-level property map of the
        # loaded graph-tool graph, plus aggregate post/reply counts.
        gps = self.graph.gp.keys()
        vps = self.graph.vp.keys()
        eps = self.graph.ep.keys()
        self.debug_msg(" >> Inspecting graph properties: {}".format((", ").join(gps)), level=1)
        for gp_k in gps:
            self.debug_msg("   \x1b[36m- {}:\x1b[00m {}".format(gp_k, self.graph.gp[gp_k]), level=1)
        self.debug_msg(" >> Inspecting vertex properties: {}".format((", ").join(vps)), level=1)
        for vp_k in vps:
            self.debug_msg("   \x1b[32m- {}:\x1b[00m {}".format(vp_k, self.graph.vp[vp_k]), level=1)
        self.debug_msg(" >> Inspecting edge properties: {}".format((", ").join(eps)), level=1)
        for ep_k in eps:
            self.debug_msg("   \x1b[37m- {}:\x1b[00m {}".format(ep_k, self.graph.ep[ep_k]), level=1)
        # NOTE: Python 2 print statements below.
        print "Sum Posts: ", sum(self.graph.gp["posts"])
        print "Sum Replies: ", sum(self.graph.gp["replies"])
def prepare_eigenvalues(self):
self.top_eigenvalues = self.get_eigenvalues()
self.k1 = max(self.top_eigenvalues)
    def load_graph_save(self, fpath):
        # Load the graph, retrying exactly once after a delay if the first
        # attempt fails (e.g. the file is still being written by another
        # process); a second failure propagates to the caller.
        try:
            self.load_graph(fpath)
        except Exception as e:
            # e.message is Python-2 only.
            self.debug_msg(e.message, level=0)
            self.debug_msg(" ### Sleeping for 100 seconds before trying to store again!", level=0)
            sleep(100)
            self.load_graph(fpath)
    def load_graph(self, fpath):
        # Load a graph-tool graph from *fpath*, simplify it, and precompute
        # the helpers the simulation needs (ones vector, adjacency matrix,
        # vertex/edge counts).
        self.debug_msg("Loading GT", level=0)
        self.graph = load_graph(fpath)
        # Drop self-loops and parallel edges before building the adjacency
        # matrix.
        remove_self_loops(self.graph)
        remove_parallel_edges(self.graph)
        self.debug_msg("  --> Creating ones vector", level=0)
        # One entry per vertex; scaled by self.ratio in activity_dynamics().
        self.ones_ratio = [1.0] * self.graph.num_vertices()
        self.debug_msg("  --> Getting Adjacency Matrix", level=0)
        # Unweighted adjacency matrix (graph_tool.spectral.adjacency).
        self.A = adjacency(self.graph, weight=None)
        self.num_vertices = self.graph.num_vertices()
        self.num_edges = self.graph.num_edges()
        self.debug_msg("  --> Counted {} vertices".format(self.num_vertices), level=0)
        self.debug_msg("  --> Counted {} edges".format(self.num_edges), level=0)
|
gpl-2.0
|
philippeback/urbit
|
outside/commonmark/man/make_man_page.py
|
23
|
3778
|
#!/usr/bin/env python
# Creates a man page from a C file.
# Comments beginning with `/**` are treated as Groff man, except that
# 'this' is converted to \fIthis\fR, and ''this'' to \fBthis\fR.
# Non-blank lines immediately following a man page comment are treated
# as function signatures or examples and parsed into .Ft, .Fo, .Fa, .Fc. The
# immediately preceding man documentation chunk is printed after the example
# as a comment on it.
# That's about it!
import sys, re, os
from datetime import date
# Regexes describing the structure of a man-page comment block in a C
# header.  All patterns and replacement templates are raw strings so that
# backslashes reach the regex engine verbatim -- the original non-raw
# strings (e.g. '\/', '\g') rely on invalid escape sequences, which raise
# SyntaxWarning/DeprecationWarning on modern Python.
comment_start_re = re.compile(r'^\/\*\* ?')
comment_delim_re = re.compile(r'^[/ ]\** ?')
comment_end_re = re.compile(r'^ \**\/')
function_re = re.compile(r'^ *(?:CMARK_EXPORT\s+)?(?P<type>(?:const\s+)?\w+(?:\s*[*])?)\s*(?P<name>\w+)\s*\((?P<args>[^)]*)\)')
blank_re = re.compile(r'^\s*$')
macro_re = re.compile(r'CMARK_EXPORT *')
typedef_start_re = re.compile(r'typedef.*{$')
typedef_end_re = re.compile(r'}')
single_quote_re = re.compile(r"(?<!\w)'([^']+)'(?!\w)")
double_quote_re = re.compile(r"(?<!\w)''([^']+)''(?!\w)")

def handle_quotes(s):
    """Convert 'this' to \\fIthis\\fR (italic) and ''this'' to \\fBthis\\fR
    (bold) for groff output.  Single quotes are substituted first, matching
    the original evaluation order."""
    s = re.sub(single_quote_re, r'\\fI\g<1>\\fR', s)
    return re.sub(double_quote_re, r'\\fB\g<1>\\fR', s)
# Parser state.  `state` is one of:
#   'default'   - outside any man-page comment
#   'man'       - inside a /** ... */ documentation comment
#   'signature' - reading the code lines that follow a man comment
typedef = False   # True while inside a multi-line `typedef ... { ... }`
mdlines = []      # accumulated groff output fragments
chunk = []        # text of the man comment currently being collected
sig = []          # signature lines currently being collected
if len(sys.argv) > 1:
    sourcefile = sys.argv[1]
else:
    print("Usage: make_man_page.py sourcefile")
    exit(1)
with open(sourcefile, 'r') as cmarkh:
    state = 'default'
    for line in cmarkh:
        # state transition
        oldstate = state
        if comment_start_re.match(line):
            state = 'man'
        elif comment_end_re.match(line) and state == 'man':
            # closing */ of a man comment: drop the line, stay in 'man'
            continue
        elif comment_delim_re.match(line) and state == 'man':
            state = 'man'
        elif not typedef and blank_re.match(line):
            state = 'default'
        elif typedef and typedef_end_re.match(line):
            typedef = False
        elif state == 'man':
            # first non-comment line after a man comment: a signature
            state = 'signature'
            # match object (or None), used truthily below
            typedef = typedef_start_re.match(line)
        # handle line
        if state == 'man':
            # strip the comment decoration and apply quote markup
            chunk.append(handle_quotes(re.sub(comment_delim_re, '', line)))
        elif state == 'signature':
            # drop the CMARK_EXPORT macro before recording the signature
            ln = re.sub(macro_re, '', line)
            if typedef or not re.match(blank_re, ln):
                sig.append(ln)
        elif oldstate == 'signature' and state != 'signature':
            # signature just ended: emit it, then the man comment after it
            if len(mdlines) > 0 and mdlines[-1] != '\n':
                mdlines.append('\n')
            rawsig = ''.join(sig)
            m = function_re.match(rawsig)
            if m:
                # function prototype: render as .Ft/.Fo/.Fa-style bold/italic
                mdlines.append('\\fI' + m.group('type') + '\\fR' + ' ')
                mdlines.append('\\fB' + m.group('name') + '\\fR' + '(')
                first = True
                for argument in re.split(',', m.group('args')):
                    if not first:
                        mdlines.append(', ')
                    first = False
                    mdlines.append('\\fI' + argument.strip() + '\\fR')
                mdlines.append(')\n')
            else:
                # not a prototype (e.g. a typedef): emit verbatim code block
                mdlines.append('.nf\n\\f[C]\n.RS 0n\n')
                mdlines += sig
                mdlines.append('.RE\n\\f[]\n.fi\n')
            if len(mdlines) > 0 and mdlines[-1] != '\n':
                mdlines.append('\n')
            mdlines.append('.PP\n')
            mdlines += chunk
            chunk = []
            sig = []
        elif oldstate == 'man' and state != 'signature':
            # man comment with no signature following: emit it on its own
            if len(mdlines) > 0 and mdlines[-1] != '\n':
                mdlines.append('\n')
            mdlines += chunk # add man chunk
            chunk = []
            mdlines.append('\n')
# .TH header (man section 3), then the accumulated body.
sys.stdout.write('.TH ' + os.path.basename(sourcefile).replace('.h','') + ' 3 "' + date.today().strftime('%B %d, %Y') + '" "LOCAL" "Library Functions Manual"\n')
sys.stdout.write(''.join(mdlines))
|
mit
|
clovett/MissionPlanner
|
Lib/encodings/cp1258.py
|
93
|
13927
|
""" Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):

    def encode(self, input, errors='strict'):
        """Encode *input* to cp1258 bytes using the charmap encoding table."""
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        """Decode cp1258 bytes in *input* using the charmap decoding table."""
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*, discarding the consumed-length half of the result."""
        data, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return data
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*, discarding the consumed-length half of the result."""
        text, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return text
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits the cp1258 encode() from Codec; no extra behaviour needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits the cp1258 decode() from Codec; no extra behaviour needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo registering the cp1258 codec with `codecs`."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1258',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
gpl-3.0
|
thombashi/typepy
|
docs/conf.py
|
1
|
5980
|
import os
import sys
import sphinx_rtd_theme
from typepy import __author__, __copyright__, __name__, __version__
sys.path.insert(0, os.path.abspath('../typepy'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
]
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = __name__
copyright = __copyright__
author = __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
# Fixed the leftover 'pytypeutildoc' (the project's former name was
# pytypeutil) to match the current project name used everywhere else
# in this configuration.
htmlhelp_basename = 'typepydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'typepy.tex', 'typepy Documentation',
'Tsuyoshi Hombashi', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'typepy', 'typepy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description entry below is still the sphinx-quickstart
# placeholder text -- replace it with a real one-line summary of typepy.
texinfo_documents = [
    (master_doc, 'typepy', 'typepy Documentation',
     author, 'typepy', 'One line description of project.',
     'Miscellaneous'),
]
# -- rst_prolog -------------------------------------------
rp_common = """
.. |TM| replace:: :superscript:`TM`
"""
rp_builtin = """
.. |False| replace:: :py:obj:`False`
.. |True| replace:: :py:obj:`True`
.. |None| replace:: :py:obj:`None`
.. |inf| replace:: :py:obj:`inf`
.. |nan| replace:: :py:obj:`nan`
.. |bool| replace:: :py:class:`bool`
.. |dict| replace:: :py:class:`dict`
.. |int| replace:: :py:class:`int`
.. |list| replace:: :py:class:`list`
.. |float| replace:: :py:class:`float`
.. |str| replace:: :py:class:`str`
.. |tuple| replace:: :py:obj:`tuple`
"""
rp_class = """
.. |TypeConversionError| replace:: :py:class:`typepy.TypeConversionError`
.. |DateTime| replace:: :py:class:`typepy.DateTime`
.. |Dictionary| replace:: :py:class:`typepy.Dictionary`
.. |Infinity| replace:: :py:class:`typepy.Infinity`
.. |Integer| replace:: :py:class:`typepy.Integer`
.. |Nan| replace:: :py:class:`typepy.Nan`
.. |NoneType| replace:: :py:class:`typepy.NoneType`
.. |NullString| replace:: :py:class:`typepy.NullString`
.. |RealNumber| replace:: :py:class:`typepy.RealNumber`
.. |String| replace:: :py:class:`typepy.String`
"""
rp_docstring = """
.. |result_matrix_desc| replace::
For each member methods, the result matrix for each ``strict_level`` is as follows.
Column headers (except ``Method`` column) indicate input data to ``value`` argument of
a method in the ``Method`` column.
For each cell shows the output of the method.
.. |strict_level| replace::
Represents how much strict to detect the value type. Higher ``strict_level`` means that stricter type check.
"""
rst_prolog = (
rp_common +
rp_builtin +
rp_class +
rp_docstring
)
|
mit
|
run2/citytour
|
4symantec/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py
|
2769
|
1967
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset in turn."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # One instance of each multi-byte prober, in probing priority order.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [prober_cls() for prober_cls in prober_classes]
        self.reset()
|
mit
|
ZenHarbinger/snapcraft
|
snapcraft/integrations/__init__.py
|
7
|
1763
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Snapcraft integrations layer.
Defines 'enable-ci' command infrastructure to support multiple integrations
systems in an isolated form.
"""
import importlib
SUPPORTED_CI_SYSTEMS = (
    'travis',
)


def enable_ci(ci_system, refresh_only):
    """Enable or refresh CI integration for *ci_system*.

    Raises EnvironmentError when *ci_system* is empty or not one of
    SUPPORTED_CI_SYSTEMS.  Otherwise the matching
    ``snapcraft.integrations.<ci_system>`` module is loaded and either
    refreshed (when *refresh_only*) or enabled after interactive
    confirmation.
    """
    supported = ','.join(SUPPORTED_CI_SYSTEMS)
    if not ci_system:
        # XXX cprov 20161116: we could possibly auto-detect currently
        # integration systems in master ?
        raise EnvironmentError(
            'Please select one of the supported integration systems: '
            '{}.'.format(supported))
    if ci_system not in SUPPORTED_CI_SYSTEMS:
        raise EnvironmentError(
            '"{}" integration is not supported by snapcraft.\n'
            'Please select one of the supported integration systems: '
            '{}.'.format(ci_system, supported))
    module = importlib.import_module(
        'snapcraft.integrations.{}'.format(ci_system))
    if refresh_only:
        module.refresh()
        return
    print(module.__doc__)
    if input('Continue (y/N): ') == 'y':
        module.enable()
|
gpl-3.0
|
arnavd96/Cinemiezer
|
myvenv/lib/python3.4/site-packages/boto/cloudsearch/sourceattribute.py
|
153
|
3156
|
# Copyright (c) 202 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class SourceAttribute(object):
    """Describes how a document source field populates an ``IndexField``.

    A maximum of 20 source attributes can be configured for each
    index field.

    :ivar name: The name of the document source field to add to the
        ``IndexField``.
    :ivar default: Optional default value if the source attribute is not
        specified in a document.
    :ivar data_function: The transformation ('Copy', 'TrimTitle' or 'Map')
        applied when copying data from a source attribute.
    :ivar data_map: Dict with the keys:

        * cases - A dict that translates source field values
          to custom values.
        * default - An optional default value to use if the
          source attribute is not specified in a document.
        * name - the name of the document source field to add
          to this ``IndexField``

    :ivar data_trim_title: Trims common title words from a source document
        attribute when populating an ``IndexField`` (useful for building a
        sortable field).  Dict with the keys ``default``, ``language``
        (an IETF RFC 4646 language code), ``separator`` (the separator
        that follows the text to trim) and ``name``.
    """

    # The transformations CloudSearch accepts; the first one is the default.
    ValidDataFunctions = ('Copy', 'TrimTitle', 'Map')

    def __init__(self):
        self.data_copy = {}
        self._data_function = self.ValidDataFunctions[0]
        self.data_map = {}
        self.data_trim_title = {}

    @property
    def data_function(self):
        """The transformation applied when copying source data."""
        return self._data_function

    @data_function.setter
    def data_function(self, value):
        # Reject anything outside the closed set of valid transformations.
        if value in self.ValidDataFunctions:
            self._data_function = value
        else:
            valid = '|'.join(self.ValidDataFunctions)
            raise ValueError('data_function must be one of: %s' % valid)
|
mit
|
Eyeless95/samsung_g531_kernel
|
tools/perf/scripts/python/event_analyzing_sample.py
|
4719
|
7393
|
# event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
    # Called once by perf before any samples are processed.
    # NOTE: Python 2 print statement -- this script targets perf's
    # embedded Python 2 interpreter.
    print "In trace_begin:\n"
    #
    # Will create several tables at the start, pebs_ll is for PEBS data with
    # load latency info, while gen_events is for general event.
    #
    con.execute("""
                create table if not exists gen_events (
                                name text,
                                symbol text,
                                comm text,
                                dso text
                                );""")
    con.execute("""
                create table if not exists pebs_ll (
                                name text,
                                symbol text,
                                comm text,
                                dso text,
                                flags integer,
                                ip integer,
                                status integer,
                                dse integer,
                                dla integer,
                                lat integer
                                );""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
    """Build an event object from one perf sample and store it in the DB.

    Called by perf once per sample.  Symbol and dso may be missing from
    *param_dict* when they could not be resolved, so placeholder names are
    used in that case.
    """
    # attr/sample are unused here but are kept as a reference of what this
    # template script has available (the header invites users to extend it).
    event_attr = param_dict["attr"]
    sample = param_dict["sample"]
    raw_buf = param_dict["raw_buf"]
    comm = param_dict["comm"]
    name = param_dict["ev_name"]
    # Symbol and dso info are not always resolved.
    # dict.get() replaces the original dict.has_key() calls: has_key() is
    # Python-2-only (removed in Python 3) and .get() behaves identically.
    dso = param_dict.get("dso", "Unknown_dso")
    symbol = param_dict.get("symbol", "Unknown_symbol")
    # Create the event object and insert it to the right table in database
    event = create_event(name, comm, dso, symbol, raw_buf)
    insert_db(event)
def insert_db(event):
    """Persist one event object into the table matching its event type."""
    if event.ev_type == EVTYPE_GENERIC:
        generic_row = (event.name, event.symbol, event.comm, event.dso)
        con.execute("insert into gen_events values(?, ?, ?, ?)",
                generic_row)
    elif event.ev_type == EVTYPE_PEBS_LL:
        # Mask off the top bit so the addresses are stored as non-negative
        # values in sqlite's signed 64-bit integer columns.
        event.ip &= 0x7fffffffffffffff
        event.dla &= 0x7fffffffffffffff
        pebs_row = (event.name, event.symbol, event.comm, event.dso,
                    event.flags, event.ip, event.status, event.dse,
                    event.dla, event.lat)
        con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                pebs_row)
def trace_end():
    """perf-script callback: runs after all samples; report and clean up."""
    print "In trace_end:\n"
    # We show the basic info for the 2 type of event classes
    show_general_events()
    show_pebs_ll()
    con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
    """Render a sample count as a log2-scaled histogram bar of '#' chars.

    Counts can span many orders of magnitude, so the bar grows with
    log2(num); every positive count maps to at least one '#'.
    """
    bar_len = int(math.log(num, 2)) + 1
    return '#' * bar_len
def show_general_events():
    """Print count histograms for generic events grouped by comm/symbol/dso."""
    # Check the total record number in the table; bail out when empty.
    count = con.execute("select count(*) from gen_events")
    for t in count:
        print "There is %d records in gen_events table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the general events grouped by thread/symbol/dso: \n"
    # Group by thread
    # 'order by -count(...)' sorts by descending count without needing DESC.
    commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dso
    print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
    dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
    for row in dsoq:
        print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
    """Print count histograms for PEBS load-latency events.

    Groups by thread, symbol, data-source encoding (dse) and latency.
    """
    # Check the total record number in the table; bail out when empty.
    count = con.execute("select count(*) from pebs_ll")
    for t in count:
        print "There is %d records in pebs_ll table" % t[0]
        if t[0] == 0:
            return
    print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
    # Group by thread
    commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
    print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
    for row in commq:
        print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by symbol
    print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
    symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
    for row in symbolq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by dse
    dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
    print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
    for row in dseq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
    # Group by latency (ascending, unlike the count-ordered groupings above)
    latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
    print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
    for row in latq:
        print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
    """Fallback perf-script callback: dump any event with no dedicated handler."""
    print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
|
gpl-2.0
|
sandeepgupta2k4/tensorflow
|
tensorflow/examples/benchmark/sample_benchmark.py
|
94
|
1605
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sample TensorFlow benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
# Define a class that extends from tf.test.Benchmark.
class SampleBenchmark(tf.test.Benchmark):
  """Minimal example of the tf.test.Benchmark reporting API."""

  # Note: benchmark method name must start with `benchmark`.
  def benchmarkSum(self):
    with tf.Session() as sess:
      lhs = tf.constant(10)
      rhs = tf.constant(5)
      total_op = tf.add(lhs, rhs)

      num_iters = 100
      start = time.time()
      for _ in range(num_iters):
        sess.run(total_op)
      elapsed = time.time() - start

      # Call report_benchmark to report a metric value.
      # wall_time should always be reported per iteration.
      self.report_benchmark(
          name="sum_wall_time",
          wall_time=elapsed / num_iters,
          iters=num_iters)
# Entry point: tf.test.main() discovers and runs the test/benchmark methods.
if __name__ == "__main__":
  tf.test.main()
|
apache-2.0
|
AfonsoFGarcia/swift
|
swift/common/middleware/list_endpoints.py
|
29
|
10059
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
List endpoints for an object, account or container.
This middleware makes it possible to integrate swift with software
that relies on data locality information to avoid network overhead,
such as Hadoop.
Using the original API, answers requests of the form::
/endpoints/{account}/{container}/{object}
/endpoints/{account}/{container}
/endpoints/{account}
/endpoints/v1/{account}/{container}/{object}
/endpoints/v1/{account}/{container}
/endpoints/v1/{account}
with a JSON-encoded list of endpoints of the form::
http://{server}:{port}/{dev}/{part}/{acc}/{cont}/{obj}
http://{server}:{port}/{dev}/{part}/{acc}/{cont}
http://{server}:{port}/{dev}/{part}/{acc}
correspondingly, e.g.::
http://10.1.1.1:6000/sda1/2/a/c2/o1
http://10.1.1.1:6000/sda1/2/a/c2
http://10.1.1.1:6000/sda1/2/a
Using the v2 API, answers requests of the form::
/endpoints/v2/{account}/{container}/{object}
/endpoints/v2/{account}/{container}
/endpoints/v2/{account}
with a JSON-encoded dictionary containing a key 'endpoints' that maps to a list
of endpoints having the same form as described above, and a key 'headers' that
maps to a dictionary of headers that should be sent with a request made to
the endpoints, e.g.::
{ "endpoints": {"http://10.1.1.1:6010/sda1/2/a/c3/o1",
"http://10.1.1.1:6030/sda3/2/a/c3/o1",
"http://10.1.1.1:6040/sda4/2/a/c3/o1"},
"headers": {"X-Backend-Storage-Policy-Index": "1"}}
In this example, the 'headers' dictionary indicates that requests to the
endpoint URLs should include the header 'X-Backend-Storage-Policy-Index: 1'
because the object's container is using storage policy index 1.
The '/endpoints/' path is customizable ('list_endpoints_path'
configuration parameter).
Intended for consumption by third-party services living inside the
cluster (as the endpoints make sense only inside the cluster behind
the firewall); potentially written in a different language.
This is why it's provided as a REST API and not just a Python API:
to avoid requiring clients to write their own ring parsers in their
languages, and to avoid the necessity to distribute the ring file
to clients and keep it up-to-date.
Note that the call is not authenticated, which means that a proxy
with this middleware enabled should not be open to an untrusted
environment (everyone can query the locality data using this middleware).
"""
from urllib import quote, unquote
from swift.common.ring import Ring
from swift.common.utils import json, get_logger, split_path
from swift.common.swob import Request, Response
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.storage_policy import POLICIES
from swift.proxy.controllers.base import get_container_info
RESPONSE_VERSIONS = (1.0, 2.0)
class ListEndpointsMiddleware(object):
    """
    List endpoints for an object, account or container.
    See above for a full description.
    Uses configuration parameter `swift_dir` (default `/etc/swift`).
    :param app: The next WSGI filter or app in the paste.deploy
    chain.
    :param conf: The configuration dict for the middleware.
    """
    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf, log_route='endpoints')
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        # Account and container rings are fixed; the object ring depends on
        # the container's storage policy and is loaded on demand in
        # get_object_ring().
        self.account_ring = Ring(self.swift_dir, ring_name='account')
        self.container_ring = Ring(self.swift_dir, ring_name='container')
        self.endpoints_path = conf.get('list_endpoints_path', '/endpoints/')
        # Normalize to a trailing slash so the prefix test and version
        # parsing below can rely on it.
        if not self.endpoints_path.endswith('/'):
            self.endpoints_path += '/'
        self.default_response_version = 1.0
        # Maps a parsed response version to its formatter method.
        self.response_map = {
            1.0: self.v1_format_response,
            2.0: self.v2_format_response,
        }

    def get_object_ring(self, policy_idx):
        """
        Get the ring object to use to handle a request based on its policy.
        :policy_idx: policy index as defined in swift.conf
        :returns: appropriate ring object
        """
        return POLICIES.get_object_ring(policy_idx, self.swift_dir)

    def _parse_version(self, raw_version):
        """
        Parse a raw version string like 'v1' or '2.0' into a float.
        :raises: ValueError if the version is not in RESPONSE_VERSIONS.
        """
        err_msg = 'Unsupported version %r' % raw_version
        try:
            version = float(raw_version.lstrip('v'))
        except ValueError:
            raise ValueError(err_msg)
        if not any(version == v for v in RESPONSE_VERSIONS):
            raise ValueError(err_msg)
        return version

    def _parse_path(self, request):
        """
        Parse path parts of request into a tuple of version, account,
        container, obj. Unspecified path parts are filled in as None,
        except version which is always returned as a float using the
        configured default response version if not specified in the
        request.
        :param request: the swob request
        :returns: parsed path parts as a tuple with version filled in as
        configured default response version if not specified.
        :raises: ValueError if path is invalid, message will say why.
        """
        # Strip the configured endpoints prefix but keep the leading '/'.
        clean_path = request.path[len(self.endpoints_path) - 1:]
        # try to peel off version
        try:
            raw_version, rest = split_path(clean_path, 1, 2, True)
        except ValueError:
            raise ValueError('No account specified')
        try:
            version = self._parse_version(raw_version)
        except ValueError:
            if raw_version.startswith('v') and '_' not in raw_version:
                # looks more like a invalid version than an account
                raise
            # probably no version specified, but if the client really
            # said /endpoints/v_3/account they'll probably be sorta
            # confused by the useless response and lack of error.
            version = self.default_response_version
            rest = clean_path
        else:
            rest = '/' + rest if rest else '/'
        try:
            account, container, obj = split_path(rest, 1, 3, True)
        except ValueError:
            raise ValueError('No account specified')
        return version, account, container, obj

    def v1_format_response(self, req, endpoints, **kwargs):
        # v1 responses are a bare JSON list of endpoint URLs.
        return Response(json.dumps(endpoints),
                        content_type='application/json')

    def v2_format_response(self, req, endpoints, storage_policy_index,
                           **kwargs):
        # v2 responses wrap the endpoint list in a dict and include the
        # backend headers the client should send to those endpoints.
        resp = {
            'endpoints': endpoints,
            'headers': {},
        }
        if storage_policy_index is not None:
            resp['headers'][
                'X-Backend-Storage-Policy-Index'] = str(storage_policy_index)
        return Response(json.dumps(resp),
                        content_type='application/json')

    def __call__(self, env, start_response):
        """WSGI entry point: answer /endpoints requests, pass the rest on."""
        request = Request(env)
        # Anything outside our configured prefix goes straight through.
        if not request.path.startswith(self.endpoints_path):
            return self.app(env, start_response)
        if request.method != 'GET':
            return HTTPMethodNotAllowed(
                req=request, headers={"Allow": "GET"})(env, start_response)
        try:
            version, account, container, obj = self._parse_path(request)
        except ValueError as err:
            return HTTPBadRequest(str(err))(env, start_response)
        # Path parts arrive URL-encoded; decode before the ring lookups.
        if account is not None:
            account = unquote(account)
        if container is not None:
            container = unquote(container)
        if obj is not None:
            obj = unquote(obj)
        storage_policy_index = None
        if obj is not None:
            # Objects live in a per-policy ring, so first resolve the
            # container's storage policy, then pick the matching ring.
            container_info = get_container_info(
                {'PATH_INFO': '/v1/%s/%s' % (account, container)},
                self.app, swift_source='LE')
            storage_policy_index = container_info['storage_policy']
            obj_ring = self.get_object_ring(storage_policy_index)
            partition, nodes = obj_ring.get_nodes(
                account, container, obj)
            endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \
                '{account}/{container}/{obj}'
        elif container is not None:
            partition, nodes = self.container_ring.get_nodes(
                account, container)
            endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \
                '{account}/{container}'
        else:
            partition, nodes = self.account_ring.get_nodes(
                account)
            endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \
                '{account}'
        # One endpoint URL per ring node holding the partition.
        endpoints = []
        for node in nodes:
            endpoint = endpoint_template.format(
                ip=node['ip'],
                port=node['port'],
                device=node['device'],
                partition=partition,
                account=quote(account),
                container=quote(container or ''),
                obj=quote(obj or ''))
            endpoints.append(endpoint)
        resp = self.response_map[version](
            request, endpoints=endpoints,
            storage_policy_index=storage_policy_index)
        return resp(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy filter factory for the list_endpoints middleware.

    Local (filter-section) settings override the global ones.
    """
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)

    def list_endpoints_filter(app):
        return ListEndpointsMiddleware(app, merged_conf)

    return list_endpoints_filter
|
apache-2.0
|
Jgarcia-IAS/Fidelizacion_odoo
|
openerp/extras/jasper_reports/JasperReports/JasperServer.py
|
2
|
4411
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008-2012 NaN Projectes de Programari Lliure, S.L.
# http://www.NaN-tic.com
# Copyright (C) 2013 Tadeus Prastowo <[email protected]>
# Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import glob
import time
import socket
import subprocess
import xmlrpclib
import logging
try:
import release
from osv import osv
from tools.translate import _
except ImportError:
import openerp
from openerp import release
from openerp.osv import osv
from openerp.tools.translate import _
class JasperServer:
    """XML-RPC client wrapper around the Java JasperServer report renderer.

    Starts the Java process on demand and proxies Report.execute calls to
    it over http://localhost:<port>.
    """
    def __init__(self, port=8090):
        # Port the Java XML-RPC server listens on (default 8090).
        self.port = port
        self.pidfile = None
        url = 'http://localhost:%d' % port
        self.proxy = xmlrpclib.ServerProxy( url, allow_none = True )
        self.logger = logging.getLogger(__name__)

    def error(self, message):
        """Log an error via the module logger, or stdout as a fallback."""
        if self.logger:
            self.logger.error("%s" % message )
        else:
            print 'JasperReports: %s' % message

    def path(self):
        """Return the absolute directory containing this module."""
        return os.path.abspath(os.path.dirname(__file__))

    def setPidFile(self, pidfile):
        # Optional file into which the spawned Java process' PID is written.
        self.pidfile = pidfile

    def start(self):
        """Spawn the Java JasperServer process with the required CLASSPATH."""
        env = {}
        env.update( os.environ )
        # Classpath separator differs between Windows and POSIX.
        if os.name == 'nt':
            sep = ';'
        else:
            sep = ':'
        libs = os.path.join( self.path(), '..', 'java', 'lib', '*.jar' )
        env['CLASSPATH'] = os.path.join( self.path(), '..', 'java' + sep ) + sep.join( glob.glob( libs ) ) + sep + os.path.join( self.path(), '..', 'custom_reports' )
        cwd = os.path.join( self.path(), '..', 'java' )
        # Set headless = True because otherwise, java may use existing X session and if session is
        # closed JasperServer would start throwing exceptions. So we better avoid using the session at all.
        command = ['java', '-Djava.awt.headless=true', 'com.nantic.jasperreports.JasperServer', unicode(self.port)]
        process = subprocess.Popen(command, env=env, cwd=cwd)
        if self.pidfile:
            f = open( self.pidfile, 'w')
            try:
                f.write( str( process.pid ) )
            finally:
                f.close()

    def execute(self, *args):
        """
        Render report and return the number of pages generated.
        """
        try:
            return self.proxy.Report.execute( *args )
        except (xmlrpclib.ProtocolError, socket.error), e:
            # Connection failed: the server is probably not running yet.
            # Start it and retry once per second for up to 40 seconds.
            #self.info("First try did not work: %s / %s" % (str(e), str(e.args)) )
            self.start()
            for x in xrange(40):
                time.sleep(1)
                try:
                    return self.proxy.Report.execute( *args )
                except (xmlrpclib.ProtocolError, socket.error), e:
                    self.error("EXCEPTION: %s %s" % ( str(e), str(e.args) ))
                    pass
                except xmlrpclib.Fault, e:
                    # Server reached but the report itself failed.
                    raise osv.except_osv(_('Report Error'), e.faultString)
        except xmlrpclib.Fault, e:
            raise osv.except_osv(_('Report Error'), e.faultString)
# vim:noexpandtab:smartindent:tabstop=8:softtabstop=8:shiftwidth=8:
|
agpl-3.0
|
2013Commons/HUE-SHARK
|
apps/oozie/src/oozie/migrations/0015_auto__add_field_dataset_advanced_start_instance__add_field_dataset_ins.py
|
39
|
24089
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Dataset.advanced_start_instance'
db.add_column('oozie_dataset', 'advanced_start_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128), keep_default=False)
# Adding field 'Dataset.instance_choice'
db.add_column('oozie_dataset', 'instance_choice', self.gf('django.db.models.fields.CharField')(default='default', max_length=10), keep_default=False)
# Adding field 'Dataset.advanced_end_instance'
db.add_column('oozie_dataset', 'advanced_end_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Dataset.advanced_start_instance'
db.delete_column('oozie_dataset', 'advanced_start_instance')
# Deleting field 'Dataset.instance_choice'
db.delete_column('oozie_dataset', 'instance_choice')
# Deleting field 'Dataset.advanced_end_instance'
db.delete_column('oozie_dataset', 'advanced_end_instance')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 6, 19, 26, 33, 676504)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 676468)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 677121)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
|
apache-2.0
|
idea4bsd/idea4bsd
|
python/lib/Lib/site-packages/django/contrib/localflavor/fr/fr_department.py
|
314
|
3326
|
# -*- coding: utf-8 -*-
# Mapping of INSEE department codes to display labels, as "(code, label)"
# pairs suitable for a Django form field's ``choices`` option.
# NOTE(review): labels are deliberately accent-free ASCII (e.g. "Ardeche",
# not "Ardèche"); do not "correct" the spelling — callers rely on the
# ASCII-only variant of these names.
DEPARTMENT_ASCII_CHOICES = (
    ('01', '01 - Ain'),
    ('02', '02 - Aisne'),
    ('03', '03 - Allier'),
    ('04', '04 - Alpes-de-Haute-Provence'),
    ('05', '05 - Hautes-Alpes'),
    ('06', '06 - Alpes-Maritimes'),
    ('07', '07 - Ardeche'),
    ('08', '08 - Ardennes'),
    ('09', '09 - Ariege'),
    ('10', '10 - Aube'),
    ('11', '11 - Aude'),
    ('12', '12 - Aveyron'),
    ('13', '13 - Bouches-du-Rhone'),
    ('14', '14 - Calvados'),
    ('15', '15 - Cantal'),
    ('16', '16 - Charente'),
    ('17', '17 - Charente-Maritime'),
    ('18', '18 - Cher'),
    ('19', '19 - Correze'),
    # There is no department "20": Corsica was split into 2A/2B (below).
    ('21', '21 - Cote-d\'Or'),
    ('22', '22 - Cotes-d\'Armor'),
    ('23', '23 - Creuse'),
    ('24', '24 - Dordogne'),
    ('25', '25 - Doubs'),
    ('26', '26 - Drome'),
    ('27', '27 - Eure'),
    ('28', '28 - Eure-et-Loire'),
    ('29', '29 - Finistere'),
    ('2A', '2A - Corse-du-Sud'),
    ('2B', '2B - Haute-Corse'),
    ('30', '30 - Gard'),
    ('31', '31 - Haute-Garonne'),
    ('32', '32 - Gers'),
    ('33', '33 - Gironde'),
    ('34', '34 - Herault'),
    ('35', '35 - Ille-et-Vilaine'),
    ('36', '36 - Indre'),
    ('37', '37 - Indre-et-Loire'),
    ('38', '38 - Isere'),
    ('39', '39 - Jura'),
    ('40', '40 - Landes'),
    ('41', '41 - Loir-et-Cher'),
    ('42', '42 - Loire'),
    ('43', '43 - Haute-Loire'),
    ('44', '44 - Loire-Atlantique'),
    ('45', '45 - Loiret'),
    ('46', '46 - Lot'),
    ('47', '47 - Lot-et-Garonne'),
    ('48', '48 - Lozere'),
    ('49', '49 - Maine-et-Loire'),
    ('50', '50 - Manche'),
    ('51', '51 - Marne'),
    ('52', '52 - Haute-Marne'),
    ('53', '53 - Mayenne'),
    ('54', '54 - Meurthe-et-Moselle'),
    ('55', '55 - Meuse'),
    ('56', '56 - Morbihan'),
    ('57', '57 - Moselle'),
    ('58', '58 - Nievre'),
    ('59', '59 - Nord'),
    ('60', '60 - Oise'),
    ('61', '61 - Orne'),
    ('62', '62 - Pas-de-Calais'),
    ('63', '63 - Puy-de-Dome'),
    ('64', '64 - Pyrenees-Atlantiques'),
    ('65', '65 - Hautes-Pyrenees'),
    ('66', '66 - Pyrenees-Orientales'),
    ('67', '67 - Bas-Rhin'),
    ('68', '68 - Haut-Rhin'),
    ('69', '69 - Rhone'),
    ('70', '70 - Haute-Saone'),
    ('71', '71 - Saone-et-Loire'),
    ('72', '72 - Sarthe'),
    ('73', '73 - Savoie'),
    ('74', '74 - Haute-Savoie'),
    ('75', '75 - Paris'),
    ('76', '76 - Seine-Maritime'),
    ('77', '77 - Seine-et-Marne'),
    ('78', '78 - Yvelines'),
    ('79', '79 - Deux-Sevres'),
    ('80', '80 - Somme'),
    ('81', '81 - Tarn'),
    ('82', '82 - Tarn-et-Garonne'),
    ('83', '83 - Var'),
    ('84', '84 - Vaucluse'),
    ('85', '85 - Vendee'),
    ('86', '86 - Vienne'),
    ('87', '87 - Haute-Vienne'),
    ('88', '88 - Vosges'),
    ('89', '89 - Yonne'),
    ('90', '90 - Territoire de Belfort'),
    ('91', '91 - Essonne'),
    ('92', '92 - Hauts-de-Seine'),
    ('93', '93 - Seine-Saint-Denis'),
    ('94', '94 - Val-de-Marne'),
    ('95', '95 - Val-d\'Oise'),
    # Overseas departments and collectivities use three-digit codes.
    ('971', '971 - Guadeloupe'),
    ('972', '972 - Martinique'),
    ('973', '973 - Guyane'),
    ('974', '974 - La Reunion'),
    ('975', '975 - Saint-Pierre-et-Miquelon'),
    ('976', '976 - Mayotte'),
    ('984', '984 - Terres Australes et Antarctiques'),
    ('986', '986 - Wallis et Futuna'),
    ('987', '987 - Polynesie Francaise'),
    ('988', '988 - Nouvelle-Caledonie'),
)
|
apache-2.0
|
lokI8/haas
|
haas/dev_support.py
|
3
|
1857
|
# Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import logging
from haas import config
from functools import wraps
def no_dry_run(f):
    """A decorator which "disables" a function during a dry run.

    A user can specify a `dry_run` option in the `devel` section of
    `haas.cfg`.  If the option is present (regardless of its value), any
    function or method decorated with `no_dry_run` will be "disabled."  The
    call will be logged (with level `logging.INFO`), but will not actually
    execute.  The function will instead return `None`.  Callers of decorated
    functions must accept a `None` value gracefully.

    The intended use case of `no_dry_run` is to disable functions which
    cannot be run because, for example, the HaaS is executing on a
    developer's workstation, which has no configured switch, libvirt, etc.

    If the `dry_run` option is not specified, this decorator has no effect.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        if config.cfg.has_option('devel', 'dry_run'):
            logger = logging.getLogger(__name__)
            # Pass lazy %-style arguments instead of pre-formatting with %,
            # so the message is only built if the record is actually emitted.
            logger.info('dry run, not executing: %s.%s(*%r,**%r)',
                        f.__module__, f.__name__, args, kwargs)
            return None
        else:
            return f(*args, **kwargs)
    return wrapper
|
apache-2.0
|
jaronson/googletest
|
scripts/upload_gtest.py
|
1963
|
2851
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'


def main():
    """Re-exec upload.py, ensuring the gtest group is on the --cc list."""
    # upload.py is expected to live in the same directory as this script.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    upload_py_path = os.path.join(script_dir, 'upload.py')

    new_argv = [upload_py_path]
    saw_cc_flag = False
    for argument in sys.argv[1:]:
        if not argument.startswith(CC_FLAG):
            new_argv.append(argument)
            continue
        # Merge the discussion group into the existing --cc list, dropping
        # empty entries produced by stray commas.
        saw_cc_flag = True
        addresses = [a for a in argument[len(CC_FLAG):].split(',') if a]
        if GTEST_GROUP not in addresses:
            addresses.append(GTEST_GROUP)
        new_argv.append(CC_FLAG + ','.join(addresses))
    if not saw_cc_flag:
        new_argv.append(CC_FLAG + GTEST_GROUP)

    # Replace the current process with upload.py, forwarding all flags.
    os.execv(upload_py_path, new_argv)


if __name__ == '__main__':
    main()
|
bsd-3-clause
|
bobcyw/django
|
django/contrib/gis/db/backends/postgis/introspection.py
|
330
|
5441
|
from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.postgresql.introspection import DatabaseIntrospection
class GeoIntrospectionError(Exception):
    """Internal signal: column not found in ``geometry_columns``; the caller
    falls back to looking it up in ``geography_columns``."""
class PostGISIntrospection(DatabaseIntrospection):
    """Database introspection for PostGIS-enabled PostgreSQL databases.

    Extends the stock PostgreSQL introspection with knowledge of the
    PostGIS ``geometry``/``geography`` column types and the PostGIS
    metadata tables used to describe them.
    """

    # Reverse dictionary for PostGIS geometry types not populated until
    # introspection is actually performed.
    postgis_types_reverse = {}

    # PostGIS bookkeeping tables that should never be introspected as
    # user-defined models.
    ignored_tables = DatabaseIntrospection.ignored_tables + [
        'geography_columns',
        'geometry_columns',
        'raster_columns',
        'spatial_ref_sys',
        'raster_overviews',
    ]

    # Overridden from parent to include raster indices in retrieval.
    # Raster indices have pg_index.indkey value 0 because they are an
    # expression over the raster column through the ST_ConvexHull function.
    # So the default query has to be adapted to include raster indices.
    _get_indexes_query = """
        SELECT DISTINCT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
            LEFT JOIN pg_catalog.pg_type t ON t.oid = attr.atttypid
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND (
                attr.attnum = idx.indkey[0] OR
                (t.typname LIKE 'raster' AND idx.indkey = '0')
            )
            AND attr.attnum > 0
            AND c.relname = %s"""

    def get_postgis_types(self):
        """
        Returns a dictionary with keys that are the PostgreSQL object
        identification integers for the PostGIS geometry and/or
        geography types (if supported).
        """
        field_types = [
            ('geometry', 'GeometryField'),
            # The value for the geography type is actually a tuple
            # to pass in the `geography=True` keyword to the field
            # definition.
            ('geography', ('GeometryField', {'geography': True})),
        ]
        postgis_types = {}
        # The OID integers associated with the geometry type may
        # be different across versions; hence, this is why we have
        # to query the PostgreSQL pg_type table corresponding to the
        # PostGIS custom data types.
        oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
        cursor = self.connection.cursor()
        try:
            for field_type in field_types:
                cursor.execute(oid_sql, (field_type[0],))
                # A type name can map to multiple OIDs; register them all.
                for result in cursor.fetchall():
                    postgis_types[result[0]] = field_type[1]
        finally:
            # Always release the cursor, even if a query fails.
            cursor.close()
        return postgis_types

    def get_field_type(self, data_type, description):
        """Resolve a column's OID to a Django field type, lazily teaching the
        reverse lookup table about the PostGIS custom types first."""
        if not self.postgis_types_reverse:
            # If the PostGIS types reverse dictionary is not populated, do so
            # now. In order to prevent unnecessary requests upon connection
            # initialization, the `data_types_reverse` dictionary is not updated
            # with the PostGIS custom types until introspection is actually
            # performed -- in other words, when this function is called.
            self.postgis_types_reverse = self.get_postgis_types()
            self.data_types_reverse.update(self.postgis_types_reverse)
        return super(PostGISIntrospection, self).get_field_type(data_type, description)

    def get_geometry_type(self, table_name, geo_col):
        """
        The geometry type OID used by PostGIS does not indicate the particular
        type of field that a geometry column is (e.g., whether it's a
        PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type.

        Returns a ``(field_type, field_params)`` tuple, where ``field_params``
        contains only the non-default ``srid``/``dim`` keyword arguments.
        """
        cursor = self.connection.cursor()
        try:
            try:
                # First seeing if this geometry column is in the `geometry_columns`
                cursor.execute('SELECT "coord_dimension", "srid", "type" '
                               'FROM "geometry_columns" '
                               'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                               (table_name, geo_col))
                row = cursor.fetchone()
                if not row:
                    # Not a geometry column -- fall through to the geography
                    # lookup in the except clause below.
                    raise GeoIntrospectionError
            except GeoIntrospectionError:
                cursor.execute('SELECT "coord_dimension", "srid", "type" '
                               'FROM "geography_columns" '
                               'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
                               (table_name, geo_col))
                row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
                                (table_name, geo_col))
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()
        return field_type, field_params
|
bsd-3-clause
|
motion2015/a3
|
common/djangoapps/third_party_auth/tests/testutil.py
|
38
|
3875
|
"""
Utilities for writing third_party_auth tests.
Used by Django and non-Django tests; must not have Django deps.
"""
from contextlib import contextmanager
import unittest
import mock
from third_party_auth import provider
# Name of the feature-flag key that toggles third-party auth in settings.
AUTH_FEATURES_KEY = 'ENABLE_THIRD_PARTY_AUTH'
class FakeDjangoSettings(object):
    """A fake for Django settings: exposes each mapping key as an attribute."""

    def __init__(self, mappings):
        """Initializes the fake from mappings dict.

        Uses ``items()`` rather than ``iteritems()`` so the fake works under
        both Python 2 and Python 3.
        """
        for key, value in mappings.items():
            setattr(self, key, value)
class TestCase(unittest.TestCase):
    """Base class for auth test cases.

    Snapshots the provider registry before each test and restores it
    afterwards, so individual tests may reconfigure providers freely.
    """

    # Tests legitimately poke at registry internals.
    # pylint: disable-msg=protected-access

    def setUp(self):
        super(TestCase, self).setUp()
        # Save the currently-configured providers, then start from a
        # clean registry for the test body.
        self._saved_providers = provider.Registry._get_all()
        provider.Registry._reset()

    def tearDown(self):
        # Drop anything the test configured and restore the snapshot.
        provider.Registry._reset()
        provider.Registry.configure_once(self._saved_providers)
        super(TestCase, self).tearDown()
@contextmanager
def simulate_running_pipeline(pipeline_target, backend, email=None, fullname=None, username=None):
    """Simulate that a third-party-auth pipeline is currently running.

    Patches ``get`` and ``running`` on the module named by *pipeline_target*
    so the pipeline appears to be mid-flight for the duration of the ``with``
    block.  *pipeline_target* must be the dotted path to
    ``third_party_auth.pipeline`` *as imported by the software under test*:
    if ``foo/bar.py`` does ``from third_party_auth import pipeline``, use
    ``"foo.bar.pipeline"``; if it does ``import third_party_auth``, use
    ``"foo.bar.third_party_auth.pipeline"``.

    Arguments:
        pipeline_target (string): Dotted path to the pipeline module as it is
            imported in the software under test.
        backend (string): Name of the python-social-auth *backend* currently
            running (e.g. "google-oauth2").  Note this is NOT the same as the
            name of the provider.

    Keyword Arguments:
        email (string): Optional email address reported by the provider
            (useful for filling in the registration form).
        fullname (string): Optional full name reported by the provider.
        username (string): Optional username suggested by the pipeline.

    Returns:
        None
    """
    # Assemble the fake pipeline payload from the supplied details.
    details = {}
    if email is not None:
        details["email"] = email
    if fullname is not None:
        details["fullname"] = fullname
    kwargs = {"details": details}
    if username is not None:
        kwargs["username"] = username
    pipeline_data = {"backend": backend, "kwargs": kwargs}

    patchers = [
        mock.patch("{pipeline}.get".format(pipeline=pipeline_target), spec=True),
        mock.patch("{pipeline}.running".format(pipeline=pipeline_target), spec=True),
    ]
    mock_get, mock_running = [patcher.start() for patcher in patchers]
    mock_get.return_value = pipeline_data
    mock_running.return_value = True
    try:
        yield
    finally:
        # Always undo the patches, even if the with-body raised.
        for patcher in patchers:
            patcher.stop()
|
agpl-3.0
|
oscar810429/mysql-5.6_facebook
|
xtrabackup/test/kewpie/lib/test_mgmt/test_management.py
|
22
|
11911
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" test_management:
code related to the gathering / analysis / management of
the test cases
ie - collecting the list of tests in each suite, then
gathering additional, relevant information for the test-runner's dtr
mode. (traditional diff-based testing)
"""
# imports
import os
import re
import sys
import thread
class testManager(object):
    """Deals with scanning test directories, gathering test cases, and
    collecting per-test information (opt files, etc) for use by the
    test-runner
    """

    def __init__(self, variables, system_manager):
        """Capture runner configuration and initialize test bookkeeping."""
        self.system_manager = system_manager
        self.time_manager = system_manager.time_manager
        self.logging = system_manager.logging
        if variables['verbose']:
            self.logging.verbose("Initializing test manager...")
        # Attributes that logging.debug_class() should not dump.
        self.skip_keys = ['system_manager', 'verbose', 'debug']
        self.test_list = []
        self.first_test = 1
        self.total_test_count = 0
        self.executed_tests = {}  # We have a hash of 'status':[test_name..]
        self.executing_tests = {}
        self.verbose = variables['verbose']
        self.debug = variables['debug']
        self.default_engine = variables['defaultengine']
        self.dotest = variables['dotest']
        if self.dotest:
            self.dotest = self.dotest.strip()
        self.skiptest = variables['skiptest']
        if self.skiptest:
            self.skiptest = self.skiptest.strip()
        self.reorder = variables['reorder']
        self.suitelist = variables['suitelist']
        self.mode = variables['mode']
        self.suitepaths = variables['suitepaths']
        self.testdir = variables['testdir']
        self.desired_tests = variables['test_cases']
        self.logging.debug_class(self)

    def add_test(self, new_test_case):
        """ Add a new testCase to our self.test_list """
        self.test_list.append(new_test_case)

    def gather_tests(self):
        """Locate each requested suite, process it, then post-process."""
        self.logging.info("Processing test suites...")
        # BEGIN terrible hack to accomodate the fact that
        # our 'main' suite is also our testdir : /
        if self.suitelist is None and self.mode == 'dtr':
            self.suitepaths = [self.testdir]
            self.suitelist = ['main']
        # END horrible hack
        for suite in self.suitelist:
            suite_path = self.find_suite_path(suite)
            if suite_path:
                self.process_suite(suite_path)
            else:
                self.logging.error("Could not find suite: %s in any of paths: %s" % (suite, ", ".join(self.suitepaths)))
        self.process_gathered_tests()

    def process_gathered_tests(self):
        """ We do some post-gathering analysis and whatnot
            Report an error if there were desired_tests but no tests
            were found.  Otherwise just report what we found
        """
        # See if we need to reorder our test cases
        if self.reorder:
            self.sort_testcases()
        if self.desired_tests and not self.test_list:
            # We wanted tests, but found none
            # Probably need to make this smarter at some point
            # To maybe make sure that we found all of the desired tests...
            # However, this is a start / placeholder code
            self.logging.error("Unable to locate any of the desired tests: %s" % (" ,".join(self.desired_tests)))
        self.total_test_count = len(self.test_list)
        self.logging.info("Found %d test(s) for execution" % (self.total_test_count))
        self.logging.debug("Found tests:")
        self.logging.debug("%s" % (self.print_test_list()))

    def find_suite_path(self, suitename):
        """ We have a suitename, we need to locate the path to
            the juicy suitedir in one of our suitepaths.
            Theoretically, we could have multiple matches, but
            such things should never be allowed, so we don't
            code for it.  We return the first match.

            testdir can either be suitepath/suitename or
            suitepath/suitename/tests.  We test and return the
            existing path.  Return None if no match found
        """
        # BEGIN horrible hack to accomodate bad location of main suite
        if self.mode == 'dtr':
            if self.suitepaths == [self.testdir] or suitename == 'main':
                # We treat this as the 'main' suite
                return self.testdir
        # END horrible hack
        # BUGFIX: start at None so an empty self.suitepaths returns None
        # instead of raising UnboundLocalError on the final return.
        suite_path = None
        for suitepath in self.suitepaths:
            suite_path = self.system_manager.find_path(
                [os.path.join(suitepath, suitename, 'tests'),
                 os.path.join(suitepath, suitename)], required=0)
            if suite_path:
                return suite_path
        return suite_path

    def process_suite(self, suite_dir):
        """Process a test suite.
           This includes searching for tests in test_list and only
           working with the named tests (all tests in suite is the default)
           Further processing includes reading the disabled.def file
           to know which tests to skip, processing the suite.opt file,
           and processing the individual test cases for data relevant
           to the rest of the test-runner
        """
        # BUGFIX: the original logged the undefined name 'suite' here,
        # raising NameError as soon as any suite was processed; the
        # parameter is 'suite_dir'.
        self.logging.verbose("Processing suite: %s" % (suite_dir))

    def has_tests(self):
        """Return 1 if we have tests in our testlist, 0 otherwise"""
        return len(self.test_list)

    def get_testCase(self, requester):
        """return a testCase """
        if self.first_test:
            # we start our timer
            self.time_manager.start('total_time', 'total_time')
            self.first_test = 0
        test_case = None
        if self.has_tests():
            test_case = self.test_list.pop(0)
            self.record_test_executor(requester, test_case.fullname)
        return test_case

    def record_test_executor(self, requester, test_name):
        """ We record the test case and executor name as this could be useful
            We don't *know* this is needed, but we can always change this
            later
        """
        self.executing_tests[test_name] = requester

    def record_test_result(self, test_case, test_status, output, exec_time):
        """ Accept the results of an executed testCase for further
            processing.
        """
        if test_status not in self.executed_tests:
            self.executed_tests[test_status] = [test_case]
        else:
            self.executed_tests[test_status].append(test_case)
        # report.  If the test failed, we print any additional
        # output returned by the test executor
        # We may want to report additional output at other times
        report_output = test_status != 'pass'
        self.logging.test_report(test_case.fullname, test_status,
                                 exec_time, output, report_output)

    def print_test_list(self):
        """Return a bracketed, comma-separated list of queued test names."""
        test_names = [test.fullname for test in self.test_list]
        return "[ %s ]" % (", ".join(test_names))

    def statistical_report(self):
        """ Report out various testing statistics:
            Failed/Passed %success
            list of failed test cases
        """
        # This is probably hacky, but I'll think of a better
        # location later.  When we are ready to see our
        # statistical report, we know to stop the total time timer
        if not self.first_test:
            total_exec_time = self.time_manager.stop('total_time')
        self.logging.write_thick_line()
        self.logging.info("Test execution complete in %d seconds" % (total_exec_time))
        self.logging.info("Summary report:")
        self.report_executed_tests()
        self.report_test_statuses()
        if not self.first_test:
            self.time_manager.summary_report()

    def report_test_statuses(self):
        """ Method to report out various test statuses we
            care about
        """
        test_statuses = ['fail', 'timeout', 'skipped', 'disabled']
        for test_status in test_statuses:
            self.report_tests_by_status(test_status)

    def get_executed_test_count(self):
        """ Return how many tests were executed """
        total_count = 0
        for test_list in self.executed_tests.values():
            total_count = total_count + len(test_list)
        return total_count

    def report_executed_tests(self):
        """ Report out tests by status """
        total_executed_count = self.get_executed_test_count()
        if self.total_test_count:
            executed_ratio = (float(total_executed_count) / float(self.total_test_count))
            executed_percent = executed_ratio * 100
        else:
            # We prevent division by 0 if we didn't find any tests to execute
            executed_ratio = 0
            executed_percent = 0
        self.logging.info("Executed %s/%s test cases, %.2f percent" % (total_executed_count,
                                                                       self.total_test_count,
                                                                       executed_percent))
        for test_status in self.executed_tests.keys():
            status_count = self.get_count_by_status(test_status)
            test_percent = (float(status_count) / float(total_executed_count)) * 100
            self.logging.info("STATUS: %s, %d/%d test cases, %.2f percent executed" % (test_status.upper(),
                                                                                       status_count,
                                                                                       total_executed_count,
                                                                                       test_percent))

    def report_tests_by_status(self, status):
        """Log the names of all executed tests that ended in *status*."""
        matching_tests = []
        if status in self.executed_tests:
            for testcase in self.executed_tests[status]:
                matching_tests.append(testcase.fullname)
            self.logging.info("%s tests: %s" % (status.upper(), ", ".join(matching_tests)))

    def get_count_by_status(self, test_status):
        """ Return how many tests are in a given test_status """
        if test_status in self.executed_tests:
            return len(self.executed_tests[test_status])
        else:
            return 0

    def sort_testcases(self):
        """ Sort testcases to optimize test execution.
            This can be very mode-specific
        """
        self.logging.verbose("Reordering testcases to optimize test execution...")

    def has_failing_tests(self):
        """Return the combined count of failed and timed-out tests."""
        return (self.get_count_by_status('fail') + self.get_count_by_status('timeout'))
|
gpl-2.0
|
thruflo/ntorque
|
src/ntorque/work/requeue.py
|
1
|
3511
|
# -*- coding: utf-8 -*-
"""Provides ``RequeuePoller``, a utility that polls the db and add tasks
to the queue.
"""
__all__ = [
'RequeuePoller',
]
import logging
# Module-level logger, used as the default collaborator for RequeuePoller.
logger = logging.getLogger(__name__)
import time
import transaction
from datetime import datetime
from redis.exceptions import RedisError
from sqlalchemy.exc import SQLAlchemyError
from pyramid_redis.hooks import RedisFactory
from ntorque import model
from ntorque import util
from . import main
class RequeuePoller(object):
    """Polls the database for tasks that should be re-queued."""

    def __init__(self, redis, channel, delay=0.001, interval=5, **kwargs):
        """Store the redis client, channel name, and timing parameters.

        Dependency kwargs (``call_in_process``, ``get_tasks``, ``logger``,
        ``session``, ``time``) default to the real implementations.  They are
        resolved lazily because ``kwargs.get(key, default)`` evaluates the
        default eagerly -- the original code constructed ``model.GetDueTasks()``
        on every init even when an override was supplied.
        """
        self.redis = redis
        self.channel = channel
        self.delay = delay
        self.interval = interval
        self.call_in_process = kwargs['call_in_process'] if 'call_in_process' in kwargs else util.call_in_process
        self.get_tasks = kwargs['get_tasks'] if 'get_tasks' in kwargs else model.GetDueTasks()
        self.logger = kwargs['logger'] if 'logger' in kwargs else logger
        self.session = kwargs['session'] if 'session' in kwargs else model.Session
        self.time = kwargs['time'] if 'time' in kwargs else time

    def start(self):
        """Entry point: begin the (endless) polling loop."""
        self.poll()

    def poll(self):
        """Poll the db ad-infinitum."""
        while True:
            t1 = self.time.time()
            # Run the query in a subprocess so db errors can't wedge us.
            tasks = self.call_in_process(self.query)
            if tasks:
                for task in tasks:
                    try:
                        self.enqueue(*task)
                    except RedisError as err:
                        # Best-effort: log and keep going; the task stays
                        # due and will be picked up on a later poll.
                        self.logger.warn(err, exc_info=True)
                    self.time.sleep(self.delay)
            # Sleep out the remainder of the polling interval, if any.
            current_time = self.time.time()
            due_time = t1 + self.interval
            if current_time < due_time:
                self.time.sleep(due_time - current_time)

    def query(self):
        """Return ``(id, retry_count)`` pairs for all currently-due tasks."""
        tasks = []
        with transaction.manager:
            try:
                tasks = [(x.id, x.retry_count) for x in self.get_tasks()]
            except SQLAlchemyError as err:
                self.logger.warn(err, exc_info=True)
            finally:
                # Release the thread-local db session either way.
                self.session.remove()
        return tasks

    def enqueue(self, id_, retry_count):
        """Push an instruction to re-try the task on the redis channel."""
        instruction = '{0}:{1}'.format(id_, retry_count)
        self.redis.rpush(self.channel, instruction)
class ConsoleScript(object):
    """Bootstrap the environment and run the requeue poller."""
    def __init__(self, **kwargs):
        self.requeue_cls = kwargs.get('requeue_cls', RequeuePoller)
        self.get_redis = kwargs.get('get_redis', RedisFactory())
        self.get_config = kwargs.get('get_config', main.Bootstrap())
        self.session = kwargs.get('session', model.Session)
    def __call__(self):
        """Read the configuration, build the poller and run it forever."""
        registry = self.get_config().registry
        settings = registry.settings
        client = self.get_redis(settings, registry=registry)
        poller = self.requeue_cls(
            client,
            settings.get('ntorque.redis_channel'),
            interval=int(settings.get('ntorque.requeue_interval')),
        )
        try:
            poller.start()
        finally:
            # Always release the db session, even if the poller dies.
            self.session.remove()
# NOTE(review): this rebinds the name `main`, shadowing the `main` module
# imported above (`from . import main`).  ConsoleScript() captures
# main.Bootstrap() on its right-hand side *before* the rebinding happens, so
# behavior is correct, but the shadowing is confusing to readers.
main = ConsoleScript()
|
unlicense
|
jacobraj/MAMA
|
site_scons/version_helper.py
|
15
|
1706
|
import os,re,posixpath
## Get the Module Versions
#
# Walk the project tree looking for VERSION.scons files. Read in these files
# and parse the versions accordingly, generating versions which can help
# facilitate building later. The dictionary is then passed to the environment
# for easy access
def get_project_versions(dir):
    """Walk *dir* for VERSION.scons files and parse the module versions.

    Each VERSION.scons file holds one "<module> <major>.<minor>.<release>"
    line.  Returns {module: ver} where ver has keys 'major', 'minor',
    'release', 'releaseString', 'build' and (when the release starts with
    digits) 'winrelease'.  A release like "3b" yields build == ord('b') and
    winrelease == "3"; a purely numeric release yields build == '0'.
    """
    versions = {}
    for root, _dirs, files in os.walk(dir):
        for filename in files:
            if filename != "VERSION.scons":
                continue
            # Context manager closes the handle even on a parse error; the
            # original leaked the handle and also clobbered the os.walk
            # files-list variable by reusing the name `f`.
            with open(posixpath.join(root, filename)) as fp:
                line = fp.read()
            mod, version = line.split()
            major, minor, release = version.split('.')
            ver = {
                'major': major,
                'minor': minor,
                'release': release,
                'releaseString': '%s.%s.%s' % (major, minor, release),
                'build': 1,
            }
            m = re.match(r'^(\d+)(\w)$', release)
            if m:
                # Trailing alphanumeric character encodes the build number.
                ver['build'] = ord(m.group(2))
                ver['winrelease'] = m.group(1)
            else:
                ver['build'] = '0'
                m = re.match(r'^(\d+)', release)
                if m:
                    ver['winrelease'] = m.group(1)
            versions[mod] = ver
    return versions
def write_versions_file( target, source, env ):
    """SCons action: write "<module> <releaseString>" lines, sorted by module.

    target -- list of SCons nodes; target[0].abspath is the output path.
    source -- unused (present to satisfy the SCons action signature).
    env    -- construction environment; env['versions'] maps module name to
              the version dict produced by get_project_versions().
    """
    versions = env['versions']
    # `with` guarantees the file is flushed and closed even if a versions
    # entry is malformed (the original left the handle open on error).
    with open(target[0].abspath, 'w') as out:
        for p in sorted(versions):
            out.write('%s %s\n' % (p, versions[p]['releaseString']))
|
lgpl-2.1
|
gganis/root
|
interpreter/llvm/src/docs/conf.py
|
22
|
8481
|
# -*- coding: utf-8 -*-
#
# LLVM documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LLVM'
copyright = u'2003-%d, LLVM Project' % date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.9'
# The full version, including alpha/beta/rc tags.
release = '3.9'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%Y-%m-%d'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'llvm-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = { "nosidebar": True }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LLVMdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LLVM.tex', u'LLVM Documentation',
u'LLVM project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory.
basedir = os.path.dirname(__file__)
man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)."
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)
for name in os.listdir(command_guide_path):
    # Ignore non-ReST files and the index page.
    if not name.endswith('.rst') or name in ('index.rst',):
        continue
    # Otherwise, automatically extract the description.
    file_subpath = os.path.join(command_guide_subpath, name)
    with open(os.path.join(command_guide_path, name)) as f:
        # First two lines are the reST page title and its underline; a valid
        # title underline has the same length as the title.
        title = f.readline().rstrip('\n')
        header = f.readline().rstrip('\n')
        # NOTE: Python 2 print-to-stderr syntax; this config predates py3.
        if len(header) != len(title):
            print >>sys.stderr, (
                "error: invalid header in %r (does not match title)" % (
                    file_subpath,))
        if ' - ' not in title:
            print >>sys.stderr, (
                ("error: invalid title in %r "
                 "(expected '<name> - <description>')") % (
                    file_subpath,))
        # Split the name out of the title.
        # NOTE(review): when ' - ' is missing, the error above is printed and
        # the split below still raises ValueError, aborting the build with a
        # raw traceback -- presumably intentional fail-fast; confirm.
        name,description = title.split(' - ', 1)
        man_pages.append((file_subpath.replace('.rst',''), name,
                          description, man_page_authors, 1))
# If true, show URL addresses after external links.
#man_show_urls = False
# FIXME: Define intersphinx configration.
intersphinx_mapping = {}
|
lgpl-2.1
|
AlanZatarain/urssus
|
urssus/postmodel.py
|
4
|
11489
|
# -*- coding: utf-8 -*-
# uRSSus, a multiplatform GUI news agregator
# Copyright (C) 2008 Roberto Alsina
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import with_statement
from globals import *
from dbtables import *
from PyQt4 import QtGui, QtCore
import operator
from util import extime
# Roles used in the items
sorting=QtCore.Qt.UserRole      # custom role holding each item's sort key
display=QtCore.Qt.DisplayRole   # standard Qt role for the text shown
post_id=QtCore.Qt.UserRole+1    # custom role holding the db id of the post
class PostModel(QtGui.QStandardItemModel):
    def __init__(self, parent, feed=None, textFilter=None, statusFilter=None):
        """Model showing the posts of one feed (or folder) in 4 columns.

        parent       -- Qt parent object.
        feed         -- Feed whose posts are shown.  NOTE(review): despite the
                        feed=None default, feed.id is dereferenced below, so
                        passing None crashes -- the default appears unusable.
        textFilter   -- substring matched against title/content/tags.
        statusFilter -- Post column (e.g. Post.unread) that must be True.
        """
        QtGui.QStandardItemModel.__init__(self, parent)
        self.feed_id=feed.id
        self.textFilter=textFilter
        self.statusFilter=statusFilter
        self.setSortRole(sorting)
        self.star=QtGui.QIcon(':/star.svg')
        self.star2=QtGui.QIcon(':/star2.svg')
        self._clear()
        # Restore the user's last sort order (default: column 2 = Date, desc).
        column,order = config.getValue('ui','postSorting',[2,QtCore.Qt.DescendingOrder])
        self.sort(column,order) # Date, descending
        self.font=QtGui.QApplication.instance().font()
        self.boldFont=QtGui.QApplication.instance().font()
        self.boldFont.setBold(True)
        self.unreadColor=QtGui.QColor('red')
        self.color=QtGui.QColor('black')
        # NOTE(review): initData's parameter is `update=False`; passing the
        # feed object makes it truthy, i.e. acts as update=True -- confirm
        # this is intended (the _clear() above already reset the model).
        self.initData(feed)
    def _clear(self):
        """Reset the model: no rows, empty caches, 4 column headers."""
        self.clear()
        # Parallel caches kept in sync with the model rows.  A post_data row
        # is [id, title.lower(), date, feedname.lower(), important, unread]
        # (see initData/updateItem); post_ids mirrors column 0 of post_data.
        self.post_data=[]
        self.post_ids=[]
        self.setColumnCount(4)
        self.setHeaderData(0, QtCore.Qt.Horizontal, QtCore.QVariant(""))
        self.setHeaderData(1, QtCore.Qt.Horizontal, QtCore.QVariant("Title"))
        self.setHeaderData(2, QtCore.Qt.Horizontal, QtCore.QVariant("Date"))
        self.setHeaderData(3, QtCore.Qt.Horizontal, QtCore.QVariant("Feed"))
        # post id -> [star item, title item, date item, feed item]
        self.postItems={}
    def initData(self, update=False):
        '''Sets data from the feedDB. If update==True, data is just added, not
        replaced.
        '''
        feed=Feed.get_by(id=self.feed_id)
        # NOTE(review): if feed is None the feed.xmlUrl access below raises
        # AttributeError -- the `not feed` guard only clears, it doesn't return.
        if not feed or not update:
            self._clear()
        if feed.xmlUrl: # A regular feed
            self.posts=Post.query.filter(Post.feed==feed).filter(Post.deleted==False)
        else: # A folder
            self.posts=feed.allPostsQuery().filter(Post.deleted==False)
        # Filter by text according to the contents of self.textFilter
        if self.textFilter:
            self.posts=self.posts.filter(sql.or_(Post.title.like('%%%s%%'%self.textFilter),
                Post.content.like('%%%s%%'%self.textFilter),
                Post.tags.like('%%%s%%'%self.textFilter)))
        if self.statusFilter:
            self.posts=self.posts.filter(self.statusFilter==True)
        maxposts=config.getValue('options', 'maxPostsDisplayed', 1000)
        posts=self.posts.order_by(sql.desc('date')).limit(maxposts)
        i=0
        addFeed=config.getValue('ui','feedOnTitle', False)
        for post in posts:
            i+=1
            # Keep the UI responsive while loading large feeds.
            if i%10==0:
                QtGui.QApplication.instance().processEvents(QtCore.QEventLoop.ExcludeUserInputEvents, 1000)
            # Keep references to posts instead of posts, to
            # avoid stale data. nextPost/etc are about
            # iterating what's shown, not the result
            # of self.posts.all()
            if post.id in self.post_ids: #Existing post, update
                self.updateItem(post)
            else:
                # New post, add
                # Date
                d=utc2local(post.date)
                ed=extime.Time.fromDatetime(d)
                dh=ed.asHumanly()
                # Row cache layout: [id, title, date, feed, important, unread];
                # the last two are filled in by updateItem() below.
                data=[post.id, unicode(post).lower(), post.date,
                    unicode(post.feed).lower(), None, None]
                self.post_data.append(data)
                self.post_ids.append(post.id)
                item0=QtGui.QStandardItem()
                item1=QtGui.QStandardItem()
                item1.setToolTip('%s - Posted at %s'%(unicode(post), dh))
                if addFeed:
                    t='%s: %s'%(unicode(post.feed), unicode(post))
                else:
                    t=unicode(post)
                item1.setData(QtCore.QVariant(t), display)
                item1.setData(QtCore.QVariant(t.lower()), sorting)
                item1.setData(QtCore.QVariant(post.id), post_id)
                item2=QtGui.QStandardItem()
                item2.setToolTip('%s - Posted at %s'%(unicode(post), dh))
                item2.setData(QtCore.QVariant(dh), display)
                item2.setTextAlignment(QtCore.Qt.AlignRight)
                # AOL Fanhouse posts items with a time differential of milliseconds, so they sorted
                # differently on python and Qt. If someone makes it to microseconds, this solution
                # is borked
                qd=QtCore.QVariant(QtCore.QDateTime(QtCore.QDate(d.year, d.month, d.day),
                    QtCore.QTime(d.hour, d.minute, d.second, d.microsecond/1000)))
                item2.setData(qd, sorting)
                item2.setData(QtCore.QVariant(post.id), post_id)
                item3=QtGui.QStandardItem()
                item3.setData(QtCore.QVariant(unicode(post.feed)), display)
                item3.setData(QtCore.QVariant(unicode(post.feed).lower()), sorting)
                self.postItems[post.id]=[item0, item1, item2, item3]
                self.appendRow([item0, item1, item2, item3])
                self.updateItem(post)
        if update: # New data, resort
            self.sort(*self.lastSort)
        self.reset()
def hasPost(self, post):
return post.id in self.postItems
    def markRead(self):
        '''Marks as read what's shown by the model, as opposite to Feed.markAsRead, which
        marks what's on the feed. UI should call this one, usually'''
        feed_set=set()
        try:
            for d in self.post_data:
                # d[5] is the cached `unread` flag.
                # NOTE(review): the duplicated `if d[5]:` test is redundant.
                if d[5]:
                    if d[5]:
                        post=Post.get_by(id=d[0])
                        post.unread=False
                        self.updateItem(post)
                        feed_set.add(post.feed)
            elixir.session.commit()
        except:
            # NOTE(review): bare except -- any failure is rolled back and
            # silently swallowed; consider logging the exception.
            elixir.session.rollback()
        info("Marking read posts from feeds: %s"%(','.join(str(x) for x in list(feed_set))))
        for f in feed_set:
            # Force the unread counter to be recomputed and the UI refreshed.
            f.curUnread=-1
            feedStatusQueue.put([1, f.id])
def indexFromPost(self, post=None, id=None):
if not id and not post:
return QtCore.QModelIndex()
if not id:
id=post.id
if post and post.id in self.postItems:
return self.indexFromItem(self.postItems[id][1])
return QtCore.QModelIndex()
    def postFromIndex(self, index):
        """Return the Post behind *index* (any column), or None."""
        # Normalize to column 1 (the title item), which carries the post_id
        # role.  `<>` is the Python 2 inequality operator.
        if index.column()<>1:
            index=self.index(index.row(), 1, index.parent())
        item=self.itemFromIndex(index)
        if item:
            # QVariant.toInt() returns (value, ok); keep only the value.
            id=item.data(post_id).toInt()[0]
            return Post.get_by(id=id)
        return None
    def updateItem(self, post):
        """Refresh the star icon and read/unread styling for *post*'s row.

        Only touches what changed relative to the cached row in
        self.post_data, then rewrites that cache entry.
        """
        if not post.id in self.postItems: #post is not being displayed
            return
        item0, item1, item2, item3=self.postItems[post.id]
        idx=self.post_ids.index(post.id)
        data=self.post_data[idx]
        # Only change what's really changed
        # (data[4] caches `important`, data[5] caches `unread`; `<>` is the
        # Python 2 inequality operator.)
        if post.important <> data[4]:
            if post.important:
                item0.setIcon(self.star)
            else:
                item0.setIcon(self.star2)
            item0.setData(QtCore.QVariant(post.important), sorting)
        if post.unread <> data[5]:
            # Unread posts are bold/red; read posts use the normal font/color.
            if post.unread:
                f=self.boldFont
                c=self.unreadColor
            else:
                f=self.font
                c=self.color
            item1.setForeground(c)
            item2.setForeground(c)
            item3.setForeground(c)
            item1.setFont(f)
            item2.setFont(f)
            item3.setFont(f)
        # Update our post_data, too. Probably not the best way
        # FIXME: not efficient
        # self.post_ids=[id for [id, _, _, _, _, _] in self.post_data]
        self.post_data[idx]=[post.id,
            unicode(post).lower(),
            post.date,
            unicode(post.feed).lower(),
            post.important,
            post.unread]
    # Maps a view column to the index of its sort key inside a post_data row
    # ([id, title, date, feed, important, unread]).
    # NOTE(review): column 0 displays the `important` star but colkey[0] is 5
    # (the unread flag) -- confirm whether sorting column 0 by unread is
    # intentional (field 4 would be `important`).
    colkey=[5, 1, 2, 3]
    def sort(self, column, order):
        """Sort both the Qt model and the parallel post_data/post_ids caches."""
        # Normalize `order` (an index-compatible 0/1 value) to the Qt enum.
        order = [QtCore.Qt.AscendingOrder,QtCore.Qt.DescendingOrder][order]
        # Thanks pyar!
        self.post_data.sort(key=operator.itemgetter(self.colkey[column]),
            reverse=order==QtCore.Qt.DescendingOrder)
        self.post_ids=[id for [id, _, _, _, _, _] in self.post_data]
        self.lastSort=(column, order)
        # Persist the user's choice for the next session.
        config.setValue('ui','postSorting',[column,order])
        self.reset()
        QtGui.QStandardItemModel.sort(self, column, order)
def nextPostIndex(self, post):
'''Takes a Post and returns the index of the following post'''
if not self.post_ids:
return QtCore.QModelIndex()
# First, find it in our list of ids
if not post:
idx=-1
else:
idx=self.post_ids.index(post.id)
if idx==-1: #current post not here, so return the first
return self.indexFromItem(self.postItems[self.post_ids[0]][1])
elif idx==len(self.post_ids)-1: # Last post, no next
return QtCore.QModelIndex()
else:
return self.indexFromItem(self.postItems[self.post_ids[idx+1]][1])
def nextUnreadPostIndex(self, post):
if not self.post_ids:
return QtCore.QModelIndex()
# Create filtered lists
if post:
unread_data=[x for x in self.post_data if x[5] or x[0]==post.id]
else:
unread_data=[x for x in self.post_data if x[5]]
unread_ids=[id for [id, _, _, _, _, _] in unread_data]
# And now it's pretty much like nextPostIndex
# FIXME: merge them
if not unread_ids:
return QtCore.QModelIndex()
# First, find it in our list of ids
if not post:
idx=-1
else:
idx=unread_ids.index(post.id)
if idx==-1: #current post not here, so return the first
return self.indexFromItem(self.postItems[unread_ids[0]][1])
elif idx==len(unread_ids)-1: # Last post, no next
return QtCore.QModelIndex()
else:
return self.indexFromItem(self.postItems[unread_ids[idx+1]][1])
def previousPostIndex(self, post):
'''Takes a Post and returns the index of the following post'''
# First, find it in our list of ids
if not self.post_ids:
return QtCore.QModelIndex()
if not post:
idx=-1
else:
idx=self.post_ids.index(post.id)
if idx==-1: #current post not here, so return the last
return self.indexFromItem(self.postItems[self.post_ids[-1]][1])
elif idx==0: # First post, no previous
return QtCore.QModelIndex()
else:
return self.indexFromItem(self.postItems[self.post_ids[idx-1]][1])
def previousUnreadPostIndex(self, post):
if not self.post_ids:
return QtCore.QModelIndex()
# Create filtered lists
if post:
unread_data=[x for x in self.post_data if x[5] or x[0]==post.id]
else:
unread_data=[x for x in self.post_data if x[5]]
unread_ids=[id for [id, _, _, _, _, _] in unread_data]
# And now it's pretty much like previousPostIndex
# FIXME: merge them
if not unread_ids:
return QtCore.QModelIndex()
# First, find it in our list of ids
if not post:
idx=-1
else:
idx=unread_ids.index(post.id)
if idx==-1: #current post not here, so return the last
return self.indexFromItem(self.postItems[unread_ids[-1]][1])
elif idx==0: # First post, no previous
return QtCore.QModelIndex()
else:
return self.indexFromItem(self.postItems[unread_ids[idx-1]][1])
|
lgpl-2.1
|
eahneahn/free
|
lib/python2.7/site-packages/pip-1.5-py2.7.egg/pip/_vendor/colorama/ansi.py
|
171
|
1089
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
    """Return the complete ANSI escape sequence for the numeric SGR *code*."""
    return '%s%sm' % (CSI, code)
class AnsiCodes(object):
    """Mirror the public numeric attributes of *codes* as escape strings."""
    def __init__(self, codes):
        public_names = (n for n in dir(codes) if not n.startswith('_'))
        for name in public_names:
            numeric = getattr(codes, name)
            setattr(self, name, code_to_chars(numeric))
# Numeric SGR codes.  AnsiCodes (above) converts each public attribute into
# its ready-to-print escape string, e.g. Fore.RED == '\033[31m'.
class AnsiFore:
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39
class AnsiBack:
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49
class AnsiStyle:
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0
# Public, ready-to-print singletons.
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
|
agpl-3.0
|
CiscoSystems/nova
|
nova/compute/power_state.py
|
47
|
2019
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Power state is the state we get by calling virt driver on a particular
domain. The hypervisor is always considered the authority on the status
of a particular VM, and the power_state in the DB should be viewed as a
snapshot of the VMs's state in the (recent) past. It can be periodically
updated, and should also be updated at the end of a task if the task is
supposed to affect power_state.
"""
# NOTE(maoy): These are *not* virDomainState values from libvirt.
# The hex value happens to match virDomainState for backward-compatibility
# reasons.
# NOTE(review): the gaps at 0x02, 0x05 and 0x08 presumably mirror
# virDomainState values nova does not use (see the note above); confirm.
NOSTATE = 0x00
RUNNING = 0x01
PAUSED = 0x03
SHUTDOWN = 0x04 # the VM is powered off
CRASHED = 0x06
SUSPENDED = 0x07
# TODO(maoy): BUILDING state is only used in bare metal case and should
# eventually be removed/cleaned up. NOSTATE is probably enough.
BUILDING = 0x09
# TODO(justinsb): Power state really needs to be a proper class,
# so that we're not locked into the libvirt status codes and can put mapping
# logic here rather than spread throughout the code
# Lower-case string name for each power state constant.
STATE_MAP = {
    NOSTATE: 'pending',
    RUNNING: 'running',
    PAUSED: 'paused',
    SHUTDOWN: 'shutdown',
    CRASHED: 'crashed',
    SUSPENDED: 'suspended',
    BUILDING: 'building',
}
|
apache-2.0
|
ricardoquesada/cocoslive
|
cocoslive/util.py
|
2
|
2311
|
#!/usr/bin/env python
#
# cocos live - (c) 2009 Ricardo Quesada
# http://www.cocoslive.net
#
# License: GNU GPL v3
# See the LICENSE file
#
__docformat__ = 'restructuredtext'
# GAE imports
from google.appengine.api import urlfetch
from google.appengine.api import memcache
# IMPORTNAT:
# geoutil contains the an URL with a secret key used to parse the geo ip from a paid service
# so, geoutil it's not commited
# If geoutil is not found, it will use free Geo IP services (which are somewhat slower)
#
try:
from geoutil import get_services
except Exception, e:
from geoutil_public import get_services
__all__ = ['getGeoIPCode']
def ipaddr_to_hex( ipaddr ):
    """Convert a dotted-quad IP address to its 8-char hex representation.

    Used to shrink memcache keys.  Any malformed input falls back to
    '00000000' (preserving the original best-effort behavior).
    """
    try:
        # A list (not a py3 `map` object, which is not indexable) so this
        # works identically on Python 2 and 3; the original silently returned
        # the fallback on py3 because `map(...)[0]` raised inside the except.
        octets = [int(part) for part in ipaddr.split('.')]
        # Too few parts -> TypeError from the short tuple -> fallback,
        # matching the original's IndexError path; extra parts are ignored.
        return '%02x%02x%02x%02x' % tuple(octets[:4])
    except Exception:
        return '00000000'
def getGeoIPCode(ipaddr):
    """Return the cached (memcache) geo country code for *ipaddr*.

    NOTE(review): see the block near the end -- any successful service
    lookup is overwritten with 'xx' before caching, so the real country code
    is never returned.  Confirm whether geo lookup was deliberately disabled.
    (Python 2 file: note the `except urlfetch.Error, e` syntax below.)
    """
    hex_ipaddr = ipaddr_to_hex( ipaddr)
    # use the 20 first bits for the cache.
    # it is assumed that the rest 12 bits belongs to the same country
    # this reduces queries and memory, and improves performance
    netmask = hex_ipaddr[0:5]
    # new memcache key (6 bytes)
    new_memcache_key = "%s" % netmask
    data = memcache.get(new_memcache_key)
    if data is not None:
        return data
    services = get_services()
    geoipcode = ''
    for service in services:
        try:
            fetch_response = urlfetch.fetch( service % ipaddr)
            if fetch_response.status_code == 200:
                geoipcode = fetch_response.content
                geoipcode = geoipcode.strip().lower()
                # Treat known "no result" payloads as failure: try the next
                # service in the list.
                if geoipcode.startswith('(null)') or geoipcode == 'none' or geoipcode =='':
                    geoipcode = ''
                    continue
                else:
                    break
        except urlfetch.Error, e:
            continue
    if geoipcode:
        # convert to lower case, and store in mem cache
        # NOTE(review): both branches assign 'xx'; the inner `== ''` test can
        # never be true here (geoipcode is truthy).
        if geoipcode == '':
            geoipcode = 'xx'
        else:
            geoipcode = 'xx'
    # time = 60 * 60 * 24 * 30 # 30 days
    time = 0 # never expires
    memcache.set(new_memcache_key, geoipcode, time)
    return geoipcode
|
gpl-3.0
|
betaY/crawler
|
you-get-master/src/you_get/extractor.py
|
3
|
8692
|
#!/usr/bin/env python
from .common import match1, maybe_print, download_urls, get_filename, parse_host, set_proxy, unset_proxy
from .util import log
from . import json_output
import os
class Extractor():
    """Minimal base container for extractor state (url/title/vid/streams)."""
    def __init__(self, *args):
        # Optional first positional argument is the media URL.
        self.url = args[0] if args else None
        self.title = None
        self.vid = None
        self.streams = {}
        self.streams_sorted = []
class VideoExtractor():
def __init__(self, *args):
self.url = None
self.title = None
self.vid = None
self.streams = {}
self.streams_sorted = []
self.audiolang = None
self.password_protected = False
self.dash_streams = {}
self.caption_tracks = {}
if args:
self.url = args[0]
    def download_by_url(self, url, **kwargs):
        """Prepare, extract and download the media located at *url*.

        Honors kwargs['extractor_proxy'] around the prepare() step only.
        """
        self.url = url
        self.vid = None
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            set_proxy(parse_host(kwargs['extractor_proxy']))
        self.prepare(**kwargs)
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            unset_proxy()
        # Order streams following the class's declared stream_types; streams
        # are keyed either by 'id' or (YouTube-style) by 'itag'.
        # NOTE(review): the bare except falls back to 'itag' on *any* error.
        try:
            self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams]
        except:
            self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams]
        self.extract(**kwargs)
        self.download(**kwargs)
    def download_by_vid(self, vid, **kwargs):
        """Prepare, extract and download the media identified by *vid*.

        NOTE(review): mirrors download_by_url() except for the url/vid swap;
        the two are candidates for a shared private helper.
        """
        self.url = None
        self.vid = vid
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            set_proxy(parse_host(kwargs['extractor_proxy']))
        self.prepare(**kwargs)
        if 'extractor_proxy' in kwargs and kwargs['extractor_proxy']:
            unset_proxy()
        # Same id/itag sorting (and bare-except fallback) as download_by_url.
        try:
            self.streams_sorted = [dict([('id', stream_type['id'])] + list(self.streams[stream_type['id']].items())) for stream_type in self.__class__.stream_types if stream_type['id'] in self.streams]
        except:
            self.streams_sorted = [dict([('itag', stream_type['itag'])] + list(self.streams[stream_type['itag']].items())) for stream_type in self.__class__.stream_types if stream_type['itag'] in self.streams]
        self.extract(**kwargs)
        self.download(**kwargs)
    def prepare(self, **kwargs):
        # Subclass hook: populate self.streams and metadata for self.url or
        # self.vid.  Intentionally a no-op here (the commented-out raise
        # suggests it was once abstract).
        pass
        #raise NotImplementedError()
    def extract(self, **kwargs):
        # Subclass hook: resolve final stream URLs after prepare().  No-op
        # default for extractors that do everything in prepare().
        pass
        #raise NotImplementedError()
    def p_stream(self, stream_id):
        """Pretty-print one stream's metadata and its download hint."""
        # Streams may live in either the regular or the DASH dict.
        if stream_id in self.streams:
            stream = self.streams[stream_id]
        else:
            stream = self.dash_streams[stream_id]
        # YouTube-style streams carry an 'itag' key; other sites a format id.
        if 'itag' in stream:
            print("    - itag:          %s" % log.sprint(stream_id, log.NEGATIVE))
        else:
            print("    - format:        %s" % log.sprint(stream_id, log.NEGATIVE))
        if 'container' in stream:
            print("      container:     %s" % stream['container'])
        if 'video_profile' in stream:
            maybe_print("      video-profile: %s" % stream['video_profile'])
        if 'quality' in stream:
            print("      quality:       %s" % stream['quality'])
        if 'size' in stream:
            print("      size:          %s MiB (%s bytes)" % (round(stream['size'] / 1048576, 1), stream['size']))
        if 'itag' in stream:
            print("    # download-with: %s" % log.sprint("you-get --itag=%s [URL]" % stream_id, log.UNDERLINE))
        else:
            print("    # download-with: %s" % log.sprint("you-get --format=%s [URL]" % stream_id, log.UNDERLINE))
        print()
    def p_i(self, stream_id):
        """Print terse per-item info (title, size, url) for *stream_id*."""
        if stream_id in self.streams:
            stream = self.streams[stream_id]
        else:
            stream = self.dash_streams[stream_id]
        maybe_print("    - title: %s" % self.title)
        print("       size: %s MiB (%s bytes)" % (round(stream['size'] / 1048576, 1), stream['size']))
        print("        url: %s" % self.url)
        print()
    def p(self, stream_id=None):
        """Print site/title plus stream information.

        stream_id: a truthy id prints that stream; None prints the
        best-quality stream; [] lists every available stream (DASH first).
        """
        maybe_print("site:                %s" % self.__class__.name)
        maybe_print("title:               %s" % self.title)
        if stream_id:
            # Print the stream
            print("stream:")
            self.p_stream(stream_id)
        elif stream_id is None:
            # Print stream with best quality
            print("stream:              # Best quality")
            stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag']
            self.p_stream(stream_id)
        elif stream_id == []:
            print("streams:             # Available quality and codecs")
            # Print DASH streams
            if self.dash_streams:
                print("    [ DASH ] %s" % ('_' * 36))
                # Largest DASH stream first.
                itags = sorted(self.dash_streams,
                               key=lambda i: -self.dash_streams[i]['size'])
                for stream in itags:
                    self.p_stream(stream)
            # Print all other available streams
            print("    [ DEFAULT ] %s" % ('_' * 33))
            for stream in self.streams_sorted:
                self.p_stream(stream['id'] if 'id' in stream else stream['itag'])
        if self.audiolang:
            print("audio-languages:")
            for i in self.audiolang:
                print("    - lang:          {}".format(i['lang']))
                print("      download-url:  {}\n".format(i['url']))
    def p_playlist(self, stream_id=None):
        """Print the playlist header (site name and playlist title)."""
        maybe_print("site:                %s" % self.__class__.name)
        print("playlist:            %s" % self.title)
        print("videos:")
    def download(self, **kwargs):
        """Print info and/or download the selected (or best-quality) stream.

        NOTE(review): kwargs['output_dir'], kwargs['merge'] and
        kwargs['caption'] are indexed directly below, so they must be present
        when actually downloading (KeyError otherwise) -- confirm callers
        always supply them.
        """
        if 'json_output' in kwargs and kwargs['json_output']:
            json_output.output(self)
        elif 'info_only' in kwargs and kwargs['info_only']:
            if 'stream_id' in kwargs and kwargs['stream_id']:
                # Display the stream
                stream_id = kwargs['stream_id']
                if 'index' not in kwargs:
                    self.p(stream_id)
                else:
                    self.p_i(stream_id)
            else:
                # Display all available streams
                if 'index' not in kwargs:
                    self.p([])
                else:
                    stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag']
                    self.p_i(stream_id)
        else:
            if 'stream_id' in kwargs and kwargs['stream_id']:
                # Download the stream
                stream_id = kwargs['stream_id']
            else:
                # Download stream with the best quality
                stream_id = self.streams_sorted[0]['id'] if 'id' in self.streams_sorted[0] else self.streams_sorted[0]['itag']
            if 'index' not in kwargs:
                self.p(stream_id)
            else:
                self.p_i(stream_id)
            # Regular and DASH streams live in separate dicts.
            if stream_id in self.streams:
                urls = self.streams[stream_id]['src']
                ext = self.streams[stream_id]['container']
                total_size = self.streams[stream_id]['size']
            else:
                urls = self.dash_streams[stream_id]['src']
                ext = self.dash_streams[stream_id]['container']
                total_size = self.dash_streams[stream_id]['size']
            if not urls:
                log.wtf('[Failed] Cannot extract video source.')
            # For legacy main()
            download_urls(urls, self.title, ext, total_size,
                          output_dir=kwargs['output_dir'],
                          merge=kwargs['merge'],
                          av=stream_id in self.dash_streams)
            if not kwargs['caption']:
                print('Skipping captions.')
                return
            # Save one .srt file per available caption language.
            for lang in self.caption_tracks:
                filename = '%s.%s.srt' % (get_filename(self.title), lang)
                print('Saving %s ... ' % filename, end="", flush=True)
                srt = self.caption_tracks[lang]
                with open(os.path.join(kwargs['output_dir'], filename),
                          'w', encoding='utf-8') as x:
                    x.write(srt)
                print('Done.')
        # For main_dev()
        #download_urls(urls, self.title, self.streams[stream_id]['container'], self.streams[stream_id]['size'])
        # Re-running __init__ resets all extractor state so the instance can
        # be reused for the next URL.
        self.__init__()
|
mit
|
willbarton/observation-conditions
|
observation/conditions/map.py
|
1
|
21998
|
# -*- coding: utf-8 -*-
#
# Copyright 2010-2014 Will Barton.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from mpl_toolkits.basemap import Basemap
try:
import Image
except ImportError:
from PIL import Image
from observation.conditions.orderedset import OrderedSet
from observation.conditions.utils import current_update
import datetime
from dateutil import tz
from dateutil.relativedelta import *
import urlparse
import os, os.path
import shutil
import logging
logger = logging.getLogger(__name__)
# The map projection that everything here is based on.
# North-polar stereographic, low resolution; all pixel<->coordinate math
# below assumes this exact projection.
__map__ = Basemap(projection='npstere', resolution='l',
                  boundinglat=27.0, lon_0=-111)

# Map of the short type codes embedded in the Weather Office image file
# names to the descriptive forecast-type names used by this module.
TYPES = {
    'seeing':'seeing',
    'transp':'transparency',
    'uv':'wind',
    'tt':'temperature',
    'hr':'humidity',
    'nt':'clouds',
}
# The tables below map forecast values to the exact RGB colors used in the
# Weather Office map images; lookups go color -> value via .values().index().

# Astronomical seeing, 0 (no data) to 5 (excellent).
SEEING_COLOR_TABLE = {
    0: (254, 254, 254), # None
    1: (198, 198, 198), # Bad
    2: (148, 212, 212), # Poor
    3: (98, 162, 226),  # Average
    4: (43,107, 171),   # Good
    5: (0, 62, 126),    # Excellent
}

# Sky transparency, same 0-5 scale and palette as seeing.
TRANSPARENCY_COLOR_TABLE = {
    0: (254, 254, 254), # None
    1: (198, 198, 198), # Bad
    2: (148, 212, 212), # Poor
    3: (98, 162, 226),  # Average
    4: (43,107, 171),   # Good
    5: (0, 62, 126),    # Excellent
}

# Wind strength; NOTE the scale is inverted relative to seeing
# (5 = very strong wind = white, 0 = calm = dark blue).
WIND_COLOR_TABLE = {
    5: (254, 254, 254), # Very Strong
    4: (198, 198, 198), # Strong
    3: (148, 212, 212), # Moderate
    2: (98, 162, 226),  # Light to Moderate
    1: (43,107, 171),   # Light
    0: (0, 62, 126),    # Calm
}

# Temperature in 5-degree steps (presumably Celsius — TODO confirm
# against the legend on the source images).
TEMPERATURE_COLOR_TABLE = {
    50: (198, 198, 198),
    45: (125, 0, 0),
    40: (168, 0, 0),
    35: (225, 0, 0),
    30: (253, 29, 0),
    25: (253, 89, 0),
    20: (253, 157, 0),
    15: (253, 221, 0),
    10: (161, 253, 89),
    5: (93, 253, 157),
    0: (250, 250, 250),
    -5: (29, 253, 221),
    -10: (0, 221, 253),
    -15: (0, 136, 253),
    -20: (0, 51, 253),
    -25: (0, 0, 235),
    -30: (0, 0, 178),
    -35: (0, 0, 132),
    -40: (252, 0, 252),
}

# Relative humidity, percent, in 5% steps.
HUMIDITY_COLOR_TABLE = {
    100: (225, 0, 0),
    95: (183, 0, 0),
    90: (234, 0, 0),
    85: (254, 52, 1),
    80: (252, 134, 2),
    75: (254, 198, 0),
    70: (234, 251, 22),
    65: (148, 254, 106),
    60: (85, 250, 173),
    55: (8, 254, 237),
    50: (128, 192, 192),
    45: (113, 177, 241),
    40: (78, 142, 206),
    35: (48, 112, 176),
    30: (13, 77, 141),
    25: (8, 3, 93),
}

# Cloud cover, percent, in 2.5% steps (100 = overcast/white).
CLOUDS_COLOR_TABLE = {
    100.0: (255, 255, 255),
    97.5: (248, 248, 248),
    95.0: (238, 238, 238),
    92.5: (233, 233, 233),
    90.0: (223, 223, 223),
    87.5: (218, 218, 218),
    85.0: (203, 203, 203),
    82.5: (193, 193, 193),
    80.0: (188, 252, 252),
    77.5: (183, 247, 247),
    75.0: (178, 242, 242),
    72.5: (173, 237, 237),
    70.0: (168, 232, 232),
    67.5: (163, 227, 227),
    65.0: (158, 222, 222),
    62.5: (153, 217, 217),
    60.0: (148, 212, 212),
    57.5: (128, 192, 192),
    55.0: (123, 187, 251),
    52.5: (118, 182, 246),
    50.0: (113, 177, 241),
    47.5: (108, 172, 236),
    45.0: (103, 167, 231),
    42.5: (98, 162, 226),
    40.0: (93, 157, 221),
    37.5: (88, 152, 216),
    35.0: (83, 147, 211),
    32.5: (78, 142, 206),
    30.0: (68, 132, 196),
    27.5: (48, 112, 176),
    25.0: (43, 107, 171),
    22.5: (38, 102, 166),
    20.0: (33, 97, 161),
    17.5: (28, 92, 156),
    15.0: (23, 87, 151),
    12.5: (18, 82, 146),
    10.0: (13, 77, 141),
    7.5: (8, 72, 136),
    5.0: (3, 67, 131),
    2.5: (0, 62, 126),
}
class point(object):
    """A simple 2-D coordinate pair (pixels or projection metres)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        coords = (str(self.x), str(self.y))
        return "%s, %s" % coords
class bounds(object):
    """An axis-aligned rectangle: upper-left corner (x, y) plus width and
    height, in the same units as the points tested against it."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __contains__(self, point):
        # Strictly inside: points exactly on an edge are NOT contained.
        if point.x > self.x and point.y > self.y and \
           point.x < self.x + self.width and \
           point.y < self.y + self.height:
            return True
        return False

    def __repr__(self):
        # Bug fix: the original called str() with four arguments (a
        # TypeError whenever repr() was taken) and printed self.y twice
        # instead of self.x.  Format in the same style as point.__repr__.
        return "%s, %s, %s, %s" % (self.x, self.y, self.width, self.height)
class Forecast(object):
    """
    A forecast is the value of a specific longitude and latitude at a
    specific date/time as extracted from a ForecastMapSeries.
    """
    latitude = None
    longitude = None
    date = None

    def __init__(self, latitude, longitude, date, **values):
        self.latitude = latitude
        self.longitude = longitude
        self.date = date
        self.__values = values
        # Expose each forecast value (seeing, clouds, ...) as an attribute.
        for key, value in values.items():
            setattr(self, key, value)

    def __repr__(self):
        fields = (self.latitude, self.longitude, self.date,
                  str(self.__values))
        return u'<Forecast at %f, %f: %s %s>' % fields
class ForecastMap(object):
    """
    A weather map is a single map tied to an image. It allows for the
    lookup of pixel values on that image based on GPS coordinates.
    """
    # Class-level default; overwritten per-instance in __init__, so this
    # value only matters before/without initialization.
    pixel_dimensions = point(1436, 1436)

    def __init__(self, image, region = None):
        """Bind an image file to the shared polar-stereographic projection.

        :param image: path to (or file object for) the forecast PNG.
        :param region: None for the full astro map, or one of
            'northeast', 'northwest', 'southeast', 'southwest';
            anything else raises ValueError.
        """
        global __map__
        self.region = region
        self.image = image
        self.map = __map__
        self.pixel_dimensions = None
        self.image_bounds = None
        self.legend_bounds = []
        # Based on the region, we map the particular projection used by
        # the Canadian Weather Office to a basemap projection.
        if not self.region:
            # Resolution of map at 300 dpi
            self.pixel_dimensions = point(1436, 1436)
            # Upper left corner, bottom right corner, relative to
            # pixel_dimensions
            self.image_bounds = bounds(479, 835, 718, 600)
            # Bounds of any legends within the image_bounds
            self.legend_bounds = [bounds(4, 4, 395, 42),]
        elif region == 'northeast':
            # Eastern CA
            # Resolution at 510 dpi
            self.pixel_dimensions = point(2441, 2441)
            # Upper left corner, bottom right corner, relative to
            # pixel_dimensions
            self.image_bounds = bounds(1299, 1498, 718, 600)
            # Bounds of any legends within the image_bounds
            self.legend_bounds = [bounds(4, 4, 395, 42),
                                  bounds(660, 196, 56, 401)]
        elif region == 'northwest':
            # Eastern CA
            # Resolution at 530 dpi
            self.pixel_dimensions = point(2537, 2537)
            # Upper left corner, bottom right corner, relative to
            # pixel_dimensions
            self.image_bounds = bounds(840, 1548, 718, 600)
            # Bounds of any legends within the image_bounds
            self.legend_bounds = [bounds(4, 4, 395, 42),
                                  bounds(660, 196, 56, 401)]
        elif region == 'southeast':
            # Eastern US
            # Resolution at 555 dpi
            self.pixel_dimensions = point(2656, 2656)
            # Upper left corner, bottom right corner, relative to
            # pixel_dimensions
            self.image_bounds = bounds(1430, 2023, 718, 600)
            # Bounds of any legends within the image_bounds
            self.legend_bounds = [bounds(4, 4, 395, 42),
                                  bounds(660, 196, 56, 401)]
        elif region == 'southwest':
            # Western US
            # Resolution at 530 dpi
            self.pixel_dimensions = point(2999, 3132)
            # Upper left corner, bottom right corner, relative to
            # pixel_dimensions
            self.image_bounds = bounds(1149, 2412, 718, 600)
            # Bounds of any legends within the image_bounds
            self.legend_bounds = [bounds(4, 4, 395, 42),
                                  bounds(660, 196, 56, 401)]
        else:
            raise ValueError("Bad value for region")
        # The map's dimensions in meters
        self.dimensions = point(self.map.urcrnrx - self.map.llcrnrx,
                                self.map.urcrnry - self.map.llcrnry)
        # The ratio of map meters to pixels
        self.dimensions_ratio = point(self.pixel_dimensions.x/self.dimensions.x,
                                      self.pixel_dimensions.y/self.dimensions.y)

    def coordinatePixel(self, latitude, longitude):
        """
        Get the pixel coordinate, given our class variable pixel
        dimensions, of the given GPS coordinate.

        Raises ValueError when the coordinate falls outside the image
        bounds or inside a legend box.
        """
        # X, Y, in meters
        x_m, y_m = self.map(longitude, latitude)
        # Origin is lower left. Convert it to upper left.
        y_m_c = self.dimensions.y - y_m
        # X, Y, in pixels
        # NOTE: These are the coordinates on the generated map
        # projection, NOT on the given weather map image.
        x_p, y_p = (self.dimensions_ratio.x * x_m,
                    self.dimensions_ratio.y * y_m_c)
        # Make sure the point is within the given weather map image
        # bounds, and is not within any legend bounds in the image.
        if point(x_p, y_p) not in self.image_bounds:
            raise ValueError("Location outside coverage area")
        else:
            for legend in self.legend_bounds:
                if point(x_p, y_p) in legend:
                    raise ValueError("Location outside coverage area")
        # Convert these pixel coordinates into weather map image
        # coordinates
        x = x_p - self.image_bounds.x
        y = y_p - self.image_bounds.y
        return point(x, y)

    def coordinateValue(self, latitude, longitude):
        """
        Get the color value of the pixel on the map's image at
        the given coordinate.

        Returns an (r, g, b) tuple, or None if the image could not be
        read or the pixel lookup failed.
        """
        # Get the x, y coordinates for the image.
        p = self.coordinatePixel(latitude, longitude)
        # Use an intermediate RGB image?
        i = Image.open(self.image)
        try:
            rgbi = i.convert('RGB')
            pixels = rgbi.load()
            color = pixels[int(p.x), int(p.y)]
            del i, rgbi
        except Exception, e: # XXX: Catch specific exception
            # NOTE(review): Python 2 syntax; any image/IO error is
            # swallowed and reported as "no value" (None).
            return None
        return color
class ForecastMapSet(object):
    """
    A set of maps that view different regions but share the same
    forecast information.
    """

    def __init__(self, images, regions):
        # images: mapping of region name -> image path; regions: iterable
        # of region names, in lookup-priority order.
        self.images = images
        self.regions = regions
        self.mapset = OrderedSet()
        for region in regions:
            image = self.images[region]
            map = ForecastMap(image, region=region)
            self.mapset.add(map)

    def coordinateValue(self, latitude, longitude):
        """
        Get the color value of the pixel on the map's image at
        the given coordinate.

        Raises ValueError when no map in the set covers the coordinate.
        """
        # Find the first map in our map set to contain this value.
        # NOTE(review): there is no break here, so when several regional
        # maps cover the point the LAST successful lookup wins, despite
        # the comment above saying "first" — confirm which is intended.
        value = None
        for map in self.mapset:
            try:
                value = map.coordinateValue(latitude, longitude)
            except ValueError:
                continue
        if value is None:
            raise ValueError("Coordinates not in bounds")
        return value
def download(url, filepath):
    """Download *url* to *filepath*, streaming the response body to disk.

    Python 2 only (urllib2).  The response is always closed, even when the
    copy fails; any HTTP/IO errors propagate to the caller.
    """
    import urllib2
    request = urllib2.urlopen(urllib2.Request(url))
    try:
        with open(filepath, 'wb') as f:
            shutil.copyfileobj(request, f)
    finally:
        request.close()
class ForecastMapSeries(object):
    """
    A Forecast Map Series is a collection of forecast maps containing
    the same information for different time periods.
    """
    base_url = 'http://www.weatheroffice.gc.ca/data/prog/regional/'

    def __init__(self, period, color_table, type,
                 image_path, regions = None):
        """
        Create a forecast of the specified period (the time between
        images in the imageset) in hours from the specified date, and
        optionally for the given regions.

        :param period: hours between successive images in the set.
        :param color_table: one of the *_COLOR_TABLE dicts above,
            mapping forecast value -> RGB color.
        :param type: descriptive forecast type ('seeing', 'clouds', ...);
            must be a value of TYPES.
        :param image_path: local directory holding downloaded image sets.
        :param regions: optional list of region names; when given, each
            time step is a ForecastMapSet instead of a single ForecastMap.
        """
        self.period = period
        self.regions = regions
        self.color_table = color_table
        self.type = type
        self.image_path = image_path
        self.__cachedpath__ = None
        # The refresh function will set these.
        self.mapset = OrderedSet()
        self.date = None
        # Get the image set. Unless we're specifically asked to, load
        # the most recent set.
        self.load()

    def __repr__(self):
        # Bug fix: the original reported len(str(self.mapset)) — the
        # character count of the set's string form — as the map count.
        return """<ForecastMapSeries %s, %d maps>""" % \
                (self.type, len(self.mapset))

    @property
    def range(self):
        """
        Provide the ending date and time covered by this map series.

        Returns (start, end) datetimes; the first image is 3 hours after
        self.date, the last is period * count hours after it.
        """
        hour_range = len(self.mapset) * self.period
        start_dt = self.date + relativedelta(hours=+3)
        end_dt = self.date + relativedelta(hours=+hour_range)
        end_dt = end_dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
        return (start_dt, end_dt)

    def forTime(self, time):
        """
        Get the set of forecast maps for the specified time. Time
        should be an absolute datetime object.

        Raises ValueError when the time is outside self.range.
        """
        # If there's no tzinfo, assume it's localtime.
        if time.tzinfo == None:
            time = time.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
        start, end = self.range
        if (time < start) or (time > end):
            raise ValueError("Time is outside of forecast range", (time,
                start, end))
        # NOTE: Python 2 integer division selects the covering slot.
        delta = relativedelta(time, self.date).hours
        index = delta / self.period
        return list(self.mapset)[index]

    def coordinateValueAtTime(self, lat, lon, time, nudged=False):
        """
        Get the color value of the pixel on the map's image at
        the given coordinate at the given datetime.

        Returns the forecast value from self.color_table, or -1 when the
        pixel color does not match any table entry.
        """
        time = time.astimezone(tz.tzutc())
        if self.mapset is None or len(self.mapset) == 0:
            raise ValueError("Forecast data unavailable at this time")
        # Lookup the map for the given time
        map = self.forTime(time)
        # Get the coordinate value
        color = map.coordinateValue(lat, lon)
        try:
            # Reverse lookup: color -> forecast value (Python 2 lists).
            value_index = self.color_table.values().index(color)
            value = self.color_table.keys()[value_index]
        except ValueError:
            # XXX: Border colors are black and red. If we're one of
            # those colors, we're on a border. Nudge the coordinates
            # ever so slightly and see what we get.
            if (color == (0, 0, 0) or color == (254, 0, 0)) and not nudged:
                return self.coordinateValueAtTime(
                        lat, lon-0.04, time, nudged=True)
            value = -1
        return value

    def load(self, date=None):
        """
        Discover what image file names we have, if any, and the
        appropriate dates for those files.

        We want to try to update at 18:00 UTC and 06:00 UTC (the
        forecasts are updated at "approximately" 17:30 and 5:30).

        NOTE(review): a caller-supplied ``date`` is overwritten by the
        else-branch below and never actually used — confirm intent.
        """
        if date is None and os.path.isdir(self.image_path):
            # If we're not given a date, look for the last directory in
            # the image_path. Because we name the directory based on the
            # date and hour of the forecast, this should give us the
            # most recent.
            dirs = [d for d in os.listdir(self.image_path) if
                    os.path.isdir(os.path.join(self.image_path, d))]
            dirname = dirs[-1]
            date = datetime.datetime.strptime(dirname, '%Y%m%d%H').replace(
                    tzinfo=tz.tzutc())
        else:
            # Find the immediately previous update
            date = datetime.datetime.now(tz=tz.tzlocal()).astimezone(
                    tz.tzutc()).replace(
                    minute=0, second=0, microsecond=0)
            if date.hour > 18:
                date = date + datetime.timedelta(hours=-(date.hour-18))
            elif date.hour < 6:
                date = date + datetime.timedelta(hours=(-6 - date.hour))
            elif date.hour > 6 and date.hour < 18:
                date = date + datetime.timedelta(hours=-(date.hour-6))
            else:
                date = date.replace(hour=6)
        # Get the imageset for the date.
        imageset = self.__imageset(date)
        self.date = date
        self.mapset = OrderedSet()
        if not self.regions:
            for image in imageset:
                map = ForecastMap(image)
                self.mapset.add(map)
        else:
            for images in list(imageset):
                map = ForecastMapSet(images, self.regions)
                self.mapset.add(map)
        return date

    def refresh(self):
        """ Refresh the map series from the server. """
        date = current_update()
        # The foldername for the update
        dirname = date.strftime('%Y%m%d%H')
        # The download path, create it if it doesn't exist.
        download_path = os.path.abspath(
                os.path.join(self.image_path, dirname))
        if not os.path.exists(download_path):
            os.makedirs(download_path)

        def download_func(image_url, image_path):
            # The download path, create it if it doesn't exist.
            if not os.path.exists(os.path.dirname(image_path)):
                os.makedirs(os.path.dirname(image_path))
            # If the image already exists, don't worry about it.
            if not os.path.exists(image_path):
                logger.debug("Downloading %s" % image_url)
                download(image_url, image_path)

        imageset = self.__imageset(date, download_func)
        self.date = date
        self.mapset = OrderedSet()
        if not self.regions:
            for image in imageset:
                map = ForecastMap(image)
                self.mapset.add(map)
        else:
            for images in list(imageset):
                map = ForecastMapSet(images, self.regions)
                self.mapset.add(map)
        return date

    def __imageset(self, date, download_func=None):
        """ Get the imageset for a given date. If download_func is
        provided, it is called as:
            download_func(image_url, image_path)
        and expected to download the image at the given URL to the
        given path. """
        dirname = date.strftime('%Y%m%d%H')
        images_path = os.path.abspath(
                os.path.join(self.image_path, dirname))
        # The image name
        if not self.regions:
            image_format = '%(dirname)s_054_R1_north@america@astro_I_ASTRO_%(type)s_%(period)s.png'
        else:
            image_format = '%(dirname)s_054_R1_north@america@%(region)s_I_ASTRO_%(type)s_%(period)s.png'
        # The URL path
        url = urlparse.urljoin(self.base_url, dirname + '/')
        # Each period within a 48 hour range has an appropriate image
        imageset = []
        # Reverse lookup: descriptive name -> file-name code (Python 2).
        type = TYPES.keys()[TYPES.values().index(self.type)]
        for p in range(3, 49, self.period):
            period = str(p).zfill(3)
            if not self.regions:
                image_name = image_format % {'dirname': dirname,
                                             'type': type,
                                             'period': period,}
                image_path = os.path.join(images_path, image_name)
                image_url = urlparse.urljoin(url, image_name)
                if callable(download_func):
                    download_func(image_url, image_path)
                # Add the image path to the set.
                imageset.append(image_path)
            else:
                rdict = {}
                for region in self.regions:
                    image_name = image_format % {'dirname': dirname,
                                                 'type': type,
                                                 'period': period,
                                                 'region': region,}
                    image_path = os.path.join(images_path, image_name)
                    image_url = urlparse.urljoin(url, image_name)
                    if callable(download_func):
                        download_func(image_url, image_path)
                    rdict[region] = image_path
                imageset.append(rdict)
        return imageset
|
bsd-3-clause
|
cognitiveclass/edx-platform
|
lms/djangoapps/commerce/tests/__init__.py
|
41
|
3664
|
# -*- coding: utf-8 -*-
""" Commerce app tests package. """
import datetime
import json
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
import httpretty
import jwt
import mock
from edx_rest_api_client import auth
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from student.tests.factories import UserFactory
# Shared fixtures for the commerce test package.
JSON = 'application/json'
TEST_PUBLIC_URL_ROOT = 'http://www.example.com'
TEST_API_URL = 'http://www-internal.example.com/api'
TEST_API_SIGNING_KEY = 'edx'
TEST_BASKET_ID = 7
TEST_ORDER_NUMBER = '100004'
# Canned response body in the shape returned by the payment endpoint.
TEST_PAYMENT_DATA = {
    'payment_processor_name': 'test-processor',
    'payment_form_data': {},
    'payment_page_url': 'http://example.com/pay',
}
@override_settings(ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY, ECOMMERCE_API_URL=TEST_API_URL)
class EdxRestApiClientTest(TestCase):
    """ Tests to ensure the client is initialized properly. """

    TEST_USER_EMAIL = '[email protected]'
    TEST_CLIENT_ID = 'test-client-id'

    def setUp(self):
        # Give the factory user a known email so the JWT payload below is
        # deterministic.
        super(EdxRestApiClientTest, self).setUp()
        self.user = UserFactory()
        self.user.email = self.TEST_USER_EMAIL
        self.user.save()  # pylint: disable=no-member

    @httpretty.activate
    @freeze_time('2015-7-2')
    @override_settings(JWT_ISSUER='http://example.com/oauth', JWT_EXPIRATION=30)
    def test_tracking_context(self):
        """
        Ensure the tracking context is set up in the api client correctly and
        automatically.
        """
        # freeze_time pins utcnow() so the 'exp' claim computed here matches
        # the one the client put in the request.
        # fake an ecommerce api request.
        httpretty.register_uri(
            httpretty.POST,
            '{}/baskets/1/'.format(TEST_API_URL),
            status=200, body='{}',
            adding_headers={'Content-Type': JSON}
        )
        mock_tracker = mock.Mock()
        mock_tracker.resolve_context = mock.Mock(return_value={'client_id': self.TEST_CLIENT_ID, 'ip': '127.0.0.1'})
        with mock.patch('openedx.core.djangoapps.commerce.utils.tracker.get_tracker', return_value=mock_tracker):
            ecommerce_api_client(self.user).baskets(1).post()
        # make sure the request's JWT token payload included correct tracking context values.
        actual_header = httpretty.last_request().headers['Authorization']
        expected_payload = {
            'username': self.user.username,
            'full_name': self.user.profile.name,
            'email': self.user.email,
            'iss': settings.JWT_ISSUER,
            'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.JWT_EXPIRATION),
            'tracking_context': {
                'lms_user_id': self.user.id,  # pylint: disable=no-member
                'lms_client_id': self.TEST_CLIENT_ID,
                'lms_ip': '127.0.0.1',
            },
        }
        expected_header = 'JWT {}'.format(jwt.encode(expected_payload, TEST_API_SIGNING_KEY))
        self.assertEqual(actual_header, expected_header)

    @httpretty.activate
    def test_client_unicode(self):
        """
        The client should handle json responses properly when they contain
        unicode character data.

        Regression test for ECOM-1606.
        """
        expected_content = '{"result": "Préparatoire"}'
        httpretty.register_uri(
            httpretty.GET,
            '{}/baskets/1/order/'.format(TEST_API_URL),
            status=200, body=expected_content,
            adding_headers={'Content-Type': JSON},
        )
        actual_object = ecommerce_api_client(self.user).baskets(1).order.get()
        self.assertEqual(actual_object, {u"result": u"Préparatoire"})
|
agpl-3.0
|
salilab/mdt
|
constr2005/bonds/asgl.py
|
1
|
1074
|
from modeller import *
import os
import mdt
import mdt.features
# Build an MDT of bond lengths by bond type for high-resolution X-ray
# structures, then render a per-bond-type histogram page with ASGL.
env = Environ()
mlib = mdt.Library(env)
mlib.bond_classes.read('${LIB}/bndgrp.lib')
# Single X-ray resolution bin (structures better than 2.0 A).
xray = mdt.features.XRayResolution(mlib, bins=[(0.51, 2.001, 'High res(2.0A)')])
bond_type = mdt.features.BondType(mlib)
# 400 uniform bond-length bins of 0.0025 A starting at 1.0 A.
bond_length = mdt.features.BondLength(mlib,
                                      bins=mdt.uniform_bins(400, 1.0, 0.0025))
m = mdt.Table(mlib, file='mdt.mdt')
# Collapse the resolution axis to the single high-res bin; keep all bond
# types and lengths.
m = m.reshape(features=(xray, bond_type, bond_length),
              offset=(0,0,0), shape=(1,-1,-1))
# Raw ASGL commands injected into each plot (axis/tick/legend styling).
text = """
SET X_LABEL_STYLE = 2, X_TICK_LABEL = -999 -999
SET X_TICK = -999 -999 -999
SET TICK_FONT = 5, CAPTION_FONT = 5
SET Y_TICK = -999 -999 -999
SET WORLD_WINDOW = -999 -999 -999 -999
SET NO_XY_SCOLUMNS = 1 1, XY_SCOLUMNS = 2 1
FILL_COLUMN COLUMN = 2, COLUMN_PARAMETERS = 1. 0.0025
SET BAR_XSHIFT = 0.00125
ZOOM SCALE_WORLDX = 0.08
"""
m.write_asgl(asglroot='asgl1-a', plots_per_page=8, dimensions=1,
             plot_position=1, every_x_numbered=999, text=text, x_decimal=0)
# Run ASGL and convert its PostScript output to PDF.
os.system("asgl asgl1-a")
os.system("ps2pdf asgl1-a.ps")
|
gpl-2.0
|
Euphoria-OS-Devices/android_kernel_motorola_msm8226
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# pid -> command name, populated from sched_switch events; pid 0 is idle.
threads = { 0 : "idle"}

def thread_name(pid):
    """Return a human-readable "comm:pid" label for a known pid."""
    return "%s:%d" % (threads[pid], pid)
# Runqueue event types.  Each carries the pid it concerns and a color()
# used by the GUI to mark the event on the timeline; color() returning
# None means no dedicated marker.
class RunqueueEventUnknown:
    """Placeholder event used when no specific runqueue event applies."""
    @staticmethod
    def color():
        return None

    def __repr__(self):
        return "unknown"

class RunqueueEventSleep:
    """A task left the runqueue to sleep."""
    @staticmethod
    def color():
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

class RunqueueEventWakeup:
    """A task was woken and joined the runqueue."""
    @staticmethod
    def color():
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

class RunqueueEventFork:
    """A newly forked task joined the runqueue."""
    @staticmethod
    def color():
        return (0, 0xff, 0)

    def __init__(self, child):
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

class RunqueueMigrateIn:
    """A task migrated onto this CPU's runqueue."""
    @staticmethod
    def color():
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

class RunqueueMigrateOut:
    """A task migrated off this CPU's runqueue."""
    @staticmethod
    def color():
        return (0xff, 0, 0xff)

    def __init__(self, old):
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Immutable view of one CPU's runqueue (a tuple of pids) plus the
    event that produced it.  State transitions return a NEW snapshot, or
    self when nothing changed."""

    def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
        # Mutable default is harmless here: tuple() copies it immediately.
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Apply a sched_switch (prev -> next) and return the resulting
        snapshot.  taskState comes from the perf Core helpers."""
        event = RunqueueEventUnknown()
        # Both tasks already tracked and prev stays runnable: no change.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self
        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)
        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)
        if next not in next_tasks:
            next_tasks.append(next)
        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return a snapshot with *old* removed (migration off this CPU)."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Shared helper for migrate_in / wake_up / wake_up_new.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # NOTE(review): origin_tostring() is not defined anywhere in this
        # class/file — calling repr() would raise AttributeError; confirm
        # whether this method is dead code or the helper was lost.
        ret = self.tasks.__repr__()
        ret += self.origin_tostring()
        return ret
class TimeSlice:
    """One interval [start, end) of system state: a per-CPU map of
    RunqueueSnapshots plus the total runnable load, copied forward from
    the previous slice and mutated by the event that opens this slice."""

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            # Inherit state from the previous slice.
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate runnable-task count in sync with a per-CPU
        # runqueue replacement.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on *cpu*; appends self to ts_list only
        when the runqueue actually changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        if old_rq is new_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task *new* from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)
        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)
        ts_list.append(self)
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Add *pid* to cpu's runqueue; fork selects the new-task event."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)
        if new_rq is old_rq:
            return
        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        """Close this slice at time t and return the successor slice."""
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlices plus the GUI hooks that render them.
    Python 2 code: relies on integer `/` division and xrange."""

    def __init__(self, arg = []):
        # NOTE(review): mutable default list is shared across instances
        # created without an argument — harmless for this script's single
        # instance, but worth confirming before reuse.
        self.data = arg

    def get_time_slice(self, ts):
        """Return the open slice for timestamp ts, creating the first
        slice (with a sentinel predecessor) when the list is empty."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the slice covering ts; returns its index or -1."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False
            # Python 2 integer division.
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue
            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i
        return found

    def set_root_win(self, win):
        # GUI back-reference used by mouse_down/update_rectangle_cpu.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a textual summary of cpu's runqueue at time t in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return
        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)
        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one slice/cpu cell; redder means higher share of load."""
        rq = slice.rqs[cpu]
        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0
        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)
        top_color = None
        if cpu in slice.event_cpus:
            # Mark the cpu(s) where the slice's event happened.
            top_color = rq.event.color()
        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every slice overlapping [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return
        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return
            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first-start, last-end) time span of all slices."""
        if len(self.data) == 0:
            return (0, 0)
        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest cpu number seen in the final slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw sched tracepoint events and folds them into the
    TimeSliceList, tracking which task each cpu last ran."""

    def __init__(self):
        # cpu -> pid currently on that cpu; -1 means "not yet known".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
        we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # Python 2 print statement: warn about a lost event.
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a task migration between cpus."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a (successful) wakeup; fork=1 for newly forked tasks."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf-script hook: create the global event parser before replay."""
    global parser
    parser = SchedEventProxy()

def trace_end():
    """perf-script hook: after replay, show the wx GUI with the results."""
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining sched tracepoints plus the catch-all handler: all no-ops.
def sched__sched_wait_task(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid, prio):
	pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	ret):
	pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	comm, pid):
	pass
# Called by perf for any event without a dedicated handler above.
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
	common_pid, common_comm):
	pass
|
gpl-2.0
|
racmariano/skidom
|
backend/resorts/migrations/0006_auto_20171008_1416.py
|
1
|
1078
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-10-08 18:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename the misspelled ``condtions_page_url`` field and attach
    dynamic_scraper's Scraper/SchedulerRuntime foreign keys to Resort."""

    dependencies = [
        ('dynamic_scraper', '0025_new_follow_pages_page_xpath_pagination_attribute'),
        ('resorts', '0005_resort_condtions_page_url'),
    ]

    operations = [
        # Fix the typo: condtions -> conditions.
        migrations.RenameField(
            model_name='resort',
            old_name='condtions_page_url',
            new_name='conditions_page_url',
        ),
        # SET_NULL keeps resorts alive when their scraper is deleted,
        # hence null=True on both new FKs.
        migrations.AddField(
            model_name='resort',
            name='scraper',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='dynamic_scraper.Scraper'),
        ),
        migrations.AddField(
            model_name='resort',
            name='scraper_runtime',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='dynamic_scraper.SchedulerRuntime'),
        ),
    ]
|
mit
|
kmee/l10n-brazil
|
sped_imposto/models/sped_cest.py
|
2
|
2851
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Taŭga Tecnologia
# Aristides Caldeira <[email protected]>
# License AGPL-3 or later (http://www.gnu.org/licenses/agpl)
#
from __future__ import division, print_function, unicode_literals
import logging
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
_logger = logging.getLogger(__name__)
try:
from pybrasil.base import mascara
except (ImportError, IOError) as err:
_logger.debug(err)
class SpedCEST(models.Model):
    """Tabela de códigos CEST (Código Especificador da Substituição
    Tributária): raw 7-digit code plus a formatted display name."""
    _name = b'sped.cest'
    _description = 'CESTs'
    _order = 'codigo'
    _rec_name = 'cest'

    # raw 7-digit CEST code, e.g. '0100100'
    codigo = fields.Char(
        string='Código',
        size=7,
        required=True,
        index=True,
    )
    descricao = fields.Text(
        string='Descrição',
        required=True,
    )
    # 'NN.NNN.NN'-formatted version of codigo (stored so it is searchable)
    codigo_formatado = fields.Char(
        string='CEST',
        compute='_compute_cest',
        store=True,
    )
    # display name: formatted code plus the first 60 chars of the description
    cest = fields.Char(
        string='CEST',
        compute='_compute_cest',
        store=True,
    )

    @api.depends('codigo', 'descricao')
    def _compute_cest(self):
        """Compute the formatted code and the record's display name."""
        for cest in self:
            # New/incomplete records carry falsy codigo/descricao; slicing
            # False would raise TypeError, so skip until the code is set.
            if not cest.codigo:
                cest.codigo_formatado = False
                cest.cest = False
                continue
            cest.codigo_formatado = (
                cest.codigo[:2] + '.' +
                cest.codigo[2:5] + '.' +
                cest.codigo[5:]
            )
            cest.cest = cest.codigo_formatado
            if cest.descricao:
                cest.cest += ' - ' + cest.descricao[:60]

    @api.constrains('codigo')
    def _check_codigo(self):
        """Reject duplicate CEST codes.

        Declared with @api.constrains (not @api.depends): depends only
        schedules recomputation of stored fields and would never run this
        validation on create/write.
        """
        for cest in self:
            if cest.id:
                cest_ids = self.search(
                    [('codigo', '=', cest.codigo), ('id', '!=', cest.id)])
            else:
                cest_ids = self.search([('codigo', '=', cest.codigo)])
            if len(cest_ids) > 0:
                raise ValidationError(_(u'Código CEST já existe na tabela!'))

    @api.model
    def name_search(self, name='', args=None, operator='ilike', limit=100):
        """Search by raw code, formatted code or description."""
        # args defaults to None; normalise once up front — the original
        # indexed it first and crashed with TypeError on len(None).
        args = list(args or [])
        for i in range(len(args)):
            arg = args[i]
            if arg[0] == 'id' and isinstance(arg[2], (list, tuple)):
                if len(arg[2][0]) >= 3:
                    lista_ids = []
                    for item in arg[2]:
                        lista_ids.append(item[1])
                    args[i] = ['id', arg[1], lista_ids]
        # the original tuple listed 'ilike' twice; '=like' was the intent
        if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
            args = [
                '|',
                ('codigo', '=', name),
                '|',
                ('codigo_formatado', '=', mascara(name, ' . . ')),
                ('descricao', operator, name),
            ] + args
            cest_ids = self.search(args, limit=limit)
            return cest_ids.name_get()
        return super(SpedCEST, self).name_search(
            name=name, args=args, operator=operator, limit=limit)
|
agpl-3.0
|
richard-willowit/odoo
|
odoo/tools/mimetypes.py
|
16
|
6661
|
# -*- coding: utf-8 -*-
"""
Mimetypes-related utilities
# TODO: reexport stdlib mimetypes?
"""
import collections
import io
import logging
import re
import zipfile
__all__ = ['guess_mimetype']
_logger = logging.getLogger(__name__)
# We define our own guess_mimetype implementation and if magic is available we
# use it instead.
# discriminants for zip-based file formats
_ooxml_dirs = {
'word/': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'pt/': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'xl/': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
}
def _check_ooxml(data):
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
filenames = z.namelist()
# OOXML documents should have a [Content_Types].xml file for early
# check that we're interested in this thing at all
if '[Content_Types].xml' not in filenames:
return False
# then there is a directory whose name denotes the type of the file:
# word, pt (powerpoint) or xl (excel)
for dirname, mime in _ooxml_dirs.items():
if any(entry.startswith(dirname) for entry in filenames):
return mime
return False
# checks that a string looks kinda sorta like a mimetype
_mime_validator = re.compile(r"""
[\w-]+ # type-name
/ # subtype separator
[\w-]+ # registration facet or subtype
(?:\.[\w-]+)* # optional faceted name
(?:\+[\w-]+)? # optional structured syntax specifier
""", re.VERBOSE)
def _check_open_container_format(data):
# Open Document Format for Office Applications (OpenDocument) Version 1.2
#
# Part 3: Packages
# 3 Packages
# 3.3 MIME Media Type
with io.BytesIO(data) as f, zipfile.ZipFile(f) as z:
# If a MIME media type for a document exists, then an OpenDocument
# package should contain a file with name "mimetype".
if 'mimetype' not in z.namelist():
return False
# The content of this file shall be the ASCII encoded MIME media type
# associated with the document.
marcel = z.read('mimetype').decode('ascii')
# check that it's not too long (RFC6838 § 4.2 restricts type and
# subtype to 127 characters each + separator, strongly recommends
# limiting them to 64 but does not require it) and that it looks a lot
# like a valid mime type
if len(marcel) < 256 and _mime_validator.match(marcel):
return marcel
return False
# Byte signatures expected at offset 0x200 inside OLECF containers.
# re.VERBOSE only strips the literal whitespace/newlines of the pattern
# source; the escape sequences themselves are matched as raw bytes.
_xls_pattern = re.compile(b"""
    \x09\x08\x10\x00\x00\x06\x05\x00
    | \xFD\xFF\xFF\xFF(\x10|\x1F|\x20|"|\\#|\\(|\\))
    """, re.VERBOSE)
_ppt_pattern = re.compile(b"""
    \x00\x6E\x1E\xF0
    | \x0F\x00\xE8\x03
    | \xA0\x46\x1D\xF0
    | \xFD\xFF\xFF\xFF(\x0E|\x1C|\x43)\x00\x00\x00
    """, re.VERBOSE)
def _check_olecf(data):
    """ Pre-OOXML Office formats are OLE Compound Files which all use the same
    file signature ("magic bytes") and should have a subheader at offset 512
    (0x200).

    Subheaders taken from http://www.garykessler.net/library/file_sigs.html
    according to which Mac office files *may* have different subheaders. We'll
    ignore that.
    """
    offset = 0x200
    # bytes.startswith takes a start offset, so this tests the subheader
    # in place without slicing a copy.
    if data.startswith(b'\xEC\xA5\xC1\x00', offset):
        return 'application/msword'
    # the _xls_pattern stuff doesn't seem to work correctly (the test file
    # only has a bunch of \xf* at offset 0x200), that apparently works
    elif b'Microsoft Excel' in data:
        return 'application/vnd.ms-excel'
    elif _ppt_pattern.match(data, offset):
        return 'application/vnd.ms-powerpoint'
    return False
# for "master" formats with many subformats, discriminants is a list of
# functions, tried in order and the first non-falsy value returned is the
# selected mime type. If all functions return falsy values, the master
# mimetype is returned.
_Entry = collections.namedtuple('_Entry', ['mimetype', 'signatures', 'discriminants'])
# Ordered signature table consumed by guess_mimetype(): each entry maps one
# or more leading byte signatures to a mimetype plus optional sub-checkers.
_mime_mappings = (
    # pdf
    _Entry('application/pdf', [b'%PDF'], []),
    # jpg, jpeg, png, gif, bmp
    _Entry('image/jpeg', [b'\xFF\xD8\xFF\xE0', b'\xFF\xD8\xFF\xE2', b'\xFF\xD8\xFF\xE3', b'\xFF\xD8\xFF\xE1'], []),
    _Entry('image/png', [b'\x89PNG\r\n\x1A\n'], []),
    _Entry('image/gif', [b'GIF87a', b'GIF89a'], []),
    _Entry('image/bmp', [b'BM'], []),
    # OLECF files in general (Word, Excel, PPT, default to word because why not?)
    _Entry('application/msword', [b'\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1', b'\x0D\x44\x4F\x43'], [
        _check_olecf
    ]),
    # zip, but will include jar, odt, ods, odp, docx, xlsx, pptx, apk
    _Entry('application/zip', [b'PK\x03\x04'], [_check_ooxml, _check_open_container_format]),
)
def guess_mimetype(bin_data, default='application/octet-stream'):
    """ Attempts to guess the mime type of the provided binary data, similar
    to but significantly more limited than libmagic

    :param bytes bin_data: binary data to try and guess a mime type for
    :param str default: mimetype returned when no signature matches
    :returns: matched mimetype or ``default`` if none matched
    """
    # by default, guess the type using the magic number of file hex signature (like magic, but more limited)
    # see http://www.filesignatures.net/ for file signatures
    for entry in _mime_mappings:
        for signature in entry.signatures:
            if bin_data.startswith(signature):
                for discriminant in entry.discriminants:
                    try:
                        guess = discriminant(bin_data)
                        if guess:
                            return guess
                    except Exception:
                        # log-and-next: a broken sub-checker must not stop
                        # the fallback to the master mimetype.
                        # Logger.warning, not the deprecated .warn alias.
                        _logger.getChild('guess_mimetype').warning(
                            "Sub-checker '%s' of type '%s' failed",
                            discriminant.__name__, entry.mimetype,
                            exc_info=True
                        )
                # if no discriminant or no discriminant matches, return
                # primary mime type
                return entry.mimetype
    return default
try:
    import magic
except ImportError:
    magic = None
else:
    # There are 2 python libs named 'magic' with incompatible api.
    # magic from pypi https://pypi.python.org/pypi/python-magic/
    if hasattr(magic,'from_buffer'):
        # NOTE(review): the `default` parameter is accepted but ignored by
        # both magic-backed replacements below — callers passing a custom
        # default get whatever the library reports instead.
        guess_mimetype = lambda bin_data, default=None: magic.from_buffer(bin_data, mime=True)
    # magic from file(1) https://packages.debian.org/squeeze/python-magic
    elif hasattr(magic,'open'):
        # a single magic cookie is shared by every call
        ms = magic.open(magic.MAGIC_MIME_TYPE)
        ms.load()
        guess_mimetype = lambda bin_data, default=None: ms.buffer(bin_data)
|
gpl-3.0
|
toymachine/concurrence
|
lib/concurrence/xmpp/sasl.py
|
2
|
1493
|
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
import binascii
import base64
import md5
import random
# DIGEST-MD5 helper primitives (Python 2: uses the legacy `md5` module).
def H(s):
    # raw (binary) MD5 digest of s
    return md5.new(s).digest()
def KD(k, s):
    # keyed digest: H(key ":" data)
    return H(k + ":" + s)
def HEX(n):
    # lowercase hex encoding of a byte string
    return binascii.hexlify(n)
def UNHEX(h):
    # inverse of HEX()
    return binascii.unhexlify(h)
def response(challenge, user, password, realm, digest_uri):
    """Build a base64-encoded SASL DIGEST-MD5 response for a base64-encoded
    server challenge (Python 2 only: relies on the `md5` module and
    str-based byte handling)."""
    #parse challenge
    c = {}
    for x in base64.decodestring(challenge).split(","):
        i = x.find('=')
        if i == -1: continue
        key = x[:i].strip()
        value = x[i+1:].strip()
        # strip surrounding double quotes from quoted values
        if value[0] == '"' and value[-1] == '"': value = value[1:-1]
        # e.g. 'digest-uri' -> 'digest_uri' so keys are uniform
        key = key.replace('-', '_')
        c[key] = value
    #calculate response
    nonce = c['nonce']
    # 128-bit random client nonce as lowercase hex; in Python 2 hex() of a
    # long ends with 'L', hence the [2:-1] slice after the '0x' prefix
    cnonce = hex(random.getrandbits(128))[2:-1].lower()
    nc = "00000001"
    qop = "auth"
    digest = HEX(H("%s:%s:%s" % (user, realm, password)))
    A2 = "AUTHENTICATE:" + digest_uri
    A1 = UNHEX( digest ) + ":" + nonce + ":" + cnonce
    response = HEX(KD(HEX(H(A1)), nonce + ":" + nc + ":" + cnonce + ":" + qop + ":" + HEX(H(A2))))
    response = """username="%s",realm="%s",nonce="%s",cnonce="%s",nc=00000001,qop=auth,digest-uri="%s",response=%s,charset=utf-8""" % \
        (user, realm, nonce, cnonce, digest_uri, response)
    # drop the newlines base64.encodestring inserts every 76 characters
    return "".join(base64.encodestring(response).split("\n"))
|
bsd-3-clause
|
mzadel/libmapper-sc
|
editors/sced/scedwin/py/Settings.py
|
37
|
2412
|
# sced (SuperCollider mode for gedit)
#
# Copyright 2012 Jakob Leben
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os import path
import simplejson
def locate():
    """Return the path of the sced config file, or None when it cannot be
    determined.

    %APPDATA% only exists on Windows; when the variable is undefined,
    path.expandvars() returns the template string *unchanged* — it never
    returns None — so the previous `is not None` check always passed and
    produced a bogus "%APPDATA%/..." path on other platforms.
    """
    base = path.expandvars("%APPDATA%")
    if base and base != "%APPDATA%":
        return path.join(base, "sced.config.json")
    else:
        return None
def load():
    """Load settings from the JSON config file, substituting defaults for
    anything missing or unreadable (Python 2 module)."""
    sets = Settings()
    try:
        filename = locate()
        f = open(filename, "r")
    except:
        # NOTE(review): if locate() itself raises, `filename` is unbound
        # here and str(filename) raises NameError inside the handler.
        print "Could not open configuration file: " + str(filename)
        f = None
    data = {}
    if f is not None:
        try:
            data = simplejson.load(f)
        except:
            print "Configuration file not a valid JSON script!"
        f.close()
    # missing keys simply yield None (or the stated default)
    sets.sc_dir = data.get("supercollider-dir")
    sets.advanced = data.get("advanced", False)
    lang_data = data.get("interpreter", {})
    sets.sclang_cmd = lang_data.get("command")
    sets.sclang_work_dir = lang_data.get("runtime-dir")
    print "Sced settings loaded."
    return sets
# map gconf options to gobject properties
class Settings(object):
    """Container for sced's persistent configuration (Python 2 module)."""
    def __init__(self):
        # all values default to None until load() fills them in
        self.sc_dir = None
        self.advanced = None
        self.sclang_cmd = None
        self.sclang_work_dir = None
    def save(self):
        """Serialize the current settings to the config file as JSON."""
        try:
            # NOTE(review): `self.__filename` is never assigned anywhere and
            # os.path has no remove(), so this try block always raises and
            # only ever prints "existing". Probably meant os.remove(locate()).
            path.remove(self.__filename)
        except:
            print "existing"
        f = open(locate(), "w")
        data = {
            "supercollider-dir": self.sc_dir,
            "advanced": self.advanced,
            "interpreter": {
                "command": self.sclang_cmd,
                "runtime-dir": self.sclang_work_dir
            }
        };
        simplejson.dump(data, f, indent=" ")
        f.close()
        print "Sced settings saved."
|
gpl-3.0
|
cetic/ansible
|
lib/ansible/modules/network/avi/avi_gslb.py
|
7
|
5653
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslb
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Gslb Avi RESTful Object
description:
- This module is used to configure Gslb object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
clear_on_max_retries:
description:
- Max retries after which the remote site is treatedas a fresh start.
- In fresh start all the configsare downloaded.
- Allowed values are 1-1024.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
client_ip_addr_group:
description:
- Group to specify if the client ip addresses are public or private.
- Field introduced in 17.1.2.
version_added: "2.4"
description:
description:
- User defined description for the object.
dns_configs:
description:
- Sub domain configuration for the gslb.
- Gslb service's fqdn must be a match one of these subdomains.
is_federated:
description:
- This field indicates that this object is replicated across gslb federation.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
version_added: "2.4"
leader_cluster_uuid:
description:
- Mark this site as leader of gslb configuration.
- This site is the one among the avi sites.
name:
description:
- Name for the gslb object.
required: true
send_interval:
description:
- Frequency with which group members communicate.
- Allowed values are 1-3600.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
sites:
description:
- Select avi site member belonging to this gslb.
tenant_ref:
description:
- It is a reference to an object of type tenant.
third_party_sites:
description:
- Third party site member belonging to this gslb.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the gslb object.
view_id:
description:
- The view-id is used in maintenance mode to differentiate partitioned groups while they havethe same gslb namespace.
- Each partitioned groupwill be able to operate independently by using theview-id.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Gslb object
avi_gslb:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslb
"""
RETURN = '''
obj:
description: Gslb (api/gslb) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: assemble the gslb argument spec, construct the Ansible
    module and hand off to the generic Avi REST helper."""
    field_specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        clear_on_max_retries=dict(type='int'),
        client_ip_addr_group=dict(type='dict'),
        description=dict(type='str'),
        dns_configs=dict(type='list'),
        is_federated=dict(type='bool'),
        leader_cluster_uuid=dict(type='str'),
        name=dict(type='str', required=True),
        send_interval=dict(type='int'),
        sites=dict(type='list'),
        tenant_ref=dict(type='str'),
        third_party_sites=dict(type='list'),
        url=dict(type='str'),
        uuid=dict(type='str'),
        view_id=dict(type='int'),
    )
    # common controller/credential options shared by every Avi module
    field_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=field_specs,
                           supports_check_mode=True)
    # bail out early when the optional SDK import failed
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'gslb', set([]))


if __name__ == '__main__':
    main()
|
gpl-3.0
|
beezee/GAE-Django-base-app
|
django/conf/locale/it/formats.py
|
232
|
1838
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y' # 25 Ottobre 2006
TIME_FORMAT = 'H:i:s' # 14:30:59
DATETIME_FORMAT = 'l d F Y H:i:s' # Mercoledì 25 Ottobre 2006 14:30:59
YEAR_MONTH_FORMAT = 'F Y' # Ottobre 2006
MONTH_DAY_FORMAT = 'j/F' # 10/2006
SHORT_DATE_FORMAT = 'd/M/Y' # 25/12/2009
SHORT_DATETIME_FORMAT = 'd/M/Y H:i:s' # 25/10/2009 14:30:59
FIRST_DAY_OF_WEEK = 1 # Lunedì
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Input formats are tried in order: ISO first, then Italian day-first
# variants with '-' or '/' separators and 4- or 2-digit years.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%Y/%m/%d', # '2008-10-25', '2008/10/25'
    '%d-%m-%Y', '%d/%m/%Y', # '25-10-2006', '25/10/2006'
    '%d-%m-%y', '%d/%m/%y', # '25-10-06', '25/10/06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
    '%d-%m-%Y %H:%M', # '25-10-2006 14:30'
    '%d-%m-%Y', # '25-10-2006'
    '%d-%m-%y %H:%M:%S', # '25-10-06 14:30:59'
    '%d-%m-%y %H:%M', # '25-10-06 14:30'
    '%d-%m-%y', # '25-10-06'
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
)
# Italian number formatting: 1.234.567,89
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
bsd-3-clause
|
TinLe/Diamond
|
src/collectors/ipvs/test/testipvs.py
|
32
|
1828
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ipvs import IPVSCollector
################################################################################
class TestIPVSCollector(CollectorTestCase):
    """Unit tests for IPVSCollector using canned ipvsadm output."""
    def setUp(self):
        # 'bin': 'true' points the collector at the no-op `true` binary so
        # no real ipvsadm installation is required.
        config = get_collector_config('IPVSCollector', {
            'interval': 10,
            'bin': 'true',
            'use_sudo': False
        })
        self.collector = IPVSCollector(config, None)
    def test_import(self):
        # smoke test: the collector class was imported successfully
        self.assertTrue(IPVSCollector)
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # feed fixture output instead of spawning the real tool
        patch_communicate = patch(
            'subprocess.Popen.communicate',
            Mock(return_value=(
                self.getFixture('ipvsadm').getvalue(),
                '')))
        patch_communicate.start()
        self.collector.collect()
        patch_communicate.stop()
        # expected metrics parsed from the 'ipvsadm' fixture
        metrics = {
            "TCP_172_16_1_56:80.total.conns": 116,
            "TCP_172_16_1_56:443.total.conns": 59,
            "TCP_172_16_1_56:443.10_68_15_66:443.conns": 59,
            "TCP_172_16_1_56:443.10_68_15_66:443.outbytes": 216873,
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
    unittest.main()
|
mit
|
hendrikx-itc/python-minerva
|
src/minerva/storage/outputdescriptor.py
|
1
|
1040
|
# -*- coding: utf-8 -*-
from minerva.storage.valuedescriptor import ValueDescriptor
from minerva.storage import datatype
class OutputDescriptor:
    """
    Combines a value descriptor with configuration for serializing values.
    """
    def __init__(
            self, value_descriptor: ValueDescriptor,
            serializer_config: dict=None):
        self.value_descriptor = value_descriptor
        self.serializer_config = serializer_config
        # bind a ready-to-use serializer derived from the data type
        data_type = value_descriptor.data_type
        self.serialize = data_type.string_serializer(serializer_config)

    @staticmethod
    def load(config):
        """Build an OutputDescriptor from its dict representation
        (inverse of :meth:`to_dict`)."""
        data_type = datatype.registry[config['data_type']]
        descriptor = ValueDescriptor(config['name'], data_type)
        return OutputDescriptor(descriptor, config.get('serializer_config'))

    def to_dict(self):
        """Return a plain-dict representation suitable for :meth:`load`."""
        descriptor = self.value_descriptor
        return {
            'name': descriptor.name,
            'data_type': descriptor.data_type.name,
            'serializer_config': self.serializer_config
        }
|
gpl-3.0
|
mahendra-r/home-assistant
|
config/custom_components/mqtt_example.py
|
19
|
1763
|
"""
custom_components.mqtt_example
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Shows how to communicate with MQTT. Follows a topic on MQTT and updates the
state of an entity to the last message received on that topic.
Also offers a service 'set_state' that will publish a message on the topic that
will be passed via MQTT to our message received listener. Call the service with
example payload {"new_state": "some new state"}.
Configuration:
To use the mqtt_example component you will need to add the following to your
configuration.yaml file.
mqtt_example:
topic: home-assistant/mqtt_example
"""
import homeassistant.loader as loader
# The domain of your component. Should be equal to the name of your component
DOMAIN = "mqtt_example"
# List of component names (string) your component depends upon
DEPENDENCIES = ['mqtt']
CONF_TOPIC = 'topic'
DEFAULT_TOPIC = 'home-assistant/mqtt_example'
def setup(hass, config):
    """ Setup our mqtt_example component. """
    mqtt = loader.get_component('mqtt')
    entity_id = 'mqtt_example.last_message'
    topic = config[DOMAIN].get('topic', DEFAULT_TOPIC)

    def on_message(topic, payload, qos):
        """ A new MQTT message has been received. """
        # mirror the latest payload into the entity's state
        hass.states.set(entity_id, payload)

    # follow the configured topic and seed the entity with a placeholder
    mqtt.subscribe(hass, topic, on_message)
    hass.states.set(entity_id, 'No messages')

    def handle_set_state(call):
        """ Service to send a message. """
        mqtt.publish(hass, topic, call.data.get('new_state'))

    # expose the service as <domain>.set_state
    hass.services.register(DOMAIN, 'set_state', handle_set_state)

    # signal that initialization was successful
    return True
|
mit
|
jruben/jruben.github.io
|
node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/default.py
|
364
|
2532
|
# -*- coding: utf-8 -*-
"""
pygments.styles.default
~~~~~~~~~~~~~~~~~~~~~~~
The default highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class DefaultStyle(Style):
    """
    The default style (inspired by Emacs 22).
    """

    background_color = "#f8f8f8"
    default_style = ""

    # Values are Pygments style strings: optional modifiers
    # (bold/italic/noitalic/nobold/border:...) followed by a #RRGGBB colour.
    styles = {
        Whitespace:                "#bbbbbb",
        Comment:                   "italic #408080",
        Comment.Preproc:           "noitalic #BC7A00",

        #Keyword:                   "bold #AA22FF",
        Keyword:                   "bold #008000",
        Keyword.Pseudo:            "nobold",
        Keyword.Type:              "nobold #B00040",

        Operator:                  "#666666",
        Operator.Word:             "bold #AA22FF",

        Name.Builtin:              "#008000",
        Name.Function:             "#0000FF",
        Name.Class:                "bold #0000FF",
        Name.Namespace:            "bold #0000FF",
        Name.Exception:            "bold #D2413A",
        Name.Variable:             "#19177C",
        Name.Constant:             "#880000",
        Name.Label:                "#A0A000",
        Name.Entity:               "bold #999999",
        Name.Attribute:            "#7D9029",
        Name.Tag:                  "bold #008000",
        Name.Decorator:            "#AA22FF",

        String:                    "#BA2121",
        String.Doc:                "italic",
        String.Interpol:           "bold #BB6688",
        String.Escape:             "bold #BB6622",
        String.Regex:              "#BB6688",
        #String.Symbol:             "#B8860B",
        String.Symbol:             "#19177C",
        String.Other:              "#008000",
        Number:                    "#666666",

        Generic.Heading:           "bold #000080",
        Generic.Subheading:        "bold #800080",
        Generic.Deleted:           "#A00000",
        Generic.Inserted:          "#00A000",
        Generic.Error:             "#FF0000",
        Generic.Emph:              "italic",
        Generic.Strong:            "bold",
        Generic.Prompt:            "bold #000080",
        Generic.Output:            "#888",
        Generic.Traceback:         "#04D",

        Error:                     "border:#FF0000"
    }
|
mit
|
petrjasek/superdesk-core
|
superdesk/media/image.py
|
2
|
4149
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Utilities for extractid metadata from image files."""
import io
from superdesk.text_utils import decode
from PIL import Image, ExifTags
from PIL import IptcImagePlugin
from PIL.TiffImagePlugin import IFDRational
from flask import json
from .iim_codes import iim_codes
ORIENTATIONS = {
1: ("Normal", 0),
2: ("Mirrored left-to-right", 0),
3: ("Rotated 180 degrees", 180),
4: ("Mirrored top-to-bottom", 0),
5: ("Mirrored along top-left diagonal", 0),
6: ("Rotated 90 degrees", -90),
7: ("Mirrored along top-right diagonal", 0),
8: ("Rotated 270 degrees", -270),
}
EXIF_ORIENTATION_TAG = 274
def fix_orientation(file_stream):
    """Returns the image fixed accordingly to the orientation.

    @param file_stream: stream
    @return: a new BytesIO holding the rotated jpeg, or the original
        stream when no rotation is needed or no EXIF data is present.
    """
    file_stream.seek(0)
    img = Image.open(file_stream)
    file_stream.seek(0)
    if not hasattr(img, "_getexif"):
        return file_stream
    rv = img._getexif()
    if not rv:
        return file_stream
    exif = dict(rv)
    if exif.get(EXIF_ORIENTATION_TAG, None):
        orientation = exif.get(EXIF_ORIENTATION_TAG)
        # only plain rotations are fixed; mirrored orientations are kept as-is
        if orientation in [3, 6, 8]:
            degrees = ORIENTATIONS[orientation][1]
            # expand=True grows the canvas so that 90/270-degree rotations
            # of non-square images are not cropped to the original bounds
            # (rotate() keeps the input size by default).
            img2 = img.rotate(degrees, expand=True)
            output = io.BytesIO()
            img2.save(output, "jpeg")
            output.seek(0)
            return output
    return file_stream
def get_meta(file_stream):
    """Returns the image metadata in a dictionary of tag:value pairs.

    @param file_stream: stream
    """
    # remember the caller's position so the stream can be restored below
    current = file_stream.tell()
    file_stream.seek(0)
    img = Image.open(file_stream)
    try:
        rv = img.getexif()
    except AttributeError:
        # older Pillow versions without Image.getexif — no metadata
        return {}
    if not rv:
        return {}
    exif = dict(rv)
    file_stream.seek(current)
    exif_meta = {}
    for k, v in exif.items():
        try:
            key = ExifTags.TAGS[k].strip()
        except KeyError:
            # unknown/vendor-specific tag id — skip it
            continue
        if key == "GPSInfo":
            # lookup GPSInfo description key names
            value = {
                ExifTags.GPSTAGS[vk].strip(): convert_exif_value(vv, vk) for vk, vv in v.items() if is_serializable(vv)
            }
            exif_meta[key] = value
        elif is_serializable(v):
            value = v.decode("UTF-8") if isinstance(v, bytes) else v
            exif_meta[key] = convert_exif_value(value)

    # Remove this as it's too long to send in headers
    exif_meta.pop("UserComment", None)

    return exif_meta
def convert_exif_value(val, key=None):
    """Recursively normalise a raw EXIF value into JSON-friendly types."""
    # GPSAltitudeRef arrives as a single byte: b"\x00" maps to 0, else 1
    if ExifTags.GPSTAGS.get(key) == "GPSAltitudeRef":
        return 0 if val == b"\x00" else 1
    if isinstance(val, tuple):
        return tuple(convert_exif_value(item) for item in val)
    if isinstance(val, list):
        return [convert_exif_value(item) for item in val]
    if isinstance(val, IFDRational):
        try:
            return float(str(val._val))
        except ValueError:
            # _val was not a plain number; fall back to a bounded rational
            num, den = val.limit_rational(100)
            return round(num / den, 3)
    return val
def is_serializable(val):
    """Return True when ``val`` survives JSON encoding after EXIF
    normalisation via convert_exif_value()."""
    try:
        json.dumps(convert_exif_value(val))
    except (TypeError, UnicodeError):
        return False
    return True
def get_meta_iptc(file_stream):
    """Returns the image IPTC metadata in a dictionary of tag:value pairs.

    @param file_stream: stream
    """
    file_stream.seek(0)
    img = Image.open(file_stream)
    iptc_raw = IptcImagePlugin.getiptcinfo(img)
    metadata = {}
    if iptc_raw is None:
        return metadata
    for code, value in iptc_raw.items():
        try:
            # translate the numeric IIM record/dataset code to a tag name
            tag = iim_codes[code]
        except KeyError:
            # codes not in our IIM table are skipped
            continue
        if isinstance(value, list):
            value = [decode(v) for v in value]
        elif isinstance(value, bytes):
            value = decode(value)
        metadata[tag] = value
    return metadata
|
agpl-3.0
|
tpltnt/scapy
|
scapy/layers/llmnr.py
|
4
|
2391
|
from scapy.fields import *
from scapy.packet import *
from scapy.layers.inet import UDP
from scapy.layers.dns import DNSQRField, DNSRRField, DNSRRCountField
"""
LLMNR (Link Local Multicast Node Resolution).
[RFC 4795]
"""
#############################################################################
### LLMNR (RFC4795) ###
#############################################################################
# LLMNR is based on the DNS packet format (RFC1035 Section 4)
# RFC also envisions LLMNR over TCP. Like vista, we don't support it -- arno
_LLMNR_IPv6_mcast_Addr = "FF02:0:0:0:0:0:1:3"
_LLMNR_IPv4_mcast_addr = "224.0.0.252"
class LLMNRQuery(Packet):
    # LLMNR reuses the DNS wire format (RFC 1035 section 4); the bit
    # fields below mirror the DNS flags word as restricted by RFC 4795.
    name = "Link Local Multicast Node Resolution - Query"
    fields_desc = [ ShortField("id", 0),
                    BitField("qr", 0, 1),
                    BitEnumField("opcode", 0, 4, { 0:"QUERY" }),
                    BitField("c", 0, 1),
                    BitField("tc", 0, 2),
                    BitField("z", 0, 4),
                    BitEnumField("rcode", 0, 4, { 0:"ok" }),
                    DNSRRCountField("qdcount", None, "qd"),
                    DNSRRCountField("ancount", None, "an"),
                    DNSRRCountField("nscount", None, "ns"),
                    DNSRRCountField("arcount", None, "ar"),
                    DNSQRField("qd", "qdcount"),
                    DNSRRField("an", "ancount"),
                    DNSRRField("ns", "nscount"),
                    DNSRRField("ar", "arcount",0)]
    # LLMNR uses UDP port 5355 for both directions
    overload_fields = {UDP: {"sport": 5355, "dport": 5355 }}
    def hashret(self):
        # pair answers with queries by transaction id
        return struct.pack("!H", self.id)
class LLMNRResponse(LLMNRQuery):
    """Same layout as a query, with the QR flag set."""
    name = "Link Local Multicast Node Resolution - Response"
    qr = 1
    def answers(self, other):
        # a response answers a query with a matching transaction id
        return (isinstance(other, LLMNRQuery) and
                self.id == other.id and
                self.qr == 1 and
                other.qr == 0)
def _llmnr_dispatcher(x, *args, **kargs):
    """Dispatch a raw UDP/5355 payload to LLMNRQuery or LLMNRResponse.

    The QR flag is the top bit of the third header byte (the header is:
    id, 2 bytes; then flags, 2 bytes). The previous code tested x[4] —
    the first qdcount byte — and could also raise IndexError for 3- or
    4-byte payloads that already satisfy the len(x) >= 3 guard.
    """
    cls = conf.raw_layer
    if len(x) >= 3:
        # tolerate both Python 2 str payloads and Python 3 bytes
        flags_hi = x[2] if isinstance(x[2], int) else ord(x[2])
        if flags_hi & 0x80: # Response
            cls = LLMNRResponse
        else:               # Query
            cls = LLMNRQuery
    return cls(x, *args, **kargs)


bind_bottom_up(UDP, _llmnr_dispatcher, { "dport": 5355 })
bind_bottom_up(UDP, _llmnr_dispatcher, { "sport": 5355 })
# LLMNRQuery(id=RandShort(), qd=DNSQR(qname="vista.")))
|
gpl-2.0
|
scikit-nano/scikit-nano
|
sknano/core/atoms/_poav_atoms.py
|
2
|
20712
|
# -*- coding: utf-8 -*-
"""
===============================================================================
Mixin Atom classes for POAV analysis (:mod:`sknano.core.atoms._poav_atoms`)
===============================================================================
.. currentmodule:: sknano.core.atoms._poav_atoms
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
import functools
import operator
import warnings
import numpy as np
np.seterr(all='warn')
from sknano.core.math import vector as vec
__all__ = ['POAV', 'POAV1', 'POAV2', 'POAVR',
'POAVAtomMixin', 'POAVAtomsMixin']
class POAV:
    """Base class for POAV (pi-orbital axis vector) analysis.
    Parameters
    ----------
    sigma_bonds : :class:`~sknano.core.atoms.Bonds`
        :class:`~sknano.core.atoms.Bonds` instance.
    Attributes
    ----------
    cosa12
    cosa23
    cosa31
    """
    def __init__(self, sigma_bonds):
        # The three sigma bonds of the (3-coordinate) central atom.
        self.bonds = sigma_bonds
        self.bond1 = self.bonds[0].vector
        self.bond2 = self.bonds[1].vector
        self.bond3 = self.bonds[2].vector
        self.bond_angles = self.bonds.angles
        self.bond_angle_pairs = self.bonds.bond_angle_pairs
        self.sigma_bond_angle12 = self.bond_angles[0]
        self.sigma_bond_angle23 = self.bond_angles[1]
        self.sigma_bond_angle31 = self.bond_angles[2]
        # Cosines of the sigma-bond angles, cached for the hybridization
        # formulas used by the POAV2 subclass.
        self.cosa12 = np.cos(self.bond_angles[0])
        self.cosa23 = np.cos(self.bond_angles[1])
        self.cosa31 = np.cos(self.bond_angles[2])
        # Subclasses (POAV1/POAV2/POAVR) override _v1/_v2/_v3 with
        # rescaled versions of the bond vectors.
        self._v1 = self.bond1
        self._v2 = self.bond2
        self._v3 = self.bond3
        # Filled in later by POAVAtomsMixin.compute_POAVs().
        self._pyramidalization_angles = None
        self._sigma_pi_angles = None
        self._misalignment_angles = None
    def __str__(self):
        fmtstr = '{}\n=====\n'.format(self.__class__.__name__)
        for k, v in list(self.todict(rad2deg=True).items()):
            fmtstr += '{}: {}\n'.format(k, v)
        return fmtstr
    def __repr__(self):
        return '{}({bonds!r})'.format(self.__class__.__name__,
                                      **dict(bonds=self.bonds))
    @property
    def v1(self):
        """:class:`~sknano.core.math.Vector` :math:`\\mathbf{v}_1` \
        directed along the :math:`\\sigma`-orbital to the \
        nearest-neighbor :class:`~sknano.core.atoms.Atom` \
        in :class:`~sknano.core.atoms.Bond` 1."""
        return self._v1
    @property
    def v2(self):
        """:class:`~sknano.core.math.Vector` :math:`\\mathbf{v}_2` \
        directed along the :math:`\\sigma`-orbital to the \
        nearest-neighbor :class:`~sknano.core.atoms.Atom` \
        in :class:`~sknano.core.atoms.Bond` 2."""
        return self._v2
    @property
    def v3(self):
        """:class:`~sknano.core.math.Vector` :math:`\\mathbf{v}_3` \
        directed along the :math:`\\sigma`-orbital to the \
        nearest-neighbor :class:`~sknano.core.atoms.Atom` \
        in :class:`~sknano.core.atoms.Bond` 3."""
        return self._v3
    @property
    def Vv1v2v3(self):
        """Volume of the parallelepiped defined by \
        :class:`~sknano.core.math.Vector`\ s `v1`, `v2`, and `v3`.
        Computes the scalar triple product of vectors :math:`\\mathbf{v}_1`,
        :math:`\\mathbf{v}_2`, and :math:`\\mathbf{v}_3`:
        .. math::
           V_{v_1v_2v_3} =
           |\\mathbf{v}_1\\cdot(\\mathbf{v}_2\\times\\mathbf{v}_3)|
        """
        return np.abs(vec.scalar_triple_product(self.v1, self.v2, self.v3))
    @property
    def vpi(self):
        """General :math:`\\pi`-orbital axis vector \
        (:math:`\\mathbf{v}_{\\pi}`) formed by the \
        terminii of :class:`~sknano.core.math.Vector`\ s \
        :class:`~sknano.core.math.Vector`\ s `v1`, `v2`, and `v3`.
        .. math::
           \\mathbf{v}_{\\pi} =
           \\mathbf{v}_1 + \\mathbf{v}_2\\ + \\mathbf{v}_3
        """
        # NOTE(review): the code sums the *reciprocal* vectors, not
        # v1 + v2 + v3 as the docstring formula suggests — confirm
        # against the POAV2 reference before editing either.
        return self.reciprocal_v1 + self.reciprocal_v2 + self.reciprocal_v3
    @property
    def Vpi(self):
        """:math:`\\mathbf{v}_{\\pi}` unit :class:`~sknano.core.math.Vector`
        Returns the :math:`\\pi`-orbital axis vector
        (:math:`\\mathbf{v}_{\\pi}`) unit vector.
        .. math::
           \\mathbf{V}_{\\pi} =
           \\frac{\\mathbf{v}_{\\pi}}{|\\mathbf{v}_{\\pi}|}
        """
        return self.vpi.unit_vector
    @property
    def reciprocal_v1(self):
        """Reciprocal :class:`~sknano.core.math.Vector` \
        :math:`\\mathbf{v}_1^{*}`.
        Defined as:
        .. math::
           \\mathbf{v}_1^{*} =
           \\frac{\\mathbf{v}_2\\times\\mathbf{v}_3}
           {|\\mathbf{v}_1\\cdot(\\mathbf{v}_2\\times\\mathbf{v}_3)|}
        """
        # Promote numpy warnings to errors; if dividing by the triple
        # product warns (presumably a near-zero volume), fall back to the
        # unnormalized cross product.
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                return vec.cross(self.v2, self.v3) / self.Vv1v2v3
            except Warning:
                return vec.cross(self.v2, self.v3)
    @property
    def reciprocal_v2(self):
        """Reciprocal :class:`~sknano.core.math.Vector` \
        :math:`\\mathbf{v}_2^{*}`.
        Defined as:
        .. math::
           \\mathbf{v}_2^{*} =
           \\frac{\\mathbf{v}_3\\times\\mathbf{v}_1}
           {|\\mathbf{v}_1\\cdot(\\mathbf{v}_2\\times\\mathbf{v}_3)|}
        """
        # Same near-zero-volume fallback as reciprocal_v1.
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                return vec.cross(self.v3, self.v1) / self.Vv1v2v3
            except Warning:
                return vec.cross(self.v3, self.v1)
    @property
    def reciprocal_v3(self):
        """Reciprocal :class:`~sknano.core.math.Vector` \
        :math:`\\mathbf{v}_3^{*}`.
        Defined as:
        .. math::
           \\mathbf{v}_3^{*} =
           \\frac{\\mathbf{v}_1\\times\\mathbf{v}_2}
           {|\\mathbf{v}_1\\cdot(\\mathbf{v}_2\\times\\mathbf{v}_3)|}
        """
        # Same near-zero-volume fallback as reciprocal_v1.
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                return vec.cross(self.v1, self.v2) / self.Vv1v2v3
            except Warning:
                return vec.cross(self.v1, self.v2)
    @property
    def V1(self):
        """:math:`\\mathbf{v}_1` unit :class:`~sknano.core.math.Vector`
        .. math::
           \\mathbf{V}_1\\equiv\\frac{\\mathbf{v}_1}{|\\mathbf{v}_1|}
        """
        return self.bond1.unit_vector
    @property
    def V2(self):
        """:math:`\\mathbf{v}_2` unit :class:`~sknano.core.math.Vector`
        .. math::
           \\mathbf{V}_2\\equiv\\frac{\\mathbf{v}_2}{|\\mathbf{v}_2|}
        """
        return self.bond2.unit_vector
    @property
    def V3(self):
        """:math:`\\mathbf{v}_3` unit :class:`~sknano.core.math.Vector`
        .. math::
           \\mathbf{V}_3\\equiv\\frac{\\mathbf{v}_3}{|\\mathbf{v}_3|}
        """
        return self.bond3.unit_vector
    @property
    def R1(self):
        """:class:`~sknano.core.atoms.Bond` 1 \
        :class:`~sknano.core.math.Vector` \
        :attr:`~sknano.core.math.Vector.length`.
        """
        return self.bond1.length
    @property
    def R2(self):
        """:class:`~sknano.core.atoms.Bond` 2 \
        :class:`~sknano.core.math.Vector` \
        :attr:`~sknano.core.math.Vector.length`.
        """
        return self.bond2.length
    @property
    def R3(self):
        """:class:`~sknano.core.atoms.Bond` 3 \
        :class:`~sknano.core.math.Vector` \
        :attr:`~sknano.core.math.Vector.length`.
        """
        return self.bond3.length
    @property
    def t(self):
        """:math:`\\frac{1}{6}` the volume of the tetrahedron defined by \
        :class:`~sknano.core.math.Vector`\ s `v1`, `v2`, and `v3`.
        .. math::
           t =
           \\frac{|\\mathbf{v}_1\\cdot(\\mathbf{v}_2\\times\\mathbf{v}_3)|}{6}
        """
        return self.Vv1v2v3 / 6
    @property
    def T(self):
        """:math:`\\frac{1}{6}` the volume of the tetrahedron defined by \
        :class:`~sknano.core.math.Vector`\ s `V1`, `V2`, and `V3`.
        .. math::
           T =
           \\frac{|\\mathbf{V}_1\\cdot(\\mathbf{V}_2\\times\\mathbf{V}_3)|}{6}
        """
        return np.abs(vec.scalar_triple_product(self.V1, self.V2, self.V3) / 6)
    @property
    def A(self):
        """Magnitude of :math:`\\mathbf{v}_{\\pi}`."""
        return self.vpi.magnitude
    @property
    def H(self):
        """Altitude of tetrahedron."""
        return 3 * self.T / self.A
    @property
    def sigma_pi_angles(self):
        """List of :math:`\\theta_{\\sigma-\\pi}` angles."""
        return self._sigma_pi_angles
    @sigma_pi_angles.setter
    def sigma_pi_angles(self, value):
        """Set list of :math:`\\theta_{\\sigma-\\pi}` angles."""
        if not isinstance(value, list):
            raise TypeError('Expected a list')
        self._sigma_pi_angles = value
    @property
    def pyramidalization_angles(self):
        """List of pyramidalization :math:`\\theta_{P}` angles."""
        return self._pyramidalization_angles
    @pyramidalization_angles.setter
    def pyramidalization_angles(self, value):
        """Set list of :math:`\\theta_{P}` angles."""
        if not isinstance(value, list):
            raise TypeError('Expected a list')
        self._pyramidalization_angles = value
    @property
    def misalignment_angles(self):
        """List of misalignment :math:`\\phi_{i}` angles."""
        return self._misalignment_angles
    @misalignment_angles.setter
    def misalignment_angles(self, value):
        """Set list of :math:`\\phi` angles."""
        if not isinstance(value, list):
            raise TypeError('Expected a list')
        self._misalignment_angles = value
    def todict(self, rad2deg=False):
        """Return dictionary of `POAV` class attributes."""
        sigma_pi_angles = self.sigma_pi_angles
        pyramidalization_angles = self.pyramidalization_angles
        misalignment_angles = self.misalignment_angles
        if rad2deg:
            # NOTE(review): only these three angle sets are converted;
            # the sigma_bond_angle* entries below stay in radians even
            # when rad2deg=True — confirm whether that is intentional.
            sigma_pi_angles = np.degrees(sigma_pi_angles)
            pyramidalization_angles = np.degrees(pyramidalization_angles)
            misalignment_angles = np.degrees(misalignment_angles)
        od = OrderedDict(
            [('bond1', self.bond1.length),
             ('bond2', self.bond2.length),
             ('bond3', self.bond3.length),
             ('sigma_bond_angle12', self.sigma_bond_angle12),
             ('sigma_bond_angle23', self.sigma_bond_angle23),
             ('sigma_bond_angle31', self.sigma_bond_angle31),
             ('sigma_pi_angle1', sigma_pi_angles[0]),
             ('sigma_pi_angle2', sigma_pi_angles[1]),
             ('sigma_pi_angle3', sigma_pi_angles[2]),
             ('pyramidalization_angle1', pyramidalization_angles[0]),
             ('pyramidalization_angle2', pyramidalization_angles[1]),
             ('pyramidalization_angle3', pyramidalization_angles[2]),
             ('misalignment_angle1', misalignment_angles[0]),
             ('misalignment_angle2', misalignment_angles[1]),
             ('misalignment_angle3', misalignment_angles[2]),
             ('T', self.T), ('H', self.H), ('A', self.A)])
        return od
class POAV1(POAV):
    """:class:`POAV` sub-class for POAV1 analysis."""

    def __init__(self, *args):
        super().__init__(*args)
        # POAV1 builds the pi-orbital axis from the *unit* bond vectors.
        self._v1, self._v2, self._v3 = self.V1, self.V2, self.V3

    @property
    def m(self):
        """:math:`s` character content of the :math:`\\pi`-orbital \
        (:math:`s^mp`) for :math:`sp^3` normalized hybridization."""
        c2 = np.cos(np.mean(self.sigma_pi_angles)) ** 2
        return (2 * c2) / (1 - 3 * c2)

    @property
    def n(self):
        """:math:`p` character content of the :math:`\\sigma`-orbitals \
        (:math:`sp^n`) for :math:`sp^3` normalized hybridization."""
        return 2 + 3 * self.m

    def todict(self, rad2deg=False):
        """Return dictionary of `POAV1` class attributes."""
        d = super().todict(rad2deg=rad2deg)
        d.update([('m', self.m), ('n', self.n)])
        return d
class POAV2(POAV):
    """:class:`POAV` sub-class for POAV2 analysis."""
    def __init__(self, *args):
        super().__init__(*args)
        vi = []
        # For each bond, scale its unit vector by the cosine of the bond
        # angle *opposite* it: np.in1d masks out the two angles formed
        # with this bond, leaving the angle between the other two bonds.
        for bond, pair in zip(self.bonds, self.bond_angle_pairs):
            cosa = \
                np.cos(self.bond_angles[
                    np.in1d(self.bonds, pair, invert=True)])[0]
            vi.append(cosa * bond.vector.unit_vector)
        self._v1 = vi[0]
        self._v2 = vi[1]
        self._v3 = vi[2]
    @property
    def T(self):
        """:math:`\\frac{1}{6}` the volume of the tetrahedron defined by \
        :class:`~sknano.core.math.Vector`\ s `V1`, `V2`, and `V3`.
        .. math::
           T =
           \\cos\\theta_{12}\\cos\\theta_{23}\\cos\\theta_{31}\\times
           \\frac{|\\mathbf{V}_1\\cdot(\\mathbf{V}_2\\times\\mathbf{V}_3)|}{6}
        """
        # Product of the three bond-angle cosines times the base-class T.
        return -functools.reduce(operator.mul,
                                 np.cos(self.bonds.angles), 1) * \
            super().T
    @property
    def n1(self):
        """:math:`p` character content of the :math:`\\sigma`-orbital \
        hybridization for :math:`\\sigma_1` bond."""
        return -self.cosa23 / (self.cosa12 * self.cosa31)
    @property
    def n2(self):
        """:math:`p` character content of the :math:`\\sigma`-orbital \
        hybridization for :math:`\\sigma_2` bond."""
        return -self.cosa31 / (self.cosa12 * self.cosa23)
    @property
    def n3(self):
        """:math:`p` character content of the :math:`\\sigma`-orbital \
        hybridization for :math:`\\sigma_3` bond."""
        return -self.cosa12 / (self.cosa31 * self.cosa23)
    @property
    def m(self):
        """:math:`s` character content of the :math:`\\pi`-orbital \
        (:math:`s^mp`) for :math:`sp^3` normalized hybridization."""
        # s-fractions of the three sigma orbitals; their sum fixes the
        # remaining s character of the pi orbital.
        s1 = 1 / (1 + self.n1)
        s2 = 1 / (1 + self.n2)
        s3 = 1 / (1 + self.n3)
        return 1 / sum([s1, s2, s3]) - 1
    def todict(self, rad2deg=False):
        """Return dictionary of `POAV2` class attributes."""
        super_dict = super().todict(rad2deg=rad2deg)
        super_dict.update(
            [('m', self.m), ('n1', self.n1), ('n2', self.n2), ('n3', self.n3)])
        return super_dict
class POAVR(POAV):
    """:class:`POAV` sub-class for POAVR analysis."""

    def __init__(self, *args):
        super().__init__(*args)
        # Scale each unit bond vector by its bond length R_i.
        self._v1, self._v2, self._v3 = \
            [R * V for R, V in zip((self.R1, self.R2, self.R3),
                                   (self.V1, self.V2, self.V3))]

    @property
    def T(self):
        """:math:`\\frac{1}{6}` the volume of the tetrahedron defined by \
        :class:`~sknano.core.math.Vector`\ s `V1`, `V2`, and `V3`,
        scaled by the product of the three bond lengths.
        .. math::
           T =
           R_1 R_2 R_3 \\times
           \\frac{|\\mathbf{V}_1\\cdot(\\mathbf{V}_2\\times\\mathbf{V}_3)|}{6}
        """
        return self.R1 * self.R2 * self.R3 * super().T
class POAVAtomMixin:
    """Mixin class for :class:`POAV` analysis."""

    @property
    def POAV1(self):
        """:class:`~sknano.utils.analysis.POAV1` instance."""
        # None until compute_POAVs() has assigned one.
        return getattr(self, '_POAV1', None)

    @POAV1.setter
    def POAV1(self, value):
        """Set :class:`~sknano.utils.analysis.POAV1` instance."""
        if not isinstance(value, POAV1):
            raise TypeError('Expected a `POAV1` instance.')
        self._POAV1 = value

    @property
    def POAV2(self):
        """:class:`~sknano.utils.analysis.POAV2` instance."""
        return getattr(self, '_POAV2', None)

    @POAV2.setter
    def POAV2(self, value):
        """Set :class:`~sknano.utils.analysis.POAV2` instance."""
        if not isinstance(value, POAV2):
            raise TypeError('Expected a `POAV2` instance.')
        self._POAV2 = value

    @property
    def POAVR(self):
        """:class:`~sknano.utils.analysis.POAVR` instance."""
        return getattr(self, '_POAVR', None)

    @POAVR.setter
    def POAVR(self, value):
        """Set :class:`~sknano.utils.analysis.POAVR` instance."""
        if not isinstance(value, POAVR):
            raise TypeError('Expected a `POAVR` instance.')
        self._POAVR = value
class POAVAtomsMixin:
    """Mixin class for POAV analysis."""
    # @timethis
    def compute_POAVs(self):
        """Compute `POAV1`, `POAV2`, `POAVR`."""
        # Refresh neighbor/bond attributes first (provided by the class
        # this is mixed into — presumably an Atoms container; confirm).
        super().update_attrs()
        POAV_classes = {'POAV1': POAV1, 'POAV2': POAV2, 'POAVR': POAVR}
        # Pass 1: attach a POAV1/POAV2/POAVR instance to every
        # 3-coordinated atom.
        for atom in self:
            # the central atom must have 3 bonds for POAV analysis.
            if atom.bonds.Nbonds == 3:
                for POAV_name, POAV_class in list(POAV_classes.items()):
                    setattr(atom, POAV_name, POAV_class(atom.bonds))
        # Pass 2: with every atom's POAV in place, compute the angle
        # sets (misalignment needs the *neighbor's* POAV, hence the
        # separate pass).
        for atom in self:
            # the central atom must have 3 bonds for POAV analysis.
            if atom.bonds.Nbonds == 3:
                # NOTE(review): local name POAV shadows the module-level
                # POAV class within this loop body.
                for POAV_name in ('POAV1', 'POAV2', 'POAVR'):
                    POAV = getattr(atom, POAV_name)
                    sigma_pi_angles = []
                    pyramidalization_angles = []
                    misalignment_angles = []
                    for bond, NN in zip(atom.bonds, atom.NN):
                        # first compute the pyramidalization angle
                        sigma_pi_angle = vec.angle(POAV.Vpi, bond.vector)
                        # Fold into [pi/2, pi] so the pyramidalization
                        # angle below is non-negative.
                        if sigma_pi_angle < np.pi / 2:
                            sigma_pi_angle = np.pi - sigma_pi_angle
                        sigma_pi_angles.append(sigma_pi_angle)
                        pyramidalization_angles.append(
                            sigma_pi_angle - np.pi / 2)
                        # the bonded atom must have a POAV to compute the
                        # misalignment angles
                        if getattr(NN, POAV_name) is not None:
                            NN_POAV = getattr(NN, POAV_name)
                            # compute vector that is orthogonal to the plane
                            # defined by the bond vector and the POAV of the
                            # center atom.
                            nvec = vec.cross(bond.vector, POAV.Vpi)
                            # the misalignment angle is the angle between the
                            # nearest neighbor's POAV and the plane defined by
                            # the bond vector and the POAV of the center atom,
                            # which is pi/2 minus the angle between
                            # the NN POAV and the normal vector to the plane
                            # computed above.
                            misalignment_angles.append(np.abs(
                                np.pi / 2 - vec.angle(NN_POAV.Vpi, nvec)))
                        else:
                            # Neighbor is not 3-coordinated: no POAV, so
                            # the misalignment is undefined.
                            misalignment_angles.append(np.nan)
                    POAV.pyramidalization_angles = pyramidalization_angles
                    POAV.misalignment_angles = misalignment_angles
                    POAV.sigma_pi_angles = sigma_pi_angles
    @property
    def POAV1(self):
        """List of :class:`~sknano.core.atoms.POAVAtom` :class:`POAV1` \
        :attr:`~sknano.core.atoms.POAVAtom.POAV1` attribute."""
        return [atom.POAV1 for atom in self if atom.POAV1 is not None]
    @property
    def POAV2(self):
        """List of :class:`~sknano.core.atoms.POAVAtom` :class:`POAV2` \
        :attr:`~sknano.core.atoms.POAVAtom.POAV2` attribute."""
        return [atom.POAV2 for atom in self if atom.POAV2 is not None]
    @property
    def POAVR(self):
        """List of :class:`~sknano.core.atoms.POAVAtom` :class:`POAVR` \
        :attr:`~sknano.core.atoms.POAVAtom.POAVR` attribute."""
        return [atom.POAVR for atom in self if atom.POAVR is not None]
    def get_POAV_attr(self, POAV_class, attr):
        """Return list of :class:`~sknano.core.atoms.POAVAtom` :class:`POAV1` \
        :class:`POAV2` or :class:`POAVR` attribute.
        Parameters
        ----------
        POAV_class : :class:`~python:str`
            Name of the per-atom POAV attribute ('POAV1', 'POAV2', 'POAVR').
        attr : :class:`~python:str`
            Attribute of that POAV instance to collect.
        Returns
        -------
        :class:`~python:list`
        """
        return [getattr(getattr(atom, POAV_class), attr) for atom in self
                if getattr(atom, POAV_class) is not None]
|
bsd-2-clause
|
xinwu/horizon
|
openstack_dashboard/test/integration_tests/tests/test_sahara_job_binaries.py
|
50
|
3397
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.pages.project.data_processing\
import jobbinariespage
from openstack_dashboard.test.integration_tests.tests import decorators
# Form-field template for a job binary stored in Sahara's internal
# database: keys are JobbinariesPage form-field constants, values the
# inputs the test will type into the creation dialog.
JOB_BINARY_INTERNAL = {
    # Size of binary name is limited to 50 characters
    jobbinariespage.JobbinariesPage.BINARY_NAME:
        helpers.gen_random_resource_name(resource='jobbinary',
                                         timestamp=False)[0:50],
    jobbinariespage.JobbinariesPage.BINARY_STORAGE_TYPE:
        "Internal database",
    jobbinariespage.JobbinariesPage.BINARY_URL: None,
    jobbinariespage.JobbinariesPage.INTERNAL_BINARY:
        "*Create a script",
    jobbinariespage.JobbinariesPage.BINARY_PATH: None,
    jobbinariespage.JobbinariesPage.SCRIPT_NAME:
        helpers.gen_random_resource_name(resource='scriptname',
                                         timestamp=False),
    jobbinariespage.JobbinariesPage.SCRIPT_TEXT: "test_script_text",
    jobbinariespage.JobbinariesPage.USERNAME: None,
    jobbinariespage.JobbinariesPage.PASSWORD: None,
    jobbinariespage.JobbinariesPage.DESCRIPTION: "test description"
}
@decorators.services_required("sahara")
class TestSaharaJobBinary(helpers.TestCase):
    """Integration test for the Sahara job-binaries panel (skipped when
    the 'sahara' service is unavailable)."""
    def _sahara_create_delete_job_binary(self, job_binary_template):
        """Create a job binary from *job_binary_template*, verify it
        appears in the table, then delete it and verify removal."""
        job_name = \
            job_binary_template[jobbinariespage.JobbinariesPage.BINARY_NAME]
        # create job binary
        job_binary_pg = self.home_pg.go_to_dataprocessing_jobbinariespage()
        self.assertFalse(job_binary_pg.is_job_binary_present(job_name),
                         "Job binary was present in the binaries table"
                         " before its creation.")
        job_binary_pg.create_job_binary(**job_binary_template)
        # verify that job is created without problems
        self.assertFalse(job_binary_pg.is_error_message_present(),
                         "Error message occurred during binary job creation.")
        self.assertTrue(job_binary_pg.is_job_binary_present(job_name),
                        "Job binary is not in the binaries job table after"
                        " its creation.")
        # delete binary job
        job_binary_pg.delete_job_binary(job_name)
        # verify that job was successfully deleted
        self.assertFalse(job_binary_pg.is_error_message_present(),
                         "Error message occurred during binary job deletion.")
        self.assertFalse(job_binary_pg.is_job_binary_present(job_name),
                         "Job binary was not removed from binaries job table.")
    def test_sahara_create_delete_job_binary_internaldb(self):
        """Test the creation of a Job Binary in the Internal DB."""
        self._sahara_create_delete_job_binary(JOB_BINARY_INTERNAL)
|
apache-2.0
|
freenas/samba
|
third_party/dnspython/dns/resolver.py
|
47
|
43033
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS stub resolver.
@var default_resolver: The default resolver object
@type default_resolver: dns.resolver.Resolver object"""
import socket
import sys
import time
import dns.exception
import dns.ipv4
import dns.ipv6
import dns.message
import dns.name
import dns.query
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.reversename
if sys.platform == 'win32':
import _winreg
class NXDOMAIN(dns.exception.DNSException):
    """Raised when the query name does not exist."""
# The definition of the Timeout exception has moved from here to the
# dns.exception module.  We keep dns.resolver.Timeout defined for
# backwards compatibility, so existing ``except dns.resolver.Timeout``
# handlers keep working.
Timeout = dns.exception.Timeout
class NoAnswer(dns.exception.DNSException):
    """Raised when the response did not contain an answer to the question."""
class NoNameservers(dns.exception.DNSException):
    """Raised when no non-broken nameservers are available to answer the
    query."""
class NotAbsolute(dns.exception.DNSException):
    """Raised when an absolute domain name is required but a relative
    name was provided."""
class NoRootSOA(dns.exception.DNSException):
    """Raised when, for some reason, there is no SOA at the root name.
    This should never happen!"""
class NoMetaqueries(dns.exception.DNSException):
    """Raised when a metaquery is attempted; metaqueries are not allowed."""
class Answer(object):
    """DNS stub resolver answer
    Instances of this class bundle up the result of a successful DNS
    resolution.
    For convenience, the answer object implements much of the sequence
    protocol, forwarding to its rrset.  E.g. "for a in answer" is
    equivalent to "for a in answer.rrset", "answer[i]" is equivalent
    to "answer.rrset[i]", and "answer[i:j]" is equivalent to
    "answer.rrset[i:j]".
    Note that CNAMEs or DNAMEs in the response may mean that answer
    node's name might not be the query name.
    @ivar qname: The query name
    @type qname: dns.name.Name object
    @ivar rdtype: The query type
    @type rdtype: int
    @ivar rdclass: The query class
    @type rdclass: int
    @ivar response: The response message
    @type response: dns.message.Message object
    @ivar rrset: The answer
    @type rrset: dns.rrset.RRset object
    @ivar expiration: The time when the answer expires
    @type expiration: float (seconds since the epoch)
    @ivar canonical_name: The canonical name of the query name
    @type canonical_name: dns.name.Name object
    """
    def __init__(self, qname, rdtype, rdclass, response,
                 raise_on_no_answer=True):
        self.qname = qname
        self.rdtype = rdtype
        self.rdclass = rdclass
        self.response = response
        # min_ttl tracks the smallest TTL seen along the CNAME chain /
        # SOA walk; -1 means "not yet set".
        min_ttl = -1
        rrset = None
        # Follow CNAMEs toward the answer, bounded at 15 hops to avoid
        # looping forever on a malicious/broken CNAME cycle.
        for count in xrange(0, 15):
            try:
                rrset = response.find_rrset(response.answer, qname,
                                            rdclass, rdtype)
                if min_ttl == -1 or rrset.ttl < min_ttl:
                    min_ttl = rrset.ttl
                break
            except KeyError:
                # No direct answer at this name; chase a CNAME (unless
                # CNAME itself was the query type).
                if rdtype != dns.rdatatype.CNAME:
                    try:
                        crrset = response.find_rrset(response.answer,
                                                     qname,
                                                     rdclass,
                                                     dns.rdatatype.CNAME)
                        if min_ttl == -1 or crrset.ttl < min_ttl:
                            min_ttl = crrset.ttl
                        # Take the first CNAME target and restart the
                        # search at that name.
                        for rd in crrset:
                            qname = rd.target
                            break
                        continue
                    except KeyError:
                        if raise_on_no_answer:
                            raise NoAnswer
                if raise_on_no_answer:
                    raise NoAnswer
        if rrset is None and raise_on_no_answer:
            raise NoAnswer
        self.canonical_name = qname
        self.rrset = rrset
        if rrset is None:
            # Negative answer: derive the (negative-caching) TTL from
            # the SOA in the authority section.
            while 1:
                # Look for a SOA RR whose owner name is a superdomain
                # of qname.
                try:
                    srrset = response.find_rrset(response.authority, qname,
                                                 rdclass, dns.rdatatype.SOA)
                    if min_ttl == -1 or srrset.ttl < min_ttl:
                        min_ttl = srrset.ttl
                    if srrset[0].minimum < min_ttl:
                        min_ttl = srrset[0].minimum
                    break
                except KeyError:
                    try:
                        qname = qname.parent()
                    except dns.name.NoParent:
                        break
        self.expiration = time.time() + min_ttl
    def __getattr__(self, attr):
        # Forward selected attribute reads to the underlying rrset.
        # NOTE(review): __init__ sets self.rdclass/self.rdtype directly,
        # so the 'rdclass'/'rdtype' branches here are normally shadowed
        # by the instance attributes and unreachable.
        if attr == 'name':
            return self.rrset.name
        elif attr == 'ttl':
            return self.rrset.ttl
        elif attr == 'covers':
            return self.rrset.covers
        elif attr == 'rdclass':
            return self.rrset.rdclass
        elif attr == 'rdtype':
            return self.rrset.rdtype
        else:
            raise AttributeError(attr)
    def __len__(self):
        return len(self.rrset)
    def __iter__(self):
        return iter(self.rrset)
    def __getitem__(self, i):
        return self.rrset[i]
    def __delitem__(self, i):
        del self.rrset[i]
    # __getslice__/__delslice__ are Python 2 slice hooks (this module
    # targets Python 2: see xrange above).
    def __getslice__(self, i, j):
        return self.rrset[i:j]
    def __delslice__(self, i, j):
        del self.rrset[i:j]
class Cache(object):
    """Simple DNS answer cache.
    @ivar data: A dictionary of cached data
    @type data: dict
    @ivar cleaning_interval: The number of seconds between cleanings.  The
    default is 300 (5 minutes).
    @type cleaning_interval: float
    @ivar next_cleaning: The time the cache should next be cleaned (in seconds
    since the epoch.)
    @type next_cleaning: float
    """

    def __init__(self, cleaning_interval=300.0):
        """Initialize a DNS cache.
        @param cleaning_interval: the number of seconds between periodic
        cleanings.  The default is 300.0
        @type cleaning_interval: float.
        """
        self.data = {}
        self.cleaning_interval = cleaning_interval
        self.next_cleaning = time.time() + self.cleaning_interval

    def maybe_clean(self):
        """Clean the cache if it's time to do so."""
        now = time.time()
        if self.next_cleaning <= now:
            # Collect expired keys first, then delete, so we never
            # mutate the dict while iterating over it.
            # (items() replaces Python-2-only iteritems(); it works on
            # both Python 2 and 3.)
            keys_to_delete = []
            for (k, v) in self.data.items():
                if v.expiration <= now:
                    keys_to_delete.append(k)
            for k in keys_to_delete:
                del self.data[k]
            now = time.time()
            self.next_cleaning = now + self.cleaning_interval

    def get(self, key):
        """Get the answer associated with I{key}.  Returns None if
        no answer is cached for the key, or if the cached answer has
        expired.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @rtype: dns.resolver.Answer object or None
        """
        self.maybe_clean()
        v = self.data.get(key)
        if v is None or v.expiration <= time.time():
            return None
        return v

    def put(self, key, value):
        """Associate key and value in the cache.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @param value: The answer being cached
        @type value: dns.resolver.Answer object
        """
        self.maybe_clean()
        self.data[key] = value

    def flush(self, key=None):
        """Flush the cache.
        If I{key} is specified, only that item is flushed.  Otherwise
        the entire cache is flushed.
        @param key: the key to flush
        @type key: (dns.name.Name, int, int) tuple or None
        """
        if key is not None:
            # 'in' replaces dict.has_key(), which was removed in Python 3.
            if key in self.data:
                del self.data[key]
        else:
            self.data = {}
            self.next_cleaning = time.time() + self.cleaning_interval
class LRUCacheNode(object):
    """A node in LRUCache's circular doubly-linked list."""

    def __init__(self, key, value):
        self.key = key
        self.value = value
        # A lone node is its own neighbor (circular list of one).
        self.prev = self.next = self

    def link_before(self, node):
        """Insert this node immediately before *node*."""
        self.prev = node.prev
        self.next = node
        node.prev.next = self
        node.prev = self

    def link_after(self, node):
        """Insert this node immediately after *node*."""
        self.prev = node
        self.next = node.next
        node.next.prev = self
        node.next = self

    def unlink(self):
        """Remove this node from the list by joining its neighbors."""
        self.next.prev = self.prev
        self.prev.next = self.next
class LRUCache(object):
    """Bounded least-recently-used DNS answer cache.
    This cache is better than the simple cache (above) if you're
    running a web crawler or other process that does a lot of
    resolutions.  The LRUCache has a maximum number of nodes, and when
    it is full, the least-recently used node is removed to make space
    for a new one.
    @ivar data: A dictionary of cached data
    @type data: dict
    @ivar sentinel: sentinel node for circular doubly linked list of nodes
    @type sentinel: LRUCacheNode object
    @ivar max_size: The maximum number of nodes
    @type max_size: int
    """

    def __init__(self, max_size=100000):
        """Initialize a DNS cache.
        @param max_size: The maximum number of nodes to cache; the default is
        100000.  Must be > 1.
        @type max_size: int
        """
        self.data = {}
        self.set_max_size(max_size)
        # The sentinel anchors the ring: sentinel.next is the most
        # recently used node, sentinel.prev the least recently used.
        self.sentinel = LRUCacheNode(None, None)

    def set_max_size(self, max_size):
        # Clamp to at least 1 so the eviction loop in put() terminates.
        if max_size < 1:
            max_size = 1
        self.max_size = max_size

    def get(self, key):
        """Get the answer associated with I{key}.  Returns None if
        no answer is cached for the key, or if the cached answer has
        expired (expired entries are removed).
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @rtype: dns.resolver.Answer object or None
        """
        node = self.data.get(key)
        if node is None:
            return None
        # Unlink because we're either going to move the node to the front
        # of the LRU list or we're going to free it.
        node.unlink()
        if node.value.expiration <= time.time():
            del self.data[node.key]
            return None
        node.link_after(self.sentinel)
        return node.value

    def put(self, key, value):
        """Associate key and value in the cache, evicting the least
        recently used entries if the cache is full.
        @param key: the key
        @type key: (dns.name.Name, int, int) tuple whose values are the
        query name, rdtype, and rdclass.
        @param value: The answer being cached
        @type value: dns.resolver.Answer object
        """
        node = self.data.get(key)
        if node is not None:
            node.unlink()
            del self.data[node.key]
        # Evict from the cold end (sentinel.prev) until there is room.
        while len(self.data) >= self.max_size:
            node = self.sentinel.prev
            node.unlink()
            del self.data[node.key]
        node = LRUCacheNode(key, value)
        node.link_after(self.sentinel)
        self.data[key] = node

    def flush(self, key=None):
        """Flush the cache.
        If I{key} is specified, only that item is flushed.  Otherwise
        the entire cache is flushed.
        @param key: the key to flush
        @type key: (dns.name.Name, int, int) tuple or None
        """
        if key is not None:
            node = self.data.get(key)
            if node is not None:
                node.unlink()
                del self.data[node.key]
        else:
            # Walk the ring once and clear each node's links —
            # presumably to break the prev/next reference cycles so the
            # nodes can be reclaimed promptly.
            node = self.sentinel.next
            while node != self.sentinel:
                next_node = node.next  # renamed: don't shadow builtin next
                node.prev = None
                node.next = None
                node = next_node
            self.data = {}
class Resolver(object):
"""DNS stub resolver
@ivar domain: The domain of this host
@type domain: dns.name.Name object
@ivar nameservers: A list of nameservers to query. Each nameserver is
a string which contains the IP address of a nameserver.
@type nameservers: list of strings
@ivar search: The search list. If the query name is a relative name,
the resolver will construct an absolute query name by appending the search
names one by one to the query name.
@type search: list of dns.name.Name objects
@ivar port: The port to which to send queries. The default is 53.
@type port: int
@ivar timeout: The number of seconds to wait for a response from a
server, before timing out.
@type timeout: float
@ivar lifetime: The total number of seconds to spend trying to get an
answer to the question. If the lifetime expires, a Timeout exception
will occur.
@type lifetime: float
@ivar keyring: The TSIG keyring to use. The default is None.
@type keyring: dict
@ivar keyname: The TSIG keyname to use. The default is None.
@type keyname: dns.name.Name object
@ivar keyalgorithm: The TSIG key algorithm to use. The default is
dns.tsig.default_algorithm.
@type keyalgorithm: string
@ivar edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@ivar ednsflags: The EDNS flags
@type ednsflags: int
@ivar payload: The EDNS payload size. The default is 0.
@type payload: int
@ivar cache: The cache to use. The default is None.
@type cache: dns.resolver.Cache object
"""
def __init__(self, filename='/etc/resolv.conf', configure=True):
"""Initialize a resolver instance.
@param filename: The filename of a configuration file in
standard /etc/resolv.conf format. This parameter is meaningful
only when I{configure} is true and the platform is POSIX.
@type filename: string or file object
@param configure: If True (the default), the resolver instance
is configured in the normal fashion for the operating system
the resolver is running on. (I.e. a /etc/resolv.conf file on
POSIX systems and from the registry on Windows systems.)
@type configure: bool"""
self.reset()
if configure:
if sys.platform == 'win32':
self.read_registry()
elif filename:
self.read_resolv_conf(filename)
def reset(self):
"""Reset all resolver configuration to the defaults."""
self.domain = \
dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
if len(self.domain) == 0:
self.domain = dns.name.root
self.nameservers = []
self.search = []
self.port = 53
self.timeout = 2.0
self.lifetime = 30.0
self.keyring = None
self.keyname = None
self.keyalgorithm = dns.tsig.default_algorithm
self.edns = -1
self.ednsflags = 0
self.payload = 0
self.cache = None
def read_resolv_conf(self, f):
"""Process f as a file in the /etc/resolv.conf format. If f is
a string, it is used as the name of the file to open; otherwise it
is treated as the file itself."""
if isinstance(f, str) or isinstance(f, unicode):
try:
f = open(f, 'r')
except IOError:
# /etc/resolv.conf doesn't exist, can't be read, etc.
# We'll just use the default resolver configuration.
self.nameservers = ['127.0.0.1']
return
want_close = True
else:
want_close = False
try:
for l in f:
if len(l) == 0 or l[0] == '#' or l[0] == ';':
continue
tokens = l.split()
if len(tokens) == 0:
continue
if tokens[0] == 'nameserver':
self.nameservers.append(tokens[1])
elif tokens[0] == 'domain':
self.domain = dns.name.from_text(tokens[1])
elif tokens[0] == 'search':
for suffix in tokens[1:]:
self.search.append(dns.name.from_text(suffix))
finally:
if want_close:
f.close()
if len(self.nameservers) == 0:
self.nameservers.append('127.0.0.1')
def _determine_split_char(self, entry):
#
# The windows registry irritatingly changes the list element
# delimiter in between ' ' and ',' (and vice-versa) in various
# versions of windows.
#
if entry.find(' ') >= 0:
split_char = ' '
elif entry.find(',') >= 0:
split_char = ','
else:
# probably a singleton; treat as a space-separated list.
split_char = ' '
return split_char
    def _config_win32_nameservers(self, nameservers):
        """Configure a NameServer registry entry."""
        # we call str() on nameservers to convert it from unicode to ascii
        nameservers = str(nameservers)
        split_char = self._determine_split_char(nameservers)
        ns_list = nameservers.split(split_char)
        for ns in ns_list:
            # Preserve order, but avoid duplicates when several registry
            # keys list the same server.
            if not ns in self.nameservers:
                self.nameservers.append(ns)
    def _config_win32_domain(self, domain):
        """Configure a Domain registry entry."""
        # we call str() on domain to convert it from unicode to ascii
        # before handing it to the dns.name text parser.
        self.domain = dns.name.from_text(str(domain))
    def _config_win32_search(self, search):
        """Configure a Search registry entry."""
        # we call str() on search to convert it from unicode to ascii
        search = str(search)
        split_char = self._determine_split_char(search)
        search_list = search.split(split_char)
        for s in search_list:
            # Keep the search list duplicate-free while preserving order.
            if not s in self.search:
                self.search.append(dns.name.from_text(s))
    def _config_win32_fromkey(self, key):
        """Extract DNS info from a registry key."""
        # Prefer statically configured values ('NameServer'/'Domain');
        # fall back to DHCP-provided ones ('DhcpNameServer'/'DhcpDomain')
        # only when no static NameServer entry exists.
        try:
            servers, rtype = _winreg.QueryValueEx(key, 'NameServer')
        except WindowsError:
            servers = None
        if servers:
            self._config_win32_nameservers(servers)
            try:
                dom, rtype = _winreg.QueryValueEx(key, 'Domain')
                if dom:
                    self._config_win32_domain(dom)
            except WindowsError:
                pass
        else:
            try:
                servers, rtype = _winreg.QueryValueEx(key, 'DhcpNameServer')
            except WindowsError:
                servers = None
            if servers:
                self._config_win32_nameservers(servers)
                try:
                    dom, rtype = _winreg.QueryValueEx(key, 'DhcpDomain')
                    if dom:
                        self._config_win32_domain(dom)
                except WindowsError:
                    pass
        # The search list is read regardless of which branch was taken.
        try:
            search, rtype = _winreg.QueryValueEx(key, 'SearchList')
        except WindowsError:
            search = None
        if search:
            self._config_win32_search(search)
    def read_registry(self):
        """Extract resolver configuration from the Windows registry."""
        lm = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        # On NT-family systems we additionally scan per-interface keys.
        want_scan = False
        try:
            try:
                # XP, 2000
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters')
                want_scan = True
            except EnvironmentError:
                # ME
                tcp_params = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\VxD\MSTCP')
            try:
                self._config_win32_fromkey(tcp_params)
            finally:
                tcp_params.Close()
            if want_scan:
                interfaces = _winreg.OpenKey(lm,
                                             r'SYSTEM\CurrentControlSet'
                                             r'\Services\Tcpip\Parameters'
                                             r'\Interfaces')
                try:
                    i = 0
                    # EnumKey raises EnvironmentError past the last
                    # subkey, which terminates this loop.
                    while True:
                        try:
                            guid = _winreg.EnumKey(interfaces, i)
                            i += 1
                            key = _winreg.OpenKey(interfaces, guid)
                            # Skip NICs that are disabled in the registry.
                            if not self._win32_is_nic_enabled(lm, guid, key):
                                continue
                            try:
                                self._config_win32_fromkey(key)
                            finally:
                                key.Close()
                        except EnvironmentError:
                            break
                finally:
                    interfaces.Close()
        finally:
            lm.Close()
    def _win32_is_nic_enabled(self, lm, guid, interface_key):
        """Return True if the NIC identified by guid appears enabled."""
        # Look in the Windows Registry to determine whether the network
        # interface corresponding to the given guid is enabled.
        #
        # (Code contributed by Paul Marks, thanks!)
        #
        try:
            # This hard-coded location seems to be consistent, at least
            # from Windows 2000 through Vista.
            connection_key = _winreg.OpenKey(
                lm,
                r'SYSTEM\CurrentControlSet\Control\Network'
                r'\{4D36E972-E325-11CE-BFC1-08002BE10318}'
                r'\%s\Connection' % guid)
            try:
                # The PnpInstanceID points to a key inside Enum
                (pnp_id, ttype) = _winreg.QueryValueEx(
                    connection_key, 'PnpInstanceID')
                if ttype != _winreg.REG_SZ:
                    raise ValueError
                device_key = _winreg.OpenKey(
                    lm, r'SYSTEM\CurrentControlSet\Enum\%s' % pnp_id)
                try:
                    # Get ConfigFlags for this device
                    (flags, ttype) = _winreg.QueryValueEx(
                        device_key, 'ConfigFlags')
                    if ttype != _winreg.REG_DWORD:
                        raise ValueError
                    # Based on experimentation, bit 0x1 indicates that the
                    # device is disabled.
                    return not (flags & 0x1)
                finally:
                    device_key.Close()
            finally:
                connection_key.Close()
        except (EnvironmentError, ValueError):
            # Pre-vista, enabled interfaces seem to have a non-empty
            # NTEContextList; this was how dnspython detected enabled
            # nics before the code above was contributed. We've retained
            # the old method since we don't know if the code above works
            # on Windows 95/98/ME.
            try:
                (nte, ttype) = _winreg.QueryValueEx(interface_key,
                                                    'NTEContextList')
                return nte is not None
            except WindowsError:
                return False
def _compute_timeout(self, start):
now = time.time()
if now < start:
if start - now > 1:
# Time going backwards is bad. Just give up.
raise Timeout
else:
# Time went backwards, but only a little. This can
# happen, e.g. under vmware with older linux kernels.
# Pretend it didn't happen.
now = start
duration = now - start
if duration >= self.lifetime:
raise Timeout
return min(self.lifetime - duration, self.timeout)
    def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
              tcp=False, source=None, raise_on_no_answer=True):
        """Query nameservers to find the answer to the question.
        The I{qname}, I{rdtype}, and I{rdclass} parameters may be objects
        of the appropriate type, or strings that can be converted into objects
        of the appropriate type. E.g. For I{rdtype} the integer 2 and the
        the string 'NS' both mean to query for records with DNS rdata type NS.
        @param qname: the query name
        @type qname: dns.name.Name object or string
        @param rdtype: the query type
        @type rdtype: int or string
        @param rdclass: the query class
        @type rdclass: int or string
        @param tcp: use TCP to make the query (default is False).
        @type tcp: bool
        @param source: bind to this IP address (defaults to machine default IP).
        @type source: IP address in dotted quad notation
        @param raise_on_no_answer: raise NoAnswer if there's no answer
        (defaults is True).
        @type raise_on_no_answer: bool
        @rtype: dns.resolver.Answer instance
        @raises Timeout: no answers could be found in the specified lifetime
        @raises NXDOMAIN: the query name does not exist
        @raises NoAnswer: the response did not contain an answer and
        raise_on_no_answer is True.
        @raises NoNameservers: no non-broken nameservers are available to
        answer the question."""
        # Normalize string arguments into their dns.* object forms.
        if isinstance(qname, (str, unicode)):
            qname = dns.name.from_text(qname, None)
        if isinstance(rdtype, (str, unicode)):
            rdtype = dns.rdatatype.from_text(rdtype)
        if dns.rdatatype.is_metatype(rdtype):
            raise NoMetaqueries
        if isinstance(rdclass, (str, unicode)):
            rdclass = dns.rdataclass.from_text(rdclass)
        if dns.rdataclass.is_metaclass(rdclass):
            raise NoMetaqueries
        # Build the list of names to try: absolute names are used as-is;
        # relative names are extended with the search list (or the default
        # domain), and multi-label relative names are also tried as-is.
        qnames_to_try = []
        if qname.is_absolute():
            qnames_to_try.append(qname)
        else:
            if len(qname) > 1:
                qnames_to_try.append(qname.concatenate(dns.name.root))
            if self.search:
                for suffix in self.search:
                    qnames_to_try.append(qname.concatenate(suffix))
            else:
                qnames_to_try.append(qname.concatenate(self.domain))
        all_nxdomain = True
        start = time.time()
        for qname in qnames_to_try:
            # Serve from the cache when possible.
            if self.cache:
                answer = self.cache.get((qname, rdtype, rdclass))
                if not answer is None:
                    if answer.rrset is None and raise_on_no_answer:
                        raise NoAnswer
                    else:
                        return answer
            request = dns.message.make_query(qname, rdtype, rdclass)
            if not self.keyname is None:
                request.use_tsig(self.keyring, self.keyname,
                                 algorithm=self.keyalgorithm)
            request.use_edns(self.edns, self.ednsflags, self.payload)
            response = None
            #
            # make a copy of the servers list so we can alter it later.
            #
            nameservers = self.nameservers[:]
            backoff = 0.10
            # Keep cycling through the (shrinking) server list until one
            # yields a usable response or we run out of servers/time.
            while response is None:
                if len(nameservers) == 0:
                    raise NoNameservers
                for nameserver in nameservers[:]:
                    timeout = self._compute_timeout(start)
                    try:
                        if tcp:
                            response = dns.query.tcp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                        else:
                            response = dns.query.udp(request, nameserver,
                                                     timeout, self.port,
                                                     source=source)
                    except (socket.error, dns.exception.Timeout):
                        #
                        # Communication failure or timeout. Go to the
                        # next server
                        #
                        response = None
                        continue
                    except dns.query.UnexpectedSource:
                        #
                        # Who knows? Keep going.
                        #
                        response = None
                        continue
                    except dns.exception.FormError:
                        #
                        # We don't understand what this server is
                        # saying. Take it out of the mix and
                        # continue.
                        #
                        nameservers.remove(nameserver)
                        response = None
                        continue
                    rcode = response.rcode()
                    if rcode == dns.rcode.NOERROR or \
                           rcode == dns.rcode.NXDOMAIN:
                        break
                    #
                    # We got a response, but we're not happy with the
                    # rcode in it. Remove the server from the mix if
                    # the rcode isn't SERVFAIL.
                    #
                    if rcode != dns.rcode.SERVFAIL:
                        nameservers.remove(nameserver)
                    response = None
                if not response is None:
                    break
                #
                # All nameservers failed!
                #
                if len(nameservers) > 0:
                    #
                    # But we still have servers to try. Sleep a bit
                    # so we don't pound them!
                    #
                    timeout = self._compute_timeout(start)
                    sleep_time = min(timeout, backoff)
                    backoff *= 2
                    time.sleep(sleep_time)
            # NXDOMAIN for this candidate name: try the next search suffix.
            if response.rcode() == dns.rcode.NXDOMAIN:
                continue
            all_nxdomain = False
            break
        # Every candidate name came back NXDOMAIN.
        if all_nxdomain:
            raise NXDOMAIN
        answer = Answer(qname, rdtype, rdclass, response,
                        raise_on_no_answer)
        if self.cache:
            self.cache.put((qname, rdtype, rdclass), answer)
        return answer
    def use_tsig(self, keyring, keyname=None,
                 algorithm=dns.tsig.default_algorithm):
        """Add a TSIG signature to the query.
        @param keyring: The TSIG keyring to use; defaults to None.
        @type keyring: dict
        @param keyname: The name of the TSIG key to use; defaults to None.
        The key must be defined in the keyring. If a keyring is specified
        but a keyname is not, then the key used will be the first key in the
        keyring. Note that the order of keys in a dictionary is not defined,
        so applications should supply a keyname when a keyring is used, unless
        they know the keyring contains only one key.
        @param algorithm: The TSIG key algorithm to use. The default
        is dns.tsig.default_algorithm.
        @type algorithm: string"""
        self.keyring = keyring
        if keyname is None:
            # Arbitrary pick: dict key order is undefined (see docstring).
            self.keyname = self.keyring.keys()[0]
        else:
            self.keyname = keyname
        self.keyalgorithm = algorithm
def use_edns(self, edns, ednsflags, payload):
"""Configure Edns.
@param edns: The EDNS level to use. The default is -1, no Edns.
@type edns: int
@param ednsflags: The EDNS flags
@type ednsflags: int
@param payload: The EDNS payload size. The default is 0.
@type payload: int"""
if edns is None:
edns = -1
self.edns = edns
self.ednsflags = ednsflags
self.payload = payload
# Process-wide resolver shared by the module-level convenience functions.
default_resolver = None
def get_default_resolver():
    """Get the default resolver, initializing it if necessary."""
    global default_resolver
    if default_resolver is None:
        default_resolver = Resolver()
    return default_resolver
def query(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
          tcp=False, source=None, raise_on_no_answer=True):
    """Query nameservers to find the answer to the question.
    This is a convenience function that uses the default resolver
    object to make the query.
    @see: L{dns.resolver.Resolver.query} for more information on the
    parameters."""
    # Delegate to the shared default resolver instance.
    return get_default_resolver().query(qname, rdtype, rdclass, tcp, source,
                                        raise_on_no_answer)
def zone_for_name(name, rdclass=dns.rdataclass.IN, tcp=False, resolver=None):
    """Find the name of the zone which contains the specified name.
    @param name: the query name
    @type name: absolute dns.name.Name object or string
    @param rdclass: The query class
    @type rdclass: int
    @param tcp: use TCP to make the query (default is False).
    @type tcp: bool
    @param resolver: the resolver to use
    @type resolver: dns.resolver.Resolver object or None
    @rtype: dns.name.Name"""
    if isinstance(name, (str, unicode)):
        name = dns.name.from_text(name, dns.name.root)
    if resolver is None:
        resolver = get_default_resolver()
    if not name.is_absolute():
        raise NotAbsolute(name)
    # Walk up the name hierarchy, querying for an SOA record at each level;
    # the first name owning an SOA is the enclosing zone's apex.
    while 1:
        try:
            answer = resolver.query(name, dns.rdatatype.SOA, rdclass, tcp)
            if answer.rrset.name == name:
                return name
            # otherwise we were CNAMEd or DNAMEd and need to look higher
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            pass
        try:
            name = name.parent()
        except dns.name.NoParent:
            # We ran out of parents without ever finding an SOA.
            raise NoRootSOA
#
# Support for overriding the system resolver for all python code in the
# running process.
#
# Protocols that _getaddrinfo reports for each supported socket type.
_protocols_for_socktype = {
    socket.SOCK_DGRAM : [socket.SOL_UDP],
    socket.SOCK_STREAM : [socket.SOL_TCP],
}
# Resolver installed by override_system_resolver(); None when not overridden.
_resolver = None
# Originals saved at import time so restore_system_resolver() can undo
# the monkey-patching of the socket module.
_original_getaddrinfo = socket.getaddrinfo
_original_getnameinfo = socket.getnameinfo
_original_getfqdn = socket.getfqdn
_original_gethostbyname = socket.gethostbyname
_original_gethostbyname_ex = socket.gethostbyname_ex
_original_gethostbyaddr = socket.gethostbyaddr
def _getaddrinfo(host=None, service=None, family=socket.AF_UNSPEC, socktype=0,
                 proto=0, flags=0):
    """Replacement for socket.getaddrinfo that resolves via dnspython.

    Returns the usual list of (family, socktype, proto, canonname,
    sockaddr) tuples.  AI_ADDRCONFIG and AI_V4MAPPED are not supported.
    """
    if flags & (socket.AI_ADDRCONFIG|socket.AI_V4MAPPED) != 0:
        raise NotImplementedError
    if host is None and service is None:
        raise socket.gaierror(socket.EAI_NONAME)
    v6addrs = []
    v4addrs = []
    canonical_name = None
    # The nested try/excepts below use address-literal parsing as a probe:
    # first IPv6, then IPv4, and only then a real DNS lookup.
    try:
        # Is host None or a V6 address literal?
        if host is None:
            canonical_name = 'localhost'
            if flags & socket.AI_PASSIVE != 0:
                v6addrs.append('::')
                v4addrs.append('0.0.0.0')
            else:
                v6addrs.append('::1')
                v4addrs.append('127.0.0.1')
        else:
            # Strip any '%scope' suffix before parsing the literal.
            parts = host.split('%')
            if len(parts) == 2:
                ahost = parts[0]
            else:
                ahost = host
            addr = dns.ipv6.inet_aton(ahost)
            v6addrs.append(host)
            canonical_name = host
    except:
        try:
            # Is it a V4 address literal?
            addr = dns.ipv4.inet_aton(host)
            v4addrs.append(host)
            canonical_name = host
        except:
            if flags & socket.AI_NUMERICHOST == 0:
                try:
                    qname = None
                    if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
                        v6 = _resolver.query(host, dns.rdatatype.AAAA,
                                             raise_on_no_answer=False)
                        # Note that setting host ensures we query the same name
                        # for A as we did for AAAA.
                        host = v6.qname
                        canonical_name = v6.canonical_name.to_text(True)
                        if v6.rrset is not None:
                            for rdata in v6.rrset:
                                v6addrs.append(rdata.address)
                    if family == socket.AF_INET or family == socket.AF_UNSPEC:
                        v4 = _resolver.query(host, dns.rdatatype.A,
                                             raise_on_no_answer=False)
                        host = v4.qname
                        canonical_name = v4.canonical_name.to_text(True)
                        if v4.rrset is not None:
                            for rdata in v4.rrset:
                                v4addrs.append(rdata.address)
                except dns.resolver.NXDOMAIN:
                    raise socket.gaierror(socket.EAI_NONAME)
                except:
                    raise socket.gaierror(socket.EAI_SYSTEM)
    # Resolve the service argument: numeric port first, then /etc/services.
    port = None
    try:
        # Is it a port literal?
        if service is None:
            port = 0
        else:
            port = int(service)
    except:
        if flags & socket.AI_NUMERICSERV == 0:
            try:
                port = socket.getservbyname(service)
            except:
                pass
    if port is None:
        raise socket.gaierror(socket.EAI_NONAME)
    tuples = []
    if socktype == 0:
        socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
    else:
        socktypes = [socktype]
    if flags & socket.AI_CANONNAME != 0:
        cname = canonical_name
    else:
        cname = ''
    # Emit the cross product of addresses x socket types x protocols,
    # IPv6 results first, matching getaddrinfo's tuple layout.
    if family == socket.AF_INET6 or family == socket.AF_UNSPEC:
        for addr in v6addrs:
            for socktype in socktypes:
                for proto in _protocols_for_socktype[socktype]:
                    tuples.append((socket.AF_INET6, socktype, proto,
                                   cname, (addr, port, 0, 0)))
    if family == socket.AF_INET or family == socket.AF_UNSPEC:
        for addr in v4addrs:
            for socktype in socktypes:
                for proto in _protocols_for_socktype[socktype]:
                    tuples.append((socket.AF_INET, socktype, proto,
                                   cname, (addr, port)))
    if len(tuples) == 0:
        raise socket.gaierror(socket.EAI_NONAME)
    return tuples
def _getnameinfo(sockaddr, flags=0):
    """Replacement for socket.getnameinfo that resolves via dnspython.

    Maps a sockaddr back to a (hostname, service) pair, using a PTR
    lookup unless NI_NUMERICHOST is given.
    """
    host = sockaddr[0]
    port = sockaddr[1]
    # A 4-tuple sockaddr is IPv6 (with a scope id); a 2-tuple is IPv4.
    if len(sockaddr) == 4:
        scope = sockaddr[3]
        family = socket.AF_INET6
    else:
        scope = None
        family = socket.AF_INET
    tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM,
                          socket.SOL_TCP, 0)
    if len(tuples) > 1:
        raise socket.error('sockaddr resolved to multiple addresses')
    addr = tuples[0][4][0]
    if flags & socket.NI_DGRAM:
        pname = 'udp'
    else:
        pname = 'tcp'
    qname = dns.reversename.from_address(addr)
    if flags & socket.NI_NUMERICHOST == 0:
        try:
            answer = _resolver.query(qname, 'PTR')
            hostname = answer.rrset[0].target.to_text(True)
        except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
            if flags & socket.NI_NAMEREQD:
                raise socket.gaierror(socket.EAI_NONAME)
            # Fall back to the numeric address (plus any IPv6 scope).
            hostname = addr
            if scope is not None:
                hostname += '%' + str(scope)
    else:
        hostname = addr
        if scope is not None:
            hostname += '%' + str(scope)
    if flags & socket.NI_NUMERICSERV:
        service = str(port)
    else:
        service = socket.getservbyport(port, pname)
    return (hostname, service)
def _getfqdn(name=None):
    """Replacement for socket.getfqdn using dnspython's resolver."""
    if name is None:
        name = socket.gethostname()
    # Resolve the name to an address, then reverse-map that address back
    # to its canonical hostname.
    return _getnameinfo(_getaddrinfo(name, 80)[0][4])[0]
def _gethostbyname(name):
    """Replacement for socket.gethostbyname: first IPv4 address of name."""
    return _gethostbyname_ex(name)[2][0]
def _gethostbyname_ex(name):
    """Replacement for socket.gethostbyname_ex using dnspython's resolver.

    Returns (canonical_name, aliases, ipv4_addresses); aliases is always
    empty (see the XXX below).
    """
    aliases = []
    addresses = []
    tuples = _getaddrinfo(name, 0, socket.AF_INET, socket.SOCK_STREAM,
                          socket.SOL_TCP, socket.AI_CANONNAME)
    canonical = tuples[0][3]
    for item in tuples:
        addresses.append(item[4][0])
    # XXX we just ignore aliases
    return (canonical, aliases, addresses)
def _gethostbyaddr(ip):
    """Replacement for socket.gethostbyaddr using dnspython's resolver."""
    # Probe: if the string parses as an IPv6 literal, build an IPv6
    # sockaddr, otherwise assume IPv4.
    try:
        addr = dns.ipv6.inet_aton(ip)
        sockaddr = (ip, 80, 0, 0)
        family = socket.AF_INET6
    except:
        sockaddr = (ip, 80)
        family = socket.AF_INET
    # NI_NAMEREQD: fail with gaierror if no PTR record exists.
    (name, port) = _getnameinfo(sockaddr, socket.NI_NAMEREQD)
    aliases = []
    addresses = []
    tuples = _getaddrinfo(name, 0, family, socket.SOCK_STREAM, socket.SOL_TCP,
                          socket.AI_CANONNAME)
    canonical = tuples[0][3]
    for item in tuples:
        addresses.append(item[4][0])
    # XXX we just ignore aliases
    return (canonical, aliases, addresses)
def override_system_resolver(resolver=None):
    """Override the system resolver routines in the socket module with
    versions which use dnspython's resolver.
    This can be useful in testing situations where you want to control
    the resolution behavior of python code without having to change
    the system's resolver settings (e.g. /etc/resolv.conf).
    The resolver to use may be specified; if it's not, the default
    resolver will be used.
    @param resolver: the resolver to use
    @type resolver: dns.resolver.Resolver object or None
    """
    if resolver is None:
        resolver = get_default_resolver()
    global _resolver
    _resolver = resolver
    # Monkey-patch the socket module; restore_system_resolver() undoes this.
    socket.getaddrinfo = _getaddrinfo
    socket.getnameinfo = _getnameinfo
    socket.getfqdn = _getfqdn
    socket.gethostbyname = _gethostbyname
    socket.gethostbyname_ex = _gethostbyname_ex
    socket.gethostbyaddr = _gethostbyaddr
def restore_system_resolver():
    """Undo the effects of override_system_resolver().
    """
    global _resolver
    _resolver = None
    # Reinstall the socket functions that were saved at import time.
    socket.getaddrinfo = _original_getaddrinfo
    socket.getnameinfo = _original_getnameinfo
    socket.getfqdn = _original_getfqdn
    socket.gethostbyname = _original_gethostbyname
    socket.gethostbyname_ex = _original_gethostbyname_ex
    socket.gethostbyaddr = _original_gethostbyaddr
|
gpl-3.0
|
sergei-maertens/discord-bot
|
bot/plugins/remindme/tests/test_plugin.py
|
1
|
3392
|
from datetime import datetime
from unittest import TestCase, mock
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from ..plugin import Plugin
class PluginTests(TestCase):
    """Tests for the remindme plugin's time parsing and command regex."""
    def test_parse_time(self):
        plugin = Plugin(mock.MagicMock(), {})
        def expected(**kwargs):
            # Truncated to whole seconds so that the two now() calls (here
            # and inside parse_time) agree despite the tiny delay between
            # them.
            return (timezone.now() + relativedelta(**kwargs)).replace(microsecond=0)
        self.assertEqual(plugin.parse_time('30m'), expected(minutes=30))
        self.assertEqual(plugin.parse_time('30min'), expected(minutes=30))
        self.assertEqual(plugin.parse_time('30minutes'), expected(minutes=30))
        self.assertEqual(plugin.parse_time('1h'), expected(hours=1))
        self.assertEqual(plugin.parse_time('1hour'), expected(hours=1))
        self.assertEqual(plugin.parse_time('1hours'), expected(hours=1))
        self.assertEqual(plugin.parse_time('2days 4h 30m'), expected(days=2, hours=4, minutes=30))
        self.assertEqual(plugin.parse_time('5year 3months 2days 1hour'), expected(years=5, months=3, days=2, hours=1))
    def test_parse_absolute(self):
        # NOTE(review): absolute inputs are asserted as UTC; the bare
        # '21:00' case uses the machine-local date, so this could be flaky
        # if the test runs across midnight -- verify intent.
        plugin = Plugin(mock.MagicMock(), {})
        now = datetime.now()
        self.assertEqual(
            plugin.parse_time('21:00'),
            datetime(now.year, now.month, now.day, 21, 0).replace(tzinfo=timezone.utc)
        )
        self.assertEqual(
            plugin.parse_time('2016-11-01'),
            datetime(2016, 11, 1, 0, 0, 0).replace(tzinfo=timezone.utc)
        )
        self.assertEqual(
            plugin.parse_time('2016-11-01 21:00'),
            datetime(2016, 11, 1, 21, 0).replace(tzinfo=timezone.utc)
        )
    def test_argument_regex_relative(self):
        """
        Test that the command can take both absolute and relative inputs.
        """
        plugin = Plugin(mock.MagicMock(), {})
        pattern = plugin.remindme._command.regex
        match = pattern.match('3days a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '3days')
        self.assertEqual(match.group('message'), 'a message')
        match = pattern.match('2hours 30min a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '2hours 30min')
        self.assertEqual(match.group('message'), 'a message')
        match = pattern.match('2h30m a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '2h30m')
        self.assertEqual(match.group('message'), 'a message')
    def test_argument_regex_absolute(self):
        """
        Test that the command can take both absolute and relative inputs.
        """
        plugin = Plugin(mock.MagicMock(), {})
        pattern = plugin.remindme._command.regex
        match = pattern.match('21:00 a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '21:00')
        self.assertEqual(match.group('message'), 'a message')
        match = pattern.match('2016-11-01 a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '2016-11-01')
        self.assertEqual(match.group('message'), 'a message')
        match = pattern.match('2016-11-01 21:00 a message')
        self.assertIsNotNone(match)
        self.assertEqual(match.group('time'), '2016-11-01 21:00')
        self.assertEqual(match.group('message'), 'a message')
|
mit
|
nelmiux/CarnotKE
|
jyhton/lib-python/2.7/idlelib/ClassBrowser.py
|
91
|
6369
|
"""Class browser.
XXX TO DO:
- reparse when source changed (maybe just a button would be OK?)
(or recheck on window popup)
- add popup menu with more options (e.g. doc strings, base classes, imports)
- show function argument list? (have to do pattern matching on source)
- should the classes and methods lists also be in the module's menu bar?
- add base classes to class browser tree
"""
import os
import sys
import pyclbr
from idlelib import PyShell
from idlelib.WindowList import ListedToplevel
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.configHandler import idleConf
class ClassBrowser:
    """Toplevel window showing the classes and methods of one module."""
    def __init__(self, flist, name, path):
        # XXX This API should change, if the file doesn't end in ".py"
        # XXX the code here is bogus!
        self.name = name
        self.file = os.path.join(path[0], self.name + ".py")
        self.init(flist)
    def close(self, event=None):
        # Tear down both the window and the tree node hierarchy.
        self.top.destroy()
        self.node.destroy()
    def init(self, flist):
        self.flist = flist
        # reset pyclbr
        # NOTE(review): _modules is a private pyclbr attribute; verify it
        # exists on the targeted Python version.
        pyclbr._modules.clear()
        # create top
        self.top = top = ListedToplevel(flist.root)
        top.protocol("WM_DELETE_WINDOW", self.close)
        top.bind("<Escape>", self.close)
        self.settitle()
        top.focus_set()
        # create scrolled canvas
        theme = idleConf.GetOption('main','Theme','name')
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
        sc.frame.pack(expand=1, fill="both")
        item = self.rootnode()
        self.node = node = TreeNode(sc.canvas, None, item)
        node.update()
        node.expand()
    def settitle(self):
        # Window title and iconified title for the toplevel.
        self.top.wm_title("Class Browser - " + self.name)
        self.top.wm_iconname("Class Browser")
    def rootnode(self):
        # Root of the tree is the module file itself.
        return ModuleBrowserTreeItem(self.file)
class ModuleBrowserTreeItem(TreeItem):
    """Tree item for a module file; its children are top-level classes."""
    def __init__(self, file):
        self.file = file
    def GetText(self):
        return os.path.basename(self.file)
    def GetIconName(self):
        return "python"
    def GetSubList(self):
        # listclasses() also populates self.classes as a side effect, which
        # the child items share for their lookups.
        sublist = []
        for name in self.listclasses():
            item = ClassBrowserTreeItem(name, self.classes, self.file)
            sublist.append(item)
        return sublist
    def OnDoubleClick(self):
        # Only open real, existing .py files in the editor.
        if os.path.normcase(self.file[-3:]) != ".py":
            return
        if not os.path.exists(self.file):
            return
        PyShell.flist.open(self.file)
    def IsExpandable(self):
        return os.path.normcase(self.file[-3:]) == ".py"
    def listclasses(self):
        # Parse the module with pyclbr and return the display strings for
        # its top-level classes/functions, ordered by line number.  Also
        # fills self.classes (display string -> pyclbr object).
        dir, file = os.path.split(self.file)
        name, ext = os.path.splitext(file)
        if os.path.normcase(ext) != ".py":
            return []
        try:
            dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
        except ImportError, msg:
            return []
        items = []
        self.classes = {}
        for key, cl in dict.items():
            if cl.module == name:
                s = key
                if hasattr(cl, 'super') and cl.super:
                    # Render base classes, qualifying ones from other modules.
                    supers = []
                    for sup in cl.super:
                        if type(sup) is type(''):
                            sname = sup
                        else:
                            sname = sup.name
                            if sup.module != cl.module:
                                sname = "%s.%s" % (sup.module, sname)
                        supers.append(sname)
                    s = s + "(%s)" % ", ".join(supers)
                items.append((cl.lineno, s))
                self.classes[s] = cl
        items.sort()
        list = []
        for item, s in items:
            list.append(s)
        return list
class ClassBrowserTreeItem(TreeItem):
    """Tree item for a class or top-level function; children are methods."""
    def __init__(self, name, classes, file):
        self.name = name
        self.classes = classes
        self.file = file
        try:
            self.cl = self.classes[self.name]
        except (IndexError, KeyError):
            self.cl = None
        self.isfunction = isinstance(self.cl, pyclbr.Function)
    def GetText(self):
        if self.isfunction:
            return "def " + self.name + "(...)"
        else:
            return "class " + self.name
    def GetIconName(self):
        if self.isfunction:
            return "python"
        else:
            return "folder"
    def IsExpandable(self):
        # NOTE(review): implicitly returns None (falsy) when self.cl is
        # unset; callers appear to rely only on truthiness.
        if self.cl:
            try:
                return not not self.cl.methods
            except AttributeError:
                return False
    def GetSubList(self):
        if not self.cl:
            return []
        sublist = []
        for name in self.listmethods():
            item = MethodBrowserTreeItem(name, self.cl, self.file)
            sublist.append(item)
        return sublist
    def OnDoubleClick(self):
        # Open the file in the editor and jump to the definition line.
        if not os.path.exists(self.file):
            return
        edit = PyShell.flist.open(self.file)
        if hasattr(self.cl, 'lineno'):
            lineno = self.cl.lineno
            edit.gotoline(lineno)
    def listmethods(self):
        # Method names sorted by their line number in the source.
        if not self.cl:
            return []
        items = []
        for name, lineno in self.cl.methods.items():
            items.append((lineno, name))
        items.sort()
        list = []
        for item, name in items:
            list.append(name)
        return list
class MethodBrowserTreeItem(TreeItem):
    """Leaf tree item for one method; double-click jumps to its line."""
    def __init__(self, name, cl, file):
        self.name = name
        self.cl = cl
        self.file = file
    def GetText(self):
        return "def " + self.name + "(...)"
    def GetIconName(self):
        return "python" # XXX
    def IsExpandable(self):
        # Methods never have children in this browser.
        return 0
    def OnDoubleClick(self):
        if not os.path.exists(self.file):
            return
        edit = PyShell.flist.open(self.file)
        edit.gotoline(self.cl.methods[self.name])
def main():
    """Stand-alone entry point: browse the module named on the command line.

    NOTE(review): the try/except below is dead code -- `file` is
    unconditionally reassigned by the if/else that follows.  Kept as-is.
    """
    try:
        file = __file__
    except NameError:
        file = sys.argv[0]
    if sys.argv[1:]:
        file = sys.argv[1]
    else:
        file = sys.argv[0]
    dir, file = os.path.split(file)
    name = os.path.splitext(file)[0]
    ClassBrowser(PyShell.flist, name, [dir])
    # NOTE(review): `mainloop` is not imported by the visible import block;
    # verify it is provided elsewhere before running this stand-alone.
    if sys.stdin is sys.__stdin__:
        mainloop()
if __name__ == "__main__":
main()
|
apache-2.0
|
sergiopasra/megaradrp
|
megaradrp/instrument/components/lamps.py
|
2
|
1718
|
#
# Copyright 2016-2019 Universidad Complutense de Madrid
#
# This file is part of Megara DRP
#
# SPDX-License-Identifier: GPL-3.0+
# License-Filename: LICENSE.txt
#
import numpy as np
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
from numina.instrument.hwdevice import HWDevice
from megaradrp.simulation.extended import create_th_ar_arc_spectrum
class Lamp(HWDevice):
    """Generic calibration lamp with a flat (constant) spectral flux."""
    def __init__(self, name, factor=1.0, illumination=None):
        super(Lamp, self).__init__(name=name)
        # Overall scale applied to the emitted flux.
        self.factor = factor
        if illumination is None:
            # Default spatial illumination pattern: uniform.
            self._illum = lambda x, y: np.ones_like(x)
        else:
            self._illum = illumination
    def flux(self, wl):
        # Flat spectrum expressed as a surface brightness.
        # NOTE(review): np.ones_like(wl).value assumes `wl` is an astropy
        # Quantity (so ones_like returns a Quantity) -- verify callers.
        units = u.erg * u.s**-1 * u.cm ** -2 * u.AA**-1 * u.sr**-1
        return self.factor * np.ones_like(wl).value * units
    def illumination(self, x, y):
        # Relative illumination at focal-plane coordinates (x, y).
        return self._illum(x, y)
class BlackBodyLamp(Lamp):
    """Lamp whose spectrum is a black body at temperature `temp`."""
    def __init__(self, name, temp, factor=1.0, illumination=None):
        # Black-body temperature used by flux().
        self.temp = temp
        super(BlackBodyLamp, self).__init__(name, factor=factor,
                                            illumination=illumination)
    def flux(self, wl_in):
        # Planck spectral radiance at the lamp temperature, scaled.
        energy_in_flat = blackbody_lambda(wl_in, self.temp)
        return self.factor * energy_in_flat
class FlatLamp(Lamp):
    """Continuum ("flat") lamp; inherits Lamp's flat spectrum unchanged."""
    def __init__(self, name, factor=1.0, illumination=None):
        super(FlatLamp, self).__init__(name, factor=factor,
                                       illumination=illumination)
class ArcLamp(Lamp):
    """ThAr arc lamp: emission-line spectrum for wavelength calibration."""
    def flux(self, wl_in):
        val = create_th_ar_arc_spectrum(wl_in)
        # Attach surface-brightness units to the synthetic arc spectrum.
        val_u = val * u.erg * u.s**-1 * u.cm ** -2 * u.AA**-1 * u.sr**-1
        return self.factor * val_u
|
gpl-3.0
|
racemidev/RegAdminForLinux
|
python/rregadmin/hive/key_cell_wrapper.py
|
1
|
19637
|
# generated by 'xml2py'
# flags '-c -d -v -k defst -lrregadmin -m rregadmin.util.glib_wrapper -m rregadmin.util.icu_wrapper -m rregadmin.util.path_wrapper -m rregadmin.util.icu_wrapper -m rregadmin.util.path_info_wrapper -m rregadmin.util.ustring_wrapper -m rregadmin.util.offset_wrapper -m rregadmin.util.value_wrapper -m rregadmin.util.ustring_list_wrapper -m rregadmin.hive.types_wrapper -r ^key_cell_.* -o key_cell_wrapper.py key_cell_wrapper.xml'
from ctypes import *
from rregadmin.util.glib_wrapper import guint16
from rregadmin.hive.types_wrapper import KeyCell
from rregadmin.hive.types_wrapper import Hive
from rregadmin.util.offset_wrapper import offset
from rregadmin.util.ustring_list_wrapper import ustring
# Cache of loaded shared libraries, keyed by soname.  The CDLL call runs at
# import time, so importing this module raises OSError when
# librregadmin.so.1 cannot be found by the dynamic loader.
_libraries = {}
_libraries['librregadmin.so.1'] = CDLL('librregadmin.so.1')
from rregadmin.util.ustring_list_wrapper import gboolean
from rregadmin.hive.types_wrapper import Cell
# Alias used by the generated prototypes below for C `char *` parameters.
STRING = c_char_p
from rregadmin.hive.types_wrapper import guint32
from rregadmin.hive.types_wrapper import ValueKeyCell
from rregadmin.util.value_wrapper import Value
from rregadmin.hive.types_wrapper import KeyListCell
from rregadmin.hive.types_wrapper import ValueListCell
from rregadmin.hive.types_wrapper import SecurityDescriptorCell
from rregadmin.hive.types_wrapper import ValueCell
# The C `key_cell_type` is represented as a 16-bit unsigned integer.
key_cell_type = guint16
# ---------------------------------------------------------------------------
# ctypes prototypes for the key_cell C API (auto-generated by xml2py).
# Each group binds one exported symbol: `restype` declares the C return
# type, `argtypes` the parameter types, and `__doc__` records the original
# C prototype plus its location in key_cell.h.  Do not hand-edit these
# bindings; regenerate with xml2py instead (see the command at file top).
# ---------------------------------------------------------------------------
# ../../../rregadmin/hive/key_cell.h 83
key_cell_alloc = _libraries['librregadmin.so.1'].key_cell_alloc
key_cell_alloc.restype = POINTER(KeyCell)
# key_cell_alloc(in_hive, in_ofs, in_name)
key_cell_alloc.argtypes = [POINTER(Hive), offset, POINTER(ustring)]
key_cell_alloc.__doc__ = \
"""KeyCell * key_cell_alloc(Hive * in_hive, offset in_ofs, unknown * in_name)
../../../rregadmin/hive/key_cell.h:83"""
# ../../../rregadmin/hive/key_cell.h 89
key_cell_unalloc = _libraries['librregadmin.so.1'].key_cell_unalloc
key_cell_unalloc.restype = gboolean
# key_cell_unalloc(in_kc, in_recursive)
key_cell_unalloc.argtypes = [POINTER(KeyCell), gboolean]
key_cell_unalloc.__doc__ = \
"""gboolean key_cell_unalloc(KeyCell * in_kc, gboolean in_recursive)
../../../rregadmin/hive/key_cell.h:89"""
# ../../../rregadmin/hive/key_cell.h 98
key_cell_alloc_root = _libraries['librregadmin.so.1'].key_cell_alloc_root
key_cell_alloc_root.restype = POINTER(KeyCell)
# key_cell_alloc_root(in_hive)
key_cell_alloc_root.argtypes = [POINTER(Hive)]
key_cell_alloc_root.__doc__ = \
"""KeyCell * key_cell_alloc_root(Hive * in_hive)
../../../rregadmin/hive/key_cell.h:98"""
# ../../../rregadmin/hive/key_cell.h 104
key_cell_from_cell = _libraries['librregadmin.so.1'].key_cell_from_cell
key_cell_from_cell.restype = POINTER(KeyCell)
# key_cell_from_cell(in_cell)
key_cell_from_cell.argtypes = [POINTER(Cell)]
key_cell_from_cell.__doc__ = \
"""KeyCell * key_cell_from_cell(Cell * in_cell)
../../../rregadmin/hive/key_cell.h:104"""
# ../../../rregadmin/hive/key_cell.h 110
key_cell_to_cell = _libraries['librregadmin.so.1'].key_cell_to_cell
key_cell_to_cell.restype = POINTER(Cell)
# key_cell_to_cell(in_kc)
key_cell_to_cell.argtypes = [POINTER(KeyCell)]
key_cell_to_cell.__doc__ = \
"""Cell * key_cell_to_cell(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:110"""
# ../../../rregadmin/hive/key_cell.h 116
key_cell_is_valid = _libraries['librregadmin.so.1'].key_cell_is_valid
key_cell_is_valid.restype = gboolean
# key_cell_is_valid(in_kc)
key_cell_is_valid.argtypes = [POINTER(KeyCell)]
key_cell_is_valid.__doc__ = \
"""gboolean key_cell_is_valid(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:116"""
# ../../../rregadmin/hive/key_cell.h 122
key_cell_get_parent = _libraries['librregadmin.so.1'].key_cell_get_parent
key_cell_get_parent.restype = POINTER(KeyCell)
# key_cell_get_parent(in_kc)
key_cell_get_parent.argtypes = [POINTER(KeyCell)]
key_cell_get_parent.__doc__ = \
"""KeyCell * key_cell_get_parent(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:122"""
# ../../../rregadmin/hive/key_cell.h 130
key_cell_get_type_str = _libraries['librregadmin.so.1'].key_cell_get_type_str
key_cell_get_type_str.restype = STRING
# key_cell_get_type_str(in_kc)
key_cell_get_type_str.argtypes = [POINTER(KeyCell)]
key_cell_get_type_str.__doc__ = \
"""unknown * key_cell_get_type_str(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:130"""
# ../../../rregadmin/hive/key_cell.h 138
key_cell_get_type_id = _libraries['librregadmin.so.1'].key_cell_get_type_id
key_cell_get_type_id.restype = key_cell_type
# key_cell_get_type_id(in_kc)
key_cell_get_type_id.argtypes = [POINTER(KeyCell)]
key_cell_get_type_id.__doc__ = \
"""key_cell_type key_cell_get_type_id(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:138"""
# ../../../rregadmin/hive/key_cell.h 146
key_cell_get_name = _libraries['librregadmin.so.1'].key_cell_get_name
key_cell_get_name.restype = POINTER(ustring)
# key_cell_get_name(in_kc)
key_cell_get_name.argtypes = [POINTER(KeyCell)]
key_cell_get_name.__doc__ = \
"""ustring * key_cell_get_name(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:146"""
# ../../../rregadmin/hive/key_cell.h 154
key_cell_get_classname = _libraries['librregadmin.so.1'].key_cell_get_classname
key_cell_get_classname.restype = POINTER(ustring)
# key_cell_get_classname(in_kc)
key_cell_get_classname.argtypes = [POINTER(KeyCell)]
key_cell_get_classname.__doc__ = \
"""ustring * key_cell_get_classname(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:154"""
# ../../../rregadmin/hive/key_cell.h 160
key_cell_compare_names = _libraries['librregadmin.so.1'].key_cell_compare_names
key_cell_compare_names.restype = c_int
# key_cell_compare_names(in_kc1, in_kc2)
key_cell_compare_names.argtypes = [POINTER(KeyCell), POINTER(KeyCell)]
key_cell_compare_names.__doc__ = \
"""int key_cell_compare_names(KeyCell * in_kc1, KeyCell * in_kc2)
../../../rregadmin/hive/key_cell.h:160"""
# ../../../rregadmin/hive/key_cell.h 166
key_cell_get_number_of_values = _libraries['librregadmin.so.1'].key_cell_get_number_of_values
key_cell_get_number_of_values.restype = guint32
# key_cell_get_number_of_values(in_kc)
key_cell_get_number_of_values.argtypes = [POINTER(KeyCell)]
key_cell_get_number_of_values.__doc__ = \
"""guint32 key_cell_get_number_of_values(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:166"""
# ../../../rregadmin/hive/key_cell.h 172
key_cell_get_value = _libraries['librregadmin.so.1'].key_cell_get_value
key_cell_get_value.restype = POINTER(ValueKeyCell)
# key_cell_get_value(in_kc, in_index)
key_cell_get_value.argtypes = [POINTER(KeyCell), guint32]
key_cell_get_value.__doc__ = \
"""ValueKeyCell * key_cell_get_value(KeyCell * in_kc, guint32 in_index)
../../../rregadmin/hive/key_cell.h:172"""
# ../../../rregadmin/hive/key_cell.h 178
key_cell_get_value_str = _libraries['librregadmin.so.1'].key_cell_get_value_str
key_cell_get_value_str.restype = POINTER(ValueKeyCell)
# key_cell_get_value_str(in_kc, in_name)
key_cell_get_value_str.argtypes = [POINTER(KeyCell), STRING]
key_cell_get_value_str.__doc__ = \
"""ValueKeyCell * key_cell_get_value_str(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:178"""
# ../../../rregadmin/hive/key_cell.h 184
key_cell_get_value_ustr = _libraries['librregadmin.so.1'].key_cell_get_value_ustr
key_cell_get_value_ustr.restype = POINTER(ValueKeyCell)
# key_cell_get_value_ustr(in_kc, in_name)
key_cell_get_value_ustr.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_get_value_ustr.__doc__ = \
"""ValueKeyCell * key_cell_get_value_ustr(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:184"""
# ../../../rregadmin/hive/key_cell.h 190
key_cell_delete_value = _libraries['librregadmin.so.1'].key_cell_delete_value
key_cell_delete_value.restype = gboolean
# key_cell_delete_value(in_kc, in_index)
key_cell_delete_value.argtypes = [POINTER(KeyCell), guint32]
key_cell_delete_value.__doc__ = \
"""gboolean key_cell_delete_value(KeyCell * in_kc, guint32 in_index)
../../../rregadmin/hive/key_cell.h:190"""
# ../../../rregadmin/hive/key_cell.h 196
key_cell_delete_value_str = _libraries['librregadmin.so.1'].key_cell_delete_value_str
key_cell_delete_value_str.restype = gboolean
# key_cell_delete_value_str(in_kc, in_name)
key_cell_delete_value_str.argtypes = [POINTER(KeyCell), STRING]
key_cell_delete_value_str.__doc__ = \
"""gboolean key_cell_delete_value_str(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:196"""
# ../../../rregadmin/hive/key_cell.h 202
key_cell_delete_value_ustr = _libraries['librregadmin.so.1'].key_cell_delete_value_ustr
key_cell_delete_value_ustr.restype = gboolean
# key_cell_delete_value_ustr(in_kc, in_name)
key_cell_delete_value_ustr.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_delete_value_ustr.__doc__ = \
"""gboolean key_cell_delete_value_ustr(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:202"""
# ../../../rregadmin/hive/key_cell.h 209
key_cell_add_value = _libraries['librregadmin.so.1'].key_cell_add_value
key_cell_add_value.restype = POINTER(ValueKeyCell)
# key_cell_add_value(in_kc, in_name, in_val)
key_cell_add_value.argtypes = [POINTER(KeyCell), STRING, POINTER(Value)]
key_cell_add_value.__doc__ = \
"""ValueKeyCell * key_cell_add_value(KeyCell * in_kc, unknown * in_name, unknown * in_val)
../../../rregadmin/hive/key_cell.h:209"""
# ../../../rregadmin/hive/key_cell.h 222
key_cell_get_number_of_subkeys = _libraries['librregadmin.so.1'].key_cell_get_number_of_subkeys
key_cell_get_number_of_subkeys.restype = guint32
# key_cell_get_number_of_subkeys(in_kc)
key_cell_get_number_of_subkeys.argtypes = [POINTER(KeyCell)]
key_cell_get_number_of_subkeys.__doc__ = \
"""guint32 key_cell_get_number_of_subkeys(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:222"""
# ../../../rregadmin/hive/key_cell.h 228
key_cell_get_subkey = _libraries['librregadmin.so.1'].key_cell_get_subkey
key_cell_get_subkey.restype = POINTER(KeyCell)
# key_cell_get_subkey(in_kc, in_index)
key_cell_get_subkey.argtypes = [POINTER(KeyCell), guint32]
key_cell_get_subkey.__doc__ = \
"""KeyCell * key_cell_get_subkey(KeyCell * in_kc, guint32 in_index)
../../../rregadmin/hive/key_cell.h:228"""
# ../../../rregadmin/hive/key_cell.h 234
key_cell_get_subkey_str = _libraries['librregadmin.so.1'].key_cell_get_subkey_str
key_cell_get_subkey_str.restype = POINTER(KeyCell)
# key_cell_get_subkey_str(in_kc, in_name)
key_cell_get_subkey_str.argtypes = [POINTER(KeyCell), STRING]
key_cell_get_subkey_str.__doc__ = \
"""KeyCell * key_cell_get_subkey_str(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:234"""
# ../../../rregadmin/hive/key_cell.h 240
key_cell_get_subkey_ustr = _libraries['librregadmin.so.1'].key_cell_get_subkey_ustr
key_cell_get_subkey_ustr.restype = POINTER(KeyCell)
# key_cell_get_subkey_ustr(in_kc, in_name)
key_cell_get_subkey_ustr.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_get_subkey_ustr.__doc__ = \
"""KeyCell * key_cell_get_subkey_ustr(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:240"""
# ../../../rregadmin/hive/key_cell.h 246
key_cell_delete_subkey = _libraries['librregadmin.so.1'].key_cell_delete_subkey
key_cell_delete_subkey.restype = gboolean
# key_cell_delete_subkey(in_kc, in_index)
key_cell_delete_subkey.argtypes = [POINTER(KeyCell), guint32]
key_cell_delete_subkey.__doc__ = \
"""gboolean key_cell_delete_subkey(KeyCell * in_kc, guint32 in_index)
../../../rregadmin/hive/key_cell.h:246"""
# ../../../rregadmin/hive/key_cell.h 252
key_cell_delete_subkey_str = _libraries['librregadmin.so.1'].key_cell_delete_subkey_str
key_cell_delete_subkey_str.restype = gboolean
# key_cell_delete_subkey_str(in_kc, in_name)
key_cell_delete_subkey_str.argtypes = [POINTER(KeyCell), STRING]
key_cell_delete_subkey_str.__doc__ = \
"""gboolean key_cell_delete_subkey_str(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:252"""
# ../../../rregadmin/hive/key_cell.h 258
key_cell_delete_subkey_ustr = _libraries['librregadmin.so.1'].key_cell_delete_subkey_ustr
key_cell_delete_subkey_ustr.restype = gboolean
# key_cell_delete_subkey_ustr(in_kc, in_name)
key_cell_delete_subkey_ustr.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_delete_subkey_ustr.__doc__ = \
"""gboolean key_cell_delete_subkey_ustr(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:258"""
# ../../../rregadmin/hive/key_cell.h 264
key_cell_add_subkey = _libraries['librregadmin.so.1'].key_cell_add_subkey
key_cell_add_subkey.restype = POINTER(KeyCell)
# key_cell_add_subkey(in_kc, in_name)
key_cell_add_subkey.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_add_subkey.__doc__ = \
"""KeyCell * key_cell_add_subkey(KeyCell * in_kc, unknown * in_name)
../../../rregadmin/hive/key_cell.h:264"""
# ../../../rregadmin/hive/key_cell.h 273
key_cell_set_key_list_cell = _libraries['librregadmin.so.1'].key_cell_set_key_list_cell
key_cell_set_key_list_cell.restype = gboolean
# key_cell_set_key_list_cell(in_kc, in_klc, in_recursive)
key_cell_set_key_list_cell.argtypes = [POINTER(KeyCell), POINTER(KeyListCell), gboolean]
key_cell_set_key_list_cell.__doc__ = \
"""gboolean key_cell_set_key_list_cell(KeyCell * in_kc, KeyListCell * in_klc, gboolean in_recursive)
../../../rregadmin/hive/key_cell.h:273"""
# ../../../rregadmin/hive/key_cell.h 281
key_cell_get_key_list_cell = _libraries['librregadmin.so.1'].key_cell_get_key_list_cell
key_cell_get_key_list_cell.restype = POINTER(KeyListCell)
# key_cell_get_key_list_cell(in_kc)
key_cell_get_key_list_cell.argtypes = [POINTER(KeyCell)]
key_cell_get_key_list_cell.__doc__ = \
"""KeyListCell * key_cell_get_key_list_cell(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:281"""
# ../../../rregadmin/hive/key_cell.h 289
key_cell_get_value_list_cell = _libraries['librregadmin.so.1'].key_cell_get_value_list_cell
key_cell_get_value_list_cell.restype = POINTER(ValueListCell)
# key_cell_get_value_list_cell(in_kc)
key_cell_get_value_list_cell.argtypes = [POINTER(KeyCell)]
key_cell_get_value_list_cell.__doc__ = \
"""ValueListCell * key_cell_get_value_list_cell(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:289"""
# ../../../rregadmin/hive/key_cell.h 297
key_cell_get_security_descriptor_cell = _libraries['librregadmin.so.1'].key_cell_get_security_descriptor_cell
key_cell_get_security_descriptor_cell.restype = POINTER(SecurityDescriptorCell)
# key_cell_get_security_descriptor_cell(in_kc)
key_cell_get_security_descriptor_cell.argtypes = [POINTER(KeyCell)]
key_cell_get_security_descriptor_cell.__doc__ = \
"""SecurityDescriptorCell * key_cell_get_security_descriptor_cell(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:297"""
# ../../../rregadmin/secdesc/security_descriptor.h 62
class SecurityDescriptor_(Structure):
    """Opaque ctypes stand-in for the C SecurityDescriptor struct.

    No field layout is exposed (``_fields_`` is assigned empty near the
    end of this module); instances are only handled through POINTER().
    """
    pass
SecurityDescriptor = SecurityDescriptor_
# Remaining generated bindings: security-descriptor accessors and the
# debug/pretty/XML/parseable output helpers.  Same xml2py pattern as above;
# do not hand-edit.
# ../../../rregadmin/hive/key_cell.h 303
key_cell_get_secdesc = _libraries['librregadmin.so.1'].key_cell_get_secdesc
key_cell_get_secdesc.restype = POINTER(SecurityDescriptor)
# key_cell_get_secdesc(in_kc)
key_cell_get_secdesc.argtypes = [POINTER(KeyCell)]
key_cell_get_secdesc.__doc__ = \
"""unknown * key_cell_get_secdesc(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:303"""
# ../../../rregadmin/hive/key_cell.h 310
key_cell_set_secdesc = _libraries['librregadmin.so.1'].key_cell_set_secdesc
key_cell_set_secdesc.restype = gboolean
# key_cell_set_secdesc(in_kc, in_secdesc)
key_cell_set_secdesc.argtypes = [POINTER(KeyCell), POINTER(SecurityDescriptor)]
key_cell_set_secdesc.__doc__ = \
"""gboolean key_cell_set_secdesc(KeyCell * in_kc, unknown * in_secdesc)
../../../rregadmin/hive/key_cell.h:310"""
# ../../../rregadmin/hive/key_cell.h 318
key_cell_get_classname_value_cell = _libraries['librregadmin.so.1'].key_cell_get_classname_value_cell
key_cell_get_classname_value_cell.restype = POINTER(ValueCell)
# key_cell_get_classname_value_cell(in_kc)
key_cell_get_classname_value_cell.argtypes = [POINTER(KeyCell)]
key_cell_get_classname_value_cell.__doc__ = \
"""ValueCell * key_cell_get_classname_value_cell(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:318"""
# ../../../rregadmin/hive/key_cell.h 326
key_cell_debug_print = _libraries['librregadmin.so.1'].key_cell_debug_print
key_cell_debug_print.restype = None
# key_cell_debug_print(in_kc)
key_cell_debug_print.argtypes = [POINTER(KeyCell)]
key_cell_debug_print.__doc__ = \
"""void key_cell_debug_print(KeyCell * in_kc)
../../../rregadmin/hive/key_cell.h:326"""
# ../../../rregadmin/hive/key_cell.h 334
key_cell_pretty_print = _libraries['librregadmin.so.1'].key_cell_pretty_print
key_cell_pretty_print.restype = None
# key_cell_pretty_print(in_kc, in_pathname)
key_cell_pretty_print.argtypes = [POINTER(KeyCell), STRING]
key_cell_pretty_print.__doc__ = \
"""void key_cell_pretty_print(KeyCell * in_kc, unknown * in_pathname)
../../../rregadmin/hive/key_cell.h:334"""
# ../../../rregadmin/hive/key_cell.h 341
key_cell_get_pretty_output = _libraries['librregadmin.so.1'].key_cell_get_pretty_output
key_cell_get_pretty_output.restype = gboolean
# key_cell_get_pretty_output(in_kc, in_pathname, in_output)
key_cell_get_pretty_output.argtypes = [POINTER(KeyCell), STRING, POINTER(ustring)]
key_cell_get_pretty_output.__doc__ = \
"""gboolean key_cell_get_pretty_output(KeyCell * in_kc, unknown * in_pathname, ustring * in_output)
../../../rregadmin/hive/key_cell.h:341"""
# ../../../rregadmin/hive/key_cell.h 347
key_cell_get_xml_output = _libraries['librregadmin.so.1'].key_cell_get_xml_output
key_cell_get_xml_output.restype = gboolean
# key_cell_get_xml_output(in_kc, in_output)
key_cell_get_xml_output.argtypes = [POINTER(KeyCell), POINTER(ustring)]
key_cell_get_xml_output.__doc__ = \
"""gboolean key_cell_get_xml_output(KeyCell * in_kc, ustring * in_output)
../../../rregadmin/hive/key_cell.h:347"""
# ../../../rregadmin/hive/key_cell.h 355
key_cell_get_parseable_output = _libraries['librregadmin.so.1'].key_cell_get_parseable_output
key_cell_get_parseable_output.restype = gboolean
# key_cell_get_parseable_output(in_kc, in_pathname, in_output)
key_cell_get_parseable_output.argtypes = [POINTER(KeyCell), STRING, POINTER(ustring)]
key_cell_get_parseable_output.__doc__ = \
"""gboolean key_cell_get_parseable_output(KeyCell * in_kc, unknown * in_pathname, ustring * in_output)
../../../rregadmin/hive/key_cell.h:355"""
# Intentionally empty field list: the generator had no layout for
# SecurityDescriptor, so the struct stays opaque and is usable only
# behind a POINTER().
SecurityDescriptor_._fields_ = [
    # ../../../rregadmin/secdesc/security_descriptor.h 62
]
# Explicit public API of this generated binding module (consumed by
# `from ... import *`); order is as emitted by xml2py.
__all__ = ['key_cell_get_secdesc', 'key_cell_get_subkey_str',
           'key_cell_get_value_str', 'key_cell_get_value_list_cell',
           'key_cell_alloc_root', 'key_cell_get_value_ustr',
           'key_cell_set_secdesc', 'key_cell_get_number_of_subkeys',
           'SecurityDescriptor_', 'key_cell_get_type_id',
           'key_cell_get_subkey_ustr',
           'key_cell_get_parseable_output', 'SecurityDescriptor',
           'key_cell_get_classname_value_cell',
           'key_cell_get_pretty_output', 'key_cell_get_classname',
           'key_cell_get_number_of_values',
           'key_cell_get_key_list_cell', 'key_cell_debug_print',
           'key_cell_type', 'key_cell_delete_subkey_ustr',
           'key_cell_get_name', 'key_cell_add_subkey',
           'key_cell_alloc', 'key_cell_delete_value_str',
           'key_cell_pretty_print', 'key_cell_add_value',
           'key_cell_unalloc', 'key_cell_delete_value_ustr',
           'key_cell_from_cell', 'key_cell_set_key_list_cell',
           'key_cell_get_type_str', 'key_cell_get_subkey',
           'key_cell_get_value', 'key_cell_to_cell',
           'key_cell_delete_subkey', 'key_cell_is_valid',
           'key_cell_get_parent',
           'key_cell_get_security_descriptor_cell',
           'key_cell_get_xml_output', 'key_cell_compare_names',
           'key_cell_delete_subkey_str', 'key_cell_delete_value']
|
gpl-2.0
|
jonathonwalz/ansible
|
lib/ansible/modules/network/cloudengine/ce_bgp_neighbor_af.py
|
26
|
109137
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity metadata consumed by Ansible tooling: the interface is
# still "preview" (may change) and support comes from the community.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_bgp_neighbor_af
version_added: "2.4"
short_description: Manages BGP neighbor Address-family configuration on HUAWEI CloudEngine switches.
description:
- Manages BGP neighbor Address-family configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
vrf_name:
description:
- Name of a BGP instance. The name is a case-sensitive string of characters.
The BGP instance can be used only after the corresponding VPN instance is created.
required: true
af_type:
description:
- Address family type of a BGP instance.
required: true
choices: ['ipv4uni', 'ipv4multi', 'ipv4vpn', 'ipv6uni', 'ipv6vpn', 'evpn']
remote_address:
description:
- IPv4 or IPv6 peer connection address.
required: true
advertise_irb:
description:
- If the value is true, advertised IRB routes are distinguished.
If the value is false, advertised IRB routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_arp:
description:
- If the value is true, advertised ARP routes are distinguished.
If the value is false, advertised ARP routes are not distinguished.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_remote_nexthop:
description:
- If the value is true, the remote next-hop attribute is advertised to peers.
If the value is false, the remote next-hop attribute is not advertised to any peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_community:
description:
- If the value is true, the community attribute is advertised to peers.
If the value is false, the community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
advertise_ext_community:
description:
- If the value is true, the extended community attribute is advertised to peers.
If the value is false, the extended community attribute is not advertised to peers.
required: false
default: no_use
choices: ['no_use','true','false']
discard_ext_community:
description:
- If the value is true, the extended community attribute in the peer route information is discarded.
If the value is false, the extended community attribute in the peer route information is not discarded.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_enable:
description:
- If the value is true, repetitive local AS numbers are allowed.
If the value is false, repetitive local AS numbers are not allowed.
required: false
default: no_use
choices: ['no_use','true','false']
allow_as_loop_limit:
description:
- Set the maximum number of repetitive local AS number.
The value is an integer ranging from 1 to 10.
required: false
default: null
keep_all_routes:
description:
- If the value is true, the system stores all route update messages received from all peers (groups)
after BGP connection setup.
If the value is false, the system stores only BGP update messages that are received from peers
and pass the configured import policy.
required: false
default: no_use
choices: ['no_use','true','false']
nexthop_configure:
description:
- null, The next hop is not changed.
local, The next hop is changed to the local IP address.
invariable, Prevent the device from changing the next hop of each imported IGP route
when advertising it to its BGP peers.
required: false
default: null
choices: ['null', 'local', 'invariable']
preferred_value:
description:
- Assign a preferred value for the routes learned from a specified peer.
The value is an integer ranging from 0 to 65535.
required: false
default: null
public_as_only:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_force:
description:
- If the value is true, sent BGP update messages carry only the public AS number but do not carry
private AS numbers.
If the value is false, sent BGP update messages can carry private AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_limited:
description:
            - Limit the use of public AS numbers.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_replace:
description:
- Private as replaced by public as number.
required: false
default: no_use
choices: ['no_use','true','false']
public_as_only_skip_peer_as:
description:
- Public as only skip peer as.
required: false
default: no_use
choices: ['no_use','true','false']
route_limit:
description:
- Configure the maximum number of routes that can be accepted from a peer.
The value is an integer ranging from 1 to 4294967295.
required: false
default: null
route_limit_percent:
description:
- Specify the percentage of routes when a router starts to generate an alarm.
The value is an integer ranging from 1 to 100.
required: false
default: null
route_limit_type:
description:
- Noparameter, After the number of received routes exceeds the threshold and the timeout
              timer expires, no action.
AlertOnly, An alarm is generated and no additional routes will be accepted if the maximum
number of routes allowed have been received.
IdleForever, The connection that is interrupted is not automatically re-established if the
maximum number of routes allowed have been received.
IdleTimeout, After the number of received routes exceeds the threshold and the timeout timer
expires, the connection that is interrupted is automatically re-established.
required: false
default: null
choices: ['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']
route_limit_idle_timeout:
description:
- Specify the value of the idle-timeout timer to automatically reestablish the connections after
they are cut off when the number of routes exceeds the set threshold.
The value is an integer ranging from 1 to 1200.
required: false
default: null
rt_updt_interval:
description:
- Specify the minimum interval at which Update packets are sent. The value is an integer, in seconds.
The value is an integer ranging from 0 to 600.
required: false
default: null
redirect_ip:
description:
- Redirect ip.
required: false
default: no_use
choices: ['no_use','true','false']
redirect_ip_vaildation:
description:
            - Redirect IP validation.
required: false
default: no_use
choices: ['no_use','true','false']
reflect_client:
description:
- If the value is true, the local device functions as the route reflector and a peer functions
as a client of the route reflector.
If the value is false, the route reflector and client functions are not configured.
required: false
default: no_use
choices: ['no_use','true','false']
substitute_as_enable:
description:
- If the value is true, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is enabled.
If the value is false, the function to replace a specified peer's AS number in the AS-Path attribute with
the local AS number is disabled.
required: false
default: no_use
choices: ['no_use','true','false']
import_rt_policy_name:
description:
- Specify the filtering policy applied to the routes learned from a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
export_rt_policy_name:
description:
- Specify the filtering policy applied to the routes to be advertised to a peer.
The value is a string of 1 to 40 characters.
required: false
default: null
import_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes received from a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
export_pref_filt_name:
description:
- Specify the IPv4 filtering policy applied to the routes to be advertised to a specified peer.
The value is a string of 1 to 169 characters.
required: false
default: null
import_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes received from a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
export_as_path_filter:
description:
- Apply an AS_Path-based filtering policy to the routes to be advertised to a specified peer.
The value is an integer ranging from 1 to 256.
required: false
default: null
import_as_path_name_or_num:
description:
- A routing strategy based on the AS path list for routing received by a designated peer.
required: false
default: null
export_as_path_name_or_num:
description:
- Application of a AS path list based filtering policy to the routing of a specified peer.
required: false
default: null
import_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes received from a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
export_acl_name_or_num:
description:
- Apply an IPv4 ACL-based filtering policy to the routes to be advertised to a specified peer.
The value is a string of 1 to 32 characters.
required: false
default: null
ipprefix_orf_enable:
description:
- If the value is true, the address prefix-based Outbound Route Filter (ORF) capability is
enabled for peers.
If the value is false, the address prefix-based Outbound Route Filter (ORF) capability is
disabled for peers.
required: false
default: no_use
choices: ['no_use','true','false']
is_nonstd_ipprefix_mod:
description:
- If the value is true, Non-standard capability codes are used during capability negotiation.
If the value is false, RFC-defined standard ORF capability codes are used during capability negotiation.
required: false
default: no_use
choices: ['no_use','true','false']
orftype:
description:
- ORF Type.
The value is an integer ranging from 0 to 65535.
required: false
default: null
orf_mode:
description:
- ORF mode.
null, Default value.
receive, ORF for incoming packets.
send, ORF for outgoing packets.
both, ORF for incoming and outgoing packets.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
soostring:
description:
- Configure the Site-of-Origin (SoO) extended community attribute.
The value is a string of 3 to 21 characters.
required: false
default: null
default_rt_adv_enable:
description:
- If the value is true, the function to advertise default routes to peers is enabled.
If the value is false, the function to advertise default routes to peers is disabled.
required: false
default: no_use
choices: ['no_use','true', 'false']
default_rt_adv_policy:
description:
- Specify the name of a used policy. The value is a string.
The value is a string of 1 to 40 characters.
required: false
default: null
default_rt_match_mode:
description:
- null, Null.
matchall, Advertise the default route if all matching conditions are met.
matchany, Advertise the default route if any matching condition is met.
required: false
default: null
choices: ['null', 'matchall', 'matchany']
add_path_mode:
description:
- null, Null.
receive, Support receiving Add-Path routes.
send, Support sending Add-Path routes.
both, Support receiving and sending Add-Path routes.
required: false
default: null
choices: ['null', 'receive', 'send', 'both']
adv_add_path_num:
description:
- The number of addPath advertise route.
The value is an integer ranging from 2 to 64.
required: false
default: null
origin_as_valid:
description:
- If the value is true, Application results of route announcement.
If the value is false, Routing application results are not notified.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_enable:
description:
- If the value is true, vpls enable.
If the value is false, vpls disable.
required: false
default: no_use
choices: ['no_use','true', 'false']
vpls_ad_disable:
description:
- If the value is true, enable vpls-ad.
If the value is false, disable vpls-ad.
required: false
default: no_use
choices: ['no_use','true', 'false']
update_pkt_standard_compatible:
description:
- If the value is true, When the vpnv4 multicast neighbor receives and updates the message,
the message has no label.
If the value is false, When the vpnv4 multicast neighbor receives and updates the message,
the message has label.
required: false
default: no_use
choices: ['no_use','true', 'false']
'''
EXAMPLES = '''
- name: CloudEngine BGP neighbor address family test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config BGP peer Address_Family"
ce_bgp_neighbor_af:
state: present
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
- name: "Undo BGP peer Address_Family"
ce_bgp_neighbor_af:
state: absent
vrf_name: js
af_type: ipv4uni
remote_address: 192.168.10.10
nexthop_configure: local
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"af_type": "ipv4uni", "nexthop_configure": "local",
"remote_address": "192.168.10.10",
"state": "present", "vrf_name": "js"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "null",
"vrf_name": "js"}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"bgp neighbor af": {"af_type": "ipv4uni", "remote_address": "192.168.10.10",
"vrf_name": "js"},
"bgp neighbor af other": {"af_type": "ipv4uni", "nexthop_configure": "local",
"vrf_name": "js"}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["peer 192.168.10.10 next-hop-local"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_nc_config, set_nc_config, ce_argument_spec, check_ip_addr
# get bgp peer af
CE_GET_BGP_PEER_AF_HEADER = """
<filter type="subtree">
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF>
<remoteAddress></remoteAddress>
"""
CE_GET_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</filter>
"""
# merge bgp peer af
CE_MERGE_BGP_PEER_AF_HEADER = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="merge">
<remoteAddress>%s</remoteAddress>
"""
CE_MERGE_BGP_PEER_AF_TAIL = """
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# create bgp peer af
CE_CREATE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="create">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
# delete bgp peer af
CE_DELETE_BGP_PEER_AF = """
<config>
<bgp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<bgpcomm>
<bgpVrfs>
<bgpVrf>
<vrfName>%s</vrfName>
<bgpVrfAFs>
<bgpVrfAF>
<afType>%s</afType>
<peerAFs>
<peerAF operation="delete">
<remoteAddress>%s</remoteAddress>
</peerAF>
</peerAFs>
</bgpVrfAF>
</bgpVrfAFs>
</bgpVrf>
</bgpVrfs>
</bgpcomm>
</bgp>
</config>
"""
class BgpNeighborAf(object):
""" Manages BGP neighbor Address-family configuration """
def netconf_get_config(self, **kwargs):
""" netconf_get_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" netconf_set_config """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_bgp_neighbor_af_args(self, **kwargs):
""" check_bgp_neighbor_af_args """
module = kwargs["module"]
result = dict()
need_cfg = False
vrf_name = module.params['vrf_name']
if vrf_name:
if len(vrf_name) > 31 or len(vrf_name) == 0:
module.fail_json(
msg='Error: The len of vrf_name %s is out of [1 - 31].' % vrf_name)
state = module.params['state']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
if not check_ip_addr(ipaddr=remote_address):
module.fail_json(
msg='Error: The remote_address %s is invalid.' % remote_address)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if state == "present":
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
if re_find:
result["remote_address"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != remote_address:
need_cfg = True
else:
need_cfg = True
else:
if "<data/>" in recv_xml:
pass
else:
re_find = re.findall(
r'.*<remoteAddress>(.*)</remoteAddress>.*', recv_xml)
if re_find:
result["remote_address"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] == remote_address:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_bgp_neighbor_af_other(self, **kwargs):
""" check_bgp_neighbor_af_other """
module = kwargs["module"]
result = dict()
need_cfg = False
state = module.params['state']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
if state == "absent":
result["need_cfg"] = need_cfg
return result
advertise_irb = module.params['advertise_irb']
if advertise_irb != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseIrb></advertiseIrb>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseIrb>(.*)</advertiseIrb>.*', recv_xml)
if re_find:
result["advertise_irb"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_irb:
need_cfg = True
else:
need_cfg = True
advertise_arp = module.params['advertise_arp']
if advertise_arp != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseArp></advertiseArp>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseArp>(.*)</advertiseArp>.*', recv_xml)
if re_find:
result["advertise_arp"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_arp:
need_cfg = True
else:
need_cfg = True
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if advertise_remote_nexthop != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseRemoteNexthop></advertiseRemoteNexthop>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseRemoteNexthop>(.*)</advertiseRemoteNexthop>.*', recv_xml)
if re_find:
result["advertise_remote_nexthop"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_remote_nexthop:
need_cfg = True
else:
need_cfg = True
advertise_community = module.params['advertise_community']
if advertise_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseCommunity></advertiseCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseCommunity>(.*)</advertiseCommunity>.*', recv_xml)
if re_find:
result["advertise_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_community:
need_cfg = True
else:
need_cfg = True
advertise_ext_community = module.params['advertise_ext_community']
if advertise_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advertiseExtCommunity></advertiseExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advertiseExtCommunity>(.*)</advertiseExtCommunity>.*', recv_xml)
if re_find:
result["advertise_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != advertise_ext_community:
need_cfg = True
else:
need_cfg = True
discard_ext_community = module.params['discard_ext_community']
if discard_ext_community != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<discardExtCommunity></discardExtCommunity>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<discardExtCommunity>(.*)</discardExtCommunity>.*', recv_xml)
if re_find:
result["discard_ext_community"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != discard_ext_community:
need_cfg = True
else:
need_cfg = True
allow_as_loop_enable = module.params['allow_as_loop_enable']
if allow_as_loop_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopEnable></allowAsLoopEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopEnable>(.*)</allowAsLoopEnable>.*', recv_xml)
if re_find:
result["allow_as_loop_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_enable:
need_cfg = True
else:
need_cfg = True
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
if int(allow_as_loop_limit) > 10 or int(allow_as_loop_limit) < 1:
module.fail_json(
msg='the value of allow_as_loop_limit %s is out of [1 - 10].' % allow_as_loop_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<allowAsLoopLimit></allowAsLoopLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<allowAsLoopLimit>(.*)</allowAsLoopLimit>.*', recv_xml)
if re_find:
result["allow_as_loop_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != allow_as_loop_limit:
need_cfg = True
else:
need_cfg = True
keep_all_routes = module.params['keep_all_routes']
if keep_all_routes != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<keepAllRoutes></keepAllRoutes>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<keepAllRoutes>(.*)</keepAllRoutes>.*', recv_xml)
if re_find:
result["keep_all_routes"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != keep_all_routes:
need_cfg = True
else:
need_cfg = True
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<nextHopConfigure></nextHopConfigure>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<nextHopConfigure>(.*)</nextHopConfigure>.*', recv_xml)
if re_find:
result["nexthop_configure"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != nexthop_configure:
need_cfg = True
else:
need_cfg = True
preferred_value = module.params['preferred_value']
if preferred_value:
if int(preferred_value) > 65535 or int(preferred_value) < 0:
module.fail_json(
msg='the value of preferred_value %s is out of [0 - 65535].' % preferred_value)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<preferredValue></preferredValue>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<preferredValue>(.*)</preferredValue>.*', recv_xml)
if re_find:
result["preferred_value"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != preferred_value:
need_cfg = True
else:
need_cfg = True
public_as_only = module.params['public_as_only']
if public_as_only != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnly></publicAsOnly>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnly>(.*)</publicAsOnly>.*', recv_xml)
if re_find:
result["public_as_only"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only:
need_cfg = True
else:
need_cfg = True
public_as_only_force = module.params['public_as_only_force']
if public_as_only_force != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyForce></publicAsOnlyForce>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyForce>(.*)</publicAsOnlyForce>.*', recv_xml)
if re_find:
result["public_as_only_force"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_force:
need_cfg = True
else:
need_cfg = True
public_as_only_limited = module.params['public_as_only_limited']
if public_as_only_limited != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyLimited></publicAsOnlyLimited>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyLimited>(.*)</publicAsOnlyLimited>.*', recv_xml)
if re_find:
result["public_as_only_limited"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_limited:
need_cfg = True
else:
need_cfg = True
public_as_only_replace = module.params['public_as_only_replace']
if public_as_only_replace != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlyReplace></publicAsOnlyReplace>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlyReplace>(.*)</publicAsOnlyReplace>.*', recv_xml)
if re_find:
result["public_as_only_replace"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_replace:
need_cfg = True
else:
need_cfg = True
public_as_only_skip_peer_as = module.params[
'public_as_only_skip_peer_as']
if public_as_only_skip_peer_as != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<publicAsOnlySkipPeerAs></publicAsOnlySkipPeerAs>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<publicAsOnlySkipPeerAs>(.*)</publicAsOnlySkipPeerAs>.*', recv_xml)
if re_find:
result["public_as_only_skip_peer_as"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != public_as_only_skip_peer_as:
need_cfg = True
else:
need_cfg = True
route_limit = module.params['route_limit']
if route_limit:
if int(route_limit) < 1:
module.fail_json(
msg='the value of route_limit %s is out of [1 - 4294967295].' % route_limit)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimit></routeLimit>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimit>(.*)</routeLimit>.*', recv_xml)
if re_find:
result["route_limit"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit:
need_cfg = True
else:
need_cfg = True
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
if int(route_limit_percent) < 1 or int(route_limit_percent) > 100:
module.fail_json(
msg='Error: The value of route_limit_percent %s is out of [1 - 100].' % route_limit_percent)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitPercent></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitPercent>(.*)</routeLimitPercent>.*', recv_xml)
if re_find:
result["route_limit_percent"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_percent:
need_cfg = True
else:
need_cfg = True
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitType></routeLimitType>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitType>(.*)</routeLimitType>.*', recv_xml)
if re_find:
result["route_limit_type"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_type:
need_cfg = True
else:
need_cfg = True
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
if int(route_limit_idle_timeout) < 1 or int(route_limit_idle_timeout) > 1200:
module.fail_json(
msg='Error: The value of route_limit_idle_timeout %s is out of '
'[1 - 1200].' % route_limit_idle_timeout)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<routeLimitIdleTimeout></routeLimitPercent>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<routeLimitIdleTimeout>(.*)</routeLimitIdleTimeout>.*', recv_xml)
if re_find:
result["route_limit_idle_timeout"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != route_limit_idle_timeout:
need_cfg = True
else:
need_cfg = True
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
if int(rt_updt_interval) < 0 or int(rt_updt_interval) > 600:
module.fail_json(
msg='Error: The value of rt_updt_interval %s is out of [0 - 600].' % rt_updt_interval)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<rtUpdtInterval></rtUpdtInterval>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<rtUpdtInterval>(.*)</rtUpdtInterval>.*', recv_xml)
if re_find:
result["rt_updt_interval"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != rt_updt_interval:
need_cfg = True
else:
need_cfg = True
redirect_ip = module.params['redirect_ip']
if redirect_ip != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIP></redirectIP>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIP>(.*)</redirectIP>.*', recv_xml)
if re_find:
result["redirect_ip"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip:
need_cfg = True
else:
need_cfg = True
redirect_ip_vaildation = module.params['redirect_ip_vaildation']
if redirect_ip_vaildation != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<redirectIPVaildation></redirectIPVaildation>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<redirectIPVaildation>(.*)</redirectIPVaildation>.*', recv_xml)
if re_find:
result["redirect_ip_vaildation"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != redirect_ip_vaildation:
need_cfg = True
else:
need_cfg = True
reflect_client = module.params['reflect_client']
if reflect_client != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<reflectClient></reflectClient>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<reflectClient>(.*)</reflectClient>.*', recv_xml)
if re_find:
result["reflect_client"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != reflect_client:
need_cfg = True
else:
need_cfg = True
substitute_as_enable = module.params['substitute_as_enable']
if substitute_as_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<substituteAsEnable></substituteAsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<substituteAsEnable>(.*)</substituteAsEnable>.*', recv_xml)
if re_find:
result["substitute_as_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != substitute_as_enable:
need_cfg = True
else:
need_cfg = True
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
if len(import_rt_policy_name) < 1 or len(import_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of import_rt_policy_name %s is out of [1 - 40].' % import_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importRtPolicyName></importRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importRtPolicyName>(.*)</importRtPolicyName>.*', recv_xml)
if re_find:
result["import_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_rt_policy_name:
need_cfg = True
else:
need_cfg = True
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
if len(export_rt_policy_name) < 1 or len(export_rt_policy_name) > 40:
module.fail_json(
msg='Error: The len of export_rt_policy_name %s is out of [1 - 40].' % export_rt_policy_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportRtPolicyName></exportRtPolicyName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportRtPolicyName>(.*)</exportRtPolicyName>.*', recv_xml)
if re_find:
result["export_rt_policy_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_rt_policy_name:
need_cfg = True
else:
need_cfg = True
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
if len(import_pref_filt_name) < 1 or len(import_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of import_pref_filt_name %s is out of [1 - 169].' % import_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importPrefFiltName></importPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importPrefFiltName>(.*)</importPrefFiltName>.*', recv_xml)
if re_find:
result["import_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_pref_filt_name:
need_cfg = True
else:
need_cfg = True
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
if len(export_pref_filt_name) < 1 or len(export_pref_filt_name) > 169:
module.fail_json(
msg='Error: The len of export_pref_filt_name %s is out of [1 - 169].' % export_pref_filt_name)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportPrefFiltName></exportPrefFiltName>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportPrefFiltName>(.*)</exportPrefFiltName>.*', recv_xml)
if re_find:
result["export_pref_filt_name"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_pref_filt_name:
need_cfg = True
else:
need_cfg = True
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
if int(import_as_path_filter) < 1 or int(import_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of import_as_path_filter %s is out of [1 - 256].' % import_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathFilter></importAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathFilter>(.*)</importAsPathFilter>.*', recv_xml)
if re_find:
result["import_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_filter:
need_cfg = True
else:
need_cfg = True
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
if int(export_as_path_filter) < 1 or int(export_as_path_filter) > 256:
module.fail_json(
msg='Error: The value of export_as_path_filter %s is out of [1 - 256].' % export_as_path_filter)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathFilter></exportAsPathFilter>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathFilter>(.*)</exportAsPathFilter>.*', recv_xml)
if re_find:
result["export_as_path_filter"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_filter:
need_cfg = True
else:
need_cfg = True
import_as_path_name_or_num = module.params[
'import_as_path_name_or_num']
if import_as_path_name_or_num:
if len(import_as_path_name_or_num) < 1 or len(import_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of import_as_path_name_or_num %s is out '
'of [1 - 51].' % import_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAsPathNameOrNum></importAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAsPathNameOrNum>(.*)</importAsPathNameOrNum>.*', recv_xml)
if re_find:
result["import_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
export_as_path_name_or_num = module.params[
'export_as_path_name_or_num']
if export_as_path_name_or_num:
if len(export_as_path_name_or_num) < 1 or len(export_as_path_name_or_num) > 51:
module.fail_json(
msg='Error: The len of export_as_path_name_or_num %s is out '
'of [1 - 51].' % export_as_path_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAsPathNameOrNum></exportAsPathNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAsPathNameOrNum>(.*)</exportAsPathNameOrNum>.*', recv_xml)
if re_find:
result["export_as_path_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_as_path_name_or_num:
need_cfg = True
else:
need_cfg = True
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
if len(import_acl_name_or_num) < 1 or len(import_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of import_acl_name_or_num %s is out of [1 - 32].' % import_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<importAclNameOrNum></importAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<importAclNameOrNum>(.*)</importAclNameOrNum>.*', recv_xml)
if re_find:
result["import_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != import_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
if len(export_acl_name_or_num) < 1 or len(export_acl_name_or_num) > 32:
module.fail_json(
msg='Error: The len of export_acl_name_or_num %s is out of [1 - 32].' % export_acl_name_or_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<exportAclNameOrNum></exportAclNameOrNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<exportAclNameOrNum>(.*)</exportAclNameOrNum>.*', recv_xml)
if re_find:
result["export_acl_name_or_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != export_acl_name_or_num:
need_cfg = True
else:
need_cfg = True
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if ipprefix_orf_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<ipprefixOrfEnable></ipprefixOrfEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<ipprefixOrfEnable>(.*)</ipprefixOrfEnable>.*', recv_xml)
if re_find:
result["ipprefix_orf_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != ipprefix_orf_enable:
need_cfg = True
else:
need_cfg = True
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if is_nonstd_ipprefix_mod != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<isNonstdIpprefixMod></isNonstdIpprefixMod>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<isNonstdIpprefixMod>(.*)</isNonstdIpprefixMod>.*', recv_xml)
if re_find:
result["is_nonstd_ipprefix_mod"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != is_nonstd_ipprefix_mod:
need_cfg = True
else:
need_cfg = True
orftype = module.params['orftype']
if orftype:
if int(orftype) < 0 or int(orftype) > 65535:
module.fail_json(
msg='Error: The value of orftype %s is out of [0 - 65535].' % orftype)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orftype></orftype>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orftype>(.*)</orftype>.*', recv_xml)
if re_find:
result["orftype"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orftype:
need_cfg = True
else:
need_cfg = True
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<orfMode></orfMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<orfMode>(.*)</orfMode>.*', recv_xml)
if re_find:
result["orf_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != orf_mode:
need_cfg = True
else:
need_cfg = True
soostring = module.params['soostring']
if soostring:
if len(soostring) < 3 or len(soostring) > 21:
module.fail_json(
msg='Error: The len of soostring %s is out of [3 - 21].' % soostring)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<soostring></soostring>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<soostring>(.*)</soostring>.*', recv_xml)
if re_find:
result["soostring"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != soostring:
need_cfg = True
else:
need_cfg = True
default_rt_adv_enable = module.params['default_rt_adv_enable']
if default_rt_adv_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvEnable></defaultRtAdvEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvEnable>(.*)</defaultRtAdvEnable>.*', recv_xml)
if re_find:
result["default_rt_adv_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_enable:
need_cfg = True
else:
need_cfg = True
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
if len(default_rt_adv_policy) < 1 or len(default_rt_adv_policy) > 40:
module.fail_json(
msg='Error: The len of default_rt_adv_policy %s is out of [1 - 40].' % default_rt_adv_policy)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtAdvPolicy></defaultRtAdvPolicy>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtAdvPolicy>(.*)</defaultRtAdvPolicy>.*', recv_xml)
if re_find:
result["default_rt_adv_policy"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_adv_policy:
need_cfg = True
else:
need_cfg = True
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<defaultRtMatchMode></defaultRtMatchMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<defaultRtMatchMode>(.*)</defaultRtMatchMode>.*', recv_xml)
if re_find:
result["default_rt_match_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != default_rt_match_mode:
need_cfg = True
else:
need_cfg = True
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<addPathMode></addPathMode>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<addPathMode>(.*)</addPathMode>.*', recv_xml)
if re_find:
result["add_path_mode"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != add_path_mode:
need_cfg = True
else:
need_cfg = True
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
if int(orftype) < 2 or int(orftype) > 64:
module.fail_json(
msg='Error: The value of adv_add_path_num %s is out of [2 - 64].' % adv_add_path_num)
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<advAddPathNum></advAddPathNum>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<advAddPathNum>(.*)</advAddPathNum>.*', recv_xml)
if re_find:
result["adv_add_path_num"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != adv_add_path_num:
need_cfg = True
else:
need_cfg = True
origin_as_valid = module.params['origin_as_valid']
if origin_as_valid != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<originAsValid></originAsValid>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<originAsValid>(.*)</originAsValid>.*', recv_xml)
if re_find:
result["origin_as_valid"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != origin_as_valid:
need_cfg = True
else:
need_cfg = True
vpls_enable = module.params['vpls_enable']
if vpls_enable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsEnable></vplsEnable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsEnable>(.*)</vplsEnable>.*', recv_xml)
if re_find:
result["vpls_enable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_enable:
need_cfg = True
else:
need_cfg = True
vpls_ad_disable = module.params['vpls_ad_disable']
if vpls_ad_disable != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<vplsAdDisable></vplsAdDisable>" + CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<vplsAdDisable>(.*)</vplsAdDisable>.*', recv_xml)
if re_find:
result["vpls_ad_disable"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != vpls_ad_disable:
need_cfg = True
else:
need_cfg = True
update_pkt_standard_compatible = module.params[
'update_pkt_standard_compatible']
if update_pkt_standard_compatible != 'no_use':
conf_str = CE_GET_BGP_PEER_AF_HEADER % (
vrf_name, af_type) + "<updatePktStandardCompatible></updatePktStandardCompatible>" + \
CE_GET_BGP_PEER_AF_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "<data/>" in recv_xml:
need_cfg = True
else:
re_find = re.findall(
r'.*<updatePktStandardCompatible>(.*)</updatePktStandardCompatible>.*', recv_xml)
if re_find:
result["update_pkt_standard_compatible"] = re_find
result["vrf_name"] = vrf_name
result["af_type"] = af_type
if re_find[0] != update_pkt_standard_compatible:
need_cfg = True
else:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_bgp_peer_af(self, **kwargs):
    """Merge (update) a BGP peer address-family node via NETCONF.

    Keyword Args:
        module: AnsibleModule instance supplying ``vrf_name``, ``af_type``
            and ``remote_address`` parameters.
    Returns:
        list of str: CLI commands equivalent to the change, for display.
    Raises:
        Exits via ``module.fail_json()`` when the device reply lacks <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address) + CE_MERGE_BGP_PEER_AF_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family failed.')

    cmds = []
    # Map each af_type accepted by the argument_spec to its CLI view.
    # The original if/elif chain covered only three values and raised
    # UnboundLocalError for ipv4vpn/ipv6vpn/evpn.
    af_view = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
        "ipv4vpn": "ipv4-family vpnv4",
        "ipv6vpn": "ipv6-family vpnv6",
        "evpn": "l2vpn-family evpn",
    }
    cmd = af_view.get(af_type)
    if cmd:
        cmds.append(cmd)
    cmd = "peer %s" % remote_address
    cmds.append(cmd)

    return cmds
def create_bgp_peer_af(self, **kwargs):
    """Create a BGP peer address-family node via NETCONF.

    Keyword Args:
        module: AnsibleModule instance supplying ``vrf_name``, ``af_type``
            and ``remote_address`` parameters.
    Returns:
        list of str: CLI commands equivalent to the change, for display.
    Raises:
        Exits via ``module.fail_json()`` when the device reply lacks <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_CREATE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Create bgp peer address family failed.')

    cmds = []
    # Full af_type -> CLI-view mapping; the original if/elif chain left
    # `cmd` unbound for ipv4vpn/ipv6vpn/evpn (UnboundLocalError).
    af_view = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
        "ipv4vpn": "ipv4-family vpnv4",
        "ipv6vpn": "ipv6-family vpnv6",
        "evpn": "l2vpn-family evpn",
    }
    cmd = af_view.get(af_type)
    if cmd:
        cmds.append(cmd)
    cmd = "peer %s" % remote_address
    cmds.append(cmd)

    return cmds
def delete_bgp_peer_af(self, **kwargs):
    """Delete a BGP peer address-family node via NETCONF.

    Keyword Args:
        module: AnsibleModule instance supplying ``vrf_name``, ``af_type``
            and ``remote_address`` parameters.
    Returns:
        list of str: CLI commands equivalent to the change, for display.
    Raises:
        Exits via ``module.fail_json()`` when the device reply lacks <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_DELETE_BGP_PEER_AF % (vrf_name, af_type, remote_address)

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Delete bgp peer address family failed.')

    cmds = []
    # Full af_type -> CLI-view mapping; the original if/elif chain left
    # `cmd` unbound for ipv4vpn/ipv6vpn/evpn (UnboundLocalError).
    af_view = {
        "ipv4uni": "ipv4-family unicast",
        "ipv4multi": "ipv4-family multicast",
        "ipv6uni": "ipv6-family unicast",
        "ipv4vpn": "ipv4-family vpnv4",
        "ipv6vpn": "ipv6-family vpnv6",
        "evpn": "l2vpn-family evpn",
    }
    cmd = af_view.get(af_type)
    if cmd:
        cmds.append(cmd)
    cmd = "undo peer %s" % remote_address
    cmds.append(cmd)

    return cmds
def merge_bgp_peer_af_other(self, **kwargs):
    """Merge the optional per-peer address-family settings via NETCONF.

    Builds one <bgp> merge request from every supplied module parameter
    and returns the equivalent CLI commands for display.

    Keyword Args:
        module: AnsibleModule instance.
    Returns:
        list of str: CLI commands equivalent to the change.
    Raises:
        Exits via ``module.fail_json()`` when the device reply lacks <ok/>.
    """
    module = kwargs["module"]
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']

    conf_str = CE_MERGE_BGP_PEER_AF_HEADER % (
        vrf_name, af_type, remote_address)

    cmds = []

    advertise_irb = module.params['advertise_irb']
    if advertise_irb != 'no_use':
        conf_str += "<advertiseIrb>%s</advertiseIrb>" % advertise_irb

        # Fix: original compared against "ture" (typo), so the positive
        # CLI form was unreachable and "undo" was always displayed.
        if advertise_irb == "true":
            cmd = "peer %s advertise irb" % remote_address
        else:
            cmd = "undo peer %s advertise irb" % remote_address
        cmds.append(cmd)

    advertise_arp = module.params['advertise_arp']
    if advertise_arp != 'no_use':
        conf_str += "<advertiseArp>%s</advertiseArp>" % advertise_arp

        # Fix: same "ture" typo as advertise_irb.
        if advertise_arp == "true":
            cmd = "peer %s advertise arp" % remote_address
        else:
            cmd = "undo peer %s advertise arp" % remote_address
        cmds.append(cmd)

    advertise_remote_nexthop = module.params['advertise_remote_nexthop']
    if advertise_remote_nexthop != 'no_use':
        conf_str += "<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>" % advertise_remote_nexthop

        if advertise_remote_nexthop == "true":
            cmd = "peer %s advertise remote-nexthop" % remote_address
        else:
            cmd = "undo peer %s advertise remote-nexthop" % remote_address
        cmds.append(cmd)

    advertise_community = module.params['advertise_community']
    if advertise_community != 'no_use':
        conf_str += "<advertiseCommunity>%s</advertiseCommunity>" % advertise_community

        if advertise_community == "true":
            cmd = "peer %s advertise-community" % remote_address
        else:
            cmd = "undo peer %s advertise-community" % remote_address
        cmds.append(cmd)

    advertise_ext_community = module.params['advertise_ext_community']
    if advertise_ext_community != 'no_use':
        conf_str += "<advertiseExtCommunity>%s</advertiseExtCommunity>" % advertise_ext_community

        if advertise_ext_community == "true":
            cmd = "peer %s advertise-ext-community" % remote_address
        else:
            cmd = "undo peer %s advertise-ext-community" % remote_address
        cmds.append(cmd)

    discard_ext_community = module.params['discard_ext_community']
    if discard_ext_community != 'no_use':
        conf_str += "<discardExtCommunity>%s</discardExtCommunity>" % discard_ext_community

        if discard_ext_community == "true":
            cmd = "peer %s discard-ext-community" % remote_address
        else:
            cmd = "undo peer %s discard-ext-community" % remote_address
        cmds.append(cmd)

    allow_as_loop_enable = module.params['allow_as_loop_enable']
    if allow_as_loop_enable != 'no_use':
        conf_str += "<allowAsLoopEnable>%s</allowAsLoopEnable>" % allow_as_loop_enable

        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop" % remote_address
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    allow_as_loop_limit = module.params['allow_as_loop_limit']
    if allow_as_loop_limit:
        conf_str += "<allowAsLoopLimit>%s</allowAsLoopLimit>" % allow_as_loop_limit

        # NOTE(review): the CLI text is gated on allow_as_loop_enable, not
        # on the limit itself -- presumably intentional; confirm with callers.
        if allow_as_loop_enable == "true":
            cmd = "peer %s allow-as-loop %s" % (remote_address, allow_as_loop_limit)
        else:
            cmd = "undo peer %s allow-as-loop" % remote_address
        cmds.append(cmd)

    keep_all_routes = module.params['keep_all_routes']
    if keep_all_routes != 'no_use':
        conf_str += "<keepAllRoutes>%s</keepAllRoutes>" % keep_all_routes

        if keep_all_routes == "true":
            cmd = "peer %s keep-all-routes" % remote_address
        else:
            cmd = "undo peer %s keep-all-routes" % remote_address
        cmds.append(cmd)

    nexthop_configure = module.params['nexthop_configure']
    if nexthop_configure:
        conf_str += "<nextHopConfigure>%s</nextHopConfigure>" % nexthop_configure

        # "null" is sent in the XML but produces no display command.
        if nexthop_configure == "local":
            cmd = "peer %s next-hop-local" % remote_address
            cmds.append(cmd)
        elif nexthop_configure == "invariable":
            cmd = "peer %s next-hop-invariable" % remote_address
            cmds.append(cmd)

    preferred_value = module.params['preferred_value']
    if preferred_value:
        conf_str += "<preferredValue>%s</preferredValue>" % preferred_value

        cmd = "peer %s preferred-value %s" % (remote_address, preferred_value)
        cmds.append(cmd)

    public_as_only = module.params['public_as_only']
    if public_as_only != 'no_use':
        conf_str += "<publicAsOnly>%s</publicAsOnly>" % public_as_only

        if public_as_only == "true":
            cmd = "peer %s public-as-only" % remote_address
        else:
            cmd = "undo peer %s public-as-only" % remote_address
        cmds.append(cmd)

    public_as_only_force = module.params['public_as_only_force']
    if public_as_only_force != 'no_use':
        conf_str += "<publicAsOnlyForce>%s</publicAsOnlyForce>" % public_as_only_force

        if public_as_only_force == "true":
            cmd = "peer %s public-as-only force" % remote_address
        else:
            cmd = "undo peer %s public-as-only force" % remote_address
        cmds.append(cmd)

    public_as_only_limited = module.params['public_as_only_limited']
    if public_as_only_limited != 'no_use':
        conf_str += "<publicAsOnlyLimited>%s</publicAsOnlyLimited>" % public_as_only_limited

        if public_as_only_limited == "true":
            cmd = "peer %s public-as-only limited" % remote_address
        else:
            cmd = "undo peer %s public-as-only limited" % remote_address
        cmds.append(cmd)

    public_as_only_replace = module.params['public_as_only_replace']
    if public_as_only_replace != 'no_use':
        conf_str += "<publicAsOnlyReplace>%s</publicAsOnlyReplace>" % public_as_only_replace

        if public_as_only_replace == "true":
            cmd = "peer %s public-as-only force replace" % remote_address
        else:
            cmd = "undo peer %s public-as-only force replace" % remote_address
        cmds.append(cmd)

    public_as_only_skip_peer_as = module.params[
        'public_as_only_skip_peer_as']
    if public_as_only_skip_peer_as != 'no_use':
        conf_str += "<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>" % public_as_only_skip_peer_as

        if public_as_only_skip_peer_as == "true":
            cmd = "peer %s public-as-only force include-peer-as" % remote_address
        else:
            cmd = "undo peer %s public-as-only force include-peer-as" % remote_address
        cmds.append(cmd)

    route_limit = module.params['route_limit']
    if route_limit:
        conf_str += "<routeLimit>%s</routeLimit>" % route_limit

        cmd = "peer %s route-limit %s" % (remote_address, route_limit)
        cmds.append(cmd)

    route_limit_percent = module.params['route_limit_percent']
    if route_limit_percent:
        conf_str += "<routeLimitPercent>%s</routeLimitPercent>" % route_limit_percent

        # NOTE(review): route_limit may be None here, which renders as
        # "None" in the display command -- confirm intended behaviour.
        cmd = "peer %s route-limit %s %s" % (remote_address, route_limit, route_limit_percent)
        cmds.append(cmd)

    route_limit_type = module.params['route_limit_type']
    if route_limit_type:
        conf_str += "<routeLimitType>%s</routeLimitType>" % route_limit_type

        if route_limit_type == "alertOnly":
            cmd = "peer %s route-limit %s %s alert-only" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleForever":
            cmd = "peer %s route-limit %s %s idle-forever" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)
        elif route_limit_type == "idleTimeout":
            cmd = "peer %s route-limit %s %s idle-timeout" % (remote_address, route_limit, route_limit_percent)
            cmds.append(cmd)

    route_limit_idle_timeout = module.params['route_limit_idle_timeout']
    if route_limit_idle_timeout:
        conf_str += "<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>" % route_limit_idle_timeout

        cmd = "peer %s route-limit %s %s idle-timeout %s" % (remote_address, route_limit,
                                                             route_limit_percent, route_limit_idle_timeout)
        cmds.append(cmd)

    rt_updt_interval = module.params['rt_updt_interval']
    if rt_updt_interval:
        conf_str += "<rtUpdtInterval>%s</rtUpdtInterval>" % rt_updt_interval

        cmd = "peer %s route-update-interval %s" % (remote_address, rt_updt_interval)
        cmds.append(cmd)

    # The next three only affect the XML request; no CLI text is emitted.
    redirect_ip = module.params['redirect_ip']
    if redirect_ip != 'no_use':
        conf_str += "<redirectIP>%s</redirectIP>" % redirect_ip

    redirect_ip_vaildation = module.params['redirect_ip_vaildation']
    if redirect_ip_vaildation != 'no_use':
        conf_str += "<redirectIPVaildation>%s</redirectIPVaildation>" % redirect_ip_vaildation

    reflect_client = module.params['reflect_client']
    if reflect_client != 'no_use':
        conf_str += "<reflectClient>%s</reflectClient>" % reflect_client

        if reflect_client == "true":
            cmd = "peer %s reflect-client" % remote_address
        else:
            cmd = "undo peer %s reflect-client" % remote_address
        cmds.append(cmd)

    substitute_as_enable = module.params['substitute_as_enable']
    if substitute_as_enable != 'no_use':
        conf_str += "<substituteAsEnable>%s</substituteAsEnable>" % substitute_as_enable

    import_rt_policy_name = module.params['import_rt_policy_name']
    if import_rt_policy_name:
        conf_str += "<importRtPolicyName>%s</importRtPolicyName>" % import_rt_policy_name

        cmd = "peer %s route-policy %s import" % (remote_address, import_rt_policy_name)
        cmds.append(cmd)

    export_rt_policy_name = module.params['export_rt_policy_name']
    if export_rt_policy_name:
        conf_str += "<exportRtPolicyName>%s</exportRtPolicyName>" % export_rt_policy_name

        cmd = "peer %s route-policy %s export" % (remote_address, export_rt_policy_name)
        cmds.append(cmd)

    import_pref_filt_name = module.params['import_pref_filt_name']
    if import_pref_filt_name:
        conf_str += "<importPrefFiltName>%s</importPrefFiltName>" % import_pref_filt_name

        cmd = "peer %s filter-policy %s import" % (remote_address, import_pref_filt_name)
        cmds.append(cmd)

    export_pref_filt_name = module.params['export_pref_filt_name']
    if export_pref_filt_name:
        conf_str += "<exportPrefFiltName>%s</exportPrefFiltName>" % export_pref_filt_name

        cmd = "peer %s filter-policy %s export" % (remote_address, export_pref_filt_name)
        cmds.append(cmd)

    import_as_path_filter = module.params['import_as_path_filter']
    if import_as_path_filter:
        conf_str += "<importAsPathFilter>%s</importAsPathFilter>" % import_as_path_filter

        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_filter)
        cmds.append(cmd)

    export_as_path_filter = module.params['export_as_path_filter']
    if export_as_path_filter:
        conf_str += "<exportAsPathFilter>%s</exportAsPathFilter>" % export_as_path_filter

        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_filter)
        cmds.append(cmd)

    import_as_path_name_or_num = module.params[
        'import_as_path_name_or_num']
    if import_as_path_name_or_num:
        conf_str += "<importAsPathNameOrNum>%s</importAsPathNameOrNum>" % import_as_path_name_or_num

        cmd = "peer %s as-path-filter %s import" % (remote_address, import_as_path_name_or_num)
        cmds.append(cmd)

    export_as_path_name_or_num = module.params[
        'export_as_path_name_or_num']
    if export_as_path_name_or_num:
        conf_str += "<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>" % export_as_path_name_or_num

        cmd = "peer %s as-path-filter %s export" % (remote_address, export_as_path_name_or_num)
        cmds.append(cmd)

    import_acl_name_or_num = module.params['import_acl_name_or_num']
    if import_acl_name_or_num:
        conf_str += "<importAclNameOrNum>%s</importAclNameOrNum>" % import_acl_name_or_num

        cmd = "peer %s filter-policy %s import" % (remote_address, import_acl_name_or_num)
        cmds.append(cmd)

    export_acl_name_or_num = module.params['export_acl_name_or_num']
    if export_acl_name_or_num:
        conf_str += "<exportAclNameOrNum>%s</exportAclNameOrNum>" % export_acl_name_or_num

        cmd = "peer %s filter-policy %s export" % (remote_address, export_acl_name_or_num)
        cmds.append(cmd)

    ipprefix_orf_enable = module.params['ipprefix_orf_enable']
    if ipprefix_orf_enable != 'no_use':
        conf_str += "<ipprefixOrfEnable>%s</ipprefixOrfEnable>" % ipprefix_orf_enable

        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix" % remote_address
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix" % remote_address
        cmds.append(cmd)

    is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
    if is_nonstd_ipprefix_mod != 'no_use':
        conf_str += "<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>" % is_nonstd_ipprefix_mod

        if is_nonstd_ipprefix_mod == "true":
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf non-standard-compatible" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf non-standard-compatible" % remote_address
            cmds.append(cmd)
        else:
            if ipprefix_orf_enable == "true":
                cmd = "peer %s capability-advertise orf" % remote_address
            else:
                cmd = "undo peer %s capability-advertise orf" % remote_address
            cmds.append(cmd)

    orftype = module.params['orftype']
    if orftype:
        conf_str += "<orftype>%s</orftype>" % orftype

    orf_mode = module.params['orf_mode']
    if orf_mode:
        conf_str += "<orfMode>%s</orfMode>" % orf_mode

        if ipprefix_orf_enable == "true":
            cmd = "peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        else:
            cmd = "undo peer %s capability-advertise orf ip-prefix %s" % (remote_address, orf_mode)
        cmds.append(cmd)

    soostring = module.params['soostring']
    if soostring:
        conf_str += "<soostring>%s</soostring>" % soostring

        cmd = "peer %s soo %s" % (remote_address, soostring)
        cmds.append(cmd)

    # default-route-advertise is assembled incrementally because the
    # policy/match-mode options extend the same CLI line.
    cmd = ""
    default_rt_adv_enable = module.params['default_rt_adv_enable']
    if default_rt_adv_enable != 'no_use':
        conf_str += "<defaultRtAdvEnable>%s</defaultRtAdvEnable>" % default_rt_adv_enable

        if default_rt_adv_enable == "true":
            cmd += "peer %s default-route-advertise" % remote_address
        else:
            cmd += "undo peer %s default-route-advertise" % remote_address

    default_rt_adv_policy = module.params['default_rt_adv_policy']
    if default_rt_adv_policy:
        conf_str += "<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>" % default_rt_adv_policy

        cmd += " route-policy %s" % default_rt_adv_policy

    default_rt_match_mode = module.params['default_rt_match_mode']
    if default_rt_match_mode:
        conf_str += "<defaultRtMatchMode>%s</defaultRtMatchMode>" % default_rt_match_mode

        if default_rt_match_mode == "matchall":
            cmd += " conditional-route-match-all"
        elif default_rt_match_mode == "matchany":
            cmd += " conditional-route-match-any"

    if cmd:
        cmds.append(cmd)

    # The remaining options only affect the XML request; no CLI text.
    add_path_mode = module.params['add_path_mode']
    if add_path_mode:
        conf_str += "<addPathMode>%s</addPathMode>" % add_path_mode

    adv_add_path_num = module.params['adv_add_path_num']
    if adv_add_path_num:
        conf_str += "<advAddPathNum>%s</advAddPathNum>" % adv_add_path_num

    origin_as_valid = module.params['origin_as_valid']
    if origin_as_valid != 'no_use':
        conf_str += "<originAsValid>%s</originAsValid>" % origin_as_valid

    vpls_enable = module.params['vpls_enable']
    if vpls_enable != 'no_use':
        conf_str += "<vplsEnable>%s</vplsEnable>" % vpls_enable

    vpls_ad_disable = module.params['vpls_ad_disable']
    if vpls_ad_disable != 'no_use':
        conf_str += "<vplsAdDisable>%s</vplsAdDisable>" % vpls_ad_disable

    update_pkt_standard_compatible = module.params[
        'update_pkt_standard_compatible']
    if update_pkt_standard_compatible != 'no_use':
        conf_str += "<updatePktStandardCompatible>%s</updatePktStandardCompatible>" % update_pkt_standard_compatible

    conf_str += CE_MERGE_BGP_PEER_AF_TAIL

    recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)

    if "<ok/>" not in recv_xml:
        module.fail_json(msg='Error: Merge bgp peer address family other failed.')

    return cmds
def main():
    """Ansible module entry point.

    Builds the argument spec, reads the current BGP peer address-family
    state from the device, applies the requested present/absent change,
    re-reads the end state, and exits with proposed/existing/end_state/
    updates/changed results.
    """
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        vrf_name=dict(type='str', required=True),
        af_type=dict(choices=['ipv4uni', 'ipv4multi', 'ipv4vpn',
                              'ipv6uni', 'ipv6vpn', 'evpn'], required=True),
        remote_address=dict(type='str', required=True),
        advertise_irb=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_arp=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_remote_nexthop=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        advertise_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        discard_ext_community=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        allow_as_loop_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        allow_as_loop_limit=dict(type='str'),
        keep_all_routes=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        nexthop_configure=dict(choices=['null', 'local', 'invariable']),
        preferred_value=dict(type='str'),
        public_as_only=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_force=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_limited=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_replace=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        public_as_only_skip_peer_as=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        route_limit=dict(type='str'),
        route_limit_percent=dict(type='str'),
        route_limit_type=dict(
            choices=['noparameter', 'alertOnly', 'idleForever', 'idleTimeout']),
        route_limit_idle_timeout=dict(type='str'),
        rt_updt_interval=dict(type='str'),
        redirect_ip=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        redirect_ip_vaildation=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        reflect_client=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        substitute_as_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        import_rt_policy_name=dict(type='str'),
        export_rt_policy_name=dict(type='str'),
        import_pref_filt_name=dict(type='str'),
        export_pref_filt_name=dict(type='str'),
        import_as_path_filter=dict(type='str'),
        export_as_path_filter=dict(type='str'),
        import_as_path_name_or_num=dict(type='str'),
        export_as_path_name_or_num=dict(type='str'),
        import_acl_name_or_num=dict(type='str'),
        export_acl_name_or_num=dict(type='str'),
        ipprefix_orf_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        is_nonstd_ipprefix_mod=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        orftype=dict(type='str'),
        orf_mode=dict(choices=['null', 'receive', 'send', 'both']),
        soostring=dict(type='str'),
        default_rt_adv_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        default_rt_adv_policy=dict(type='str'),
        default_rt_match_mode=dict(choices=['null', 'matchall', 'matchany']),
        add_path_mode=dict(choices=['null', 'receive', 'send', 'both']),
        adv_add_path_num=dict(type='str'),
        origin_as_valid=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        vpls_enable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        vpls_ad_disable=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']),
        update_pkt_standard_compatible=dict(type='str', default='no_use', choices=['no_use', 'true', 'false']))

    argument_spec.update(ce_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Result accumulators.
    changed = False
    proposed = dict()
    existing = dict()
    end_state = dict()
    updates = []

    # Unpack every module parameter into a local for readability below.
    state = module.params['state']
    vrf_name = module.params['vrf_name']
    af_type = module.params['af_type']
    remote_address = module.params['remote_address']
    advertise_irb = module.params['advertise_irb']
    advertise_arp = module.params['advertise_arp']
    advertise_remote_nexthop = module.params['advertise_remote_nexthop']
    advertise_community = module.params['advertise_community']
    advertise_ext_community = module.params['advertise_ext_community']
    discard_ext_community = module.params['discard_ext_community']
    allow_as_loop_enable = module.params['allow_as_loop_enable']
    allow_as_loop_limit = module.params['allow_as_loop_limit']
    keep_all_routes = module.params['keep_all_routes']
    nexthop_configure = module.params['nexthop_configure']
    preferred_value = module.params['preferred_value']
    public_as_only = module.params['public_as_only']
    public_as_only_force = module.params['public_as_only_force']
    public_as_only_limited = module.params['public_as_only_limited']
    public_as_only_replace = module.params['public_as_only_replace']
    public_as_only_skip_peer_as = module.params['public_as_only_skip_peer_as']
    route_limit = module.params['route_limit']
    route_limit_percent = module.params['route_limit_percent']
    route_limit_type = module.params['route_limit_type']
    route_limit_idle_timeout = module.params['route_limit_idle_timeout']
    rt_updt_interval = module.params['rt_updt_interval']
    redirect_ip = module.params['redirect_ip']
    redirect_ip_vaildation = module.params['redirect_ip_vaildation']
    reflect_client = module.params['reflect_client']
    substitute_as_enable = module.params['substitute_as_enable']
    import_rt_policy_name = module.params['import_rt_policy_name']
    export_rt_policy_name = module.params['export_rt_policy_name']
    import_pref_filt_name = module.params['import_pref_filt_name']
    export_pref_filt_name = module.params['export_pref_filt_name']
    import_as_path_filter = module.params['import_as_path_filter']
    export_as_path_filter = module.params['export_as_path_filter']
    import_as_path_name_or_num = module.params['import_as_path_name_or_num']
    export_as_path_name_or_num = module.params['export_as_path_name_or_num']
    import_acl_name_or_num = module.params['import_acl_name_or_num']
    export_acl_name_or_num = module.params['export_acl_name_or_num']
    ipprefix_orf_enable = module.params['ipprefix_orf_enable']
    is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
    orftype = module.params['orftype']
    orf_mode = module.params['orf_mode']
    soostring = module.params['soostring']
    default_rt_adv_enable = module.params['default_rt_adv_enable']
    default_rt_adv_policy = module.params['default_rt_adv_policy']
    default_rt_match_mode = module.params['default_rt_match_mode']
    add_path_mode = module.params['add_path_mode']
    adv_add_path_num = module.params['adv_add_path_num']
    origin_as_valid = module.params['origin_as_valid']
    vpls_enable = module.params['vpls_enable']
    vpls_ad_disable = module.params['vpls_ad_disable']
    update_pkt_standard_compatible = module.params[
        'update_pkt_standard_compatible']

    ce_bgp_peer_af_obj = BgpNeighborAf()

    # get proposed: record every explicitly supplied parameter
    # ('no_use' / empty means the option was not given).
    proposed["state"] = state
    if vrf_name:
        proposed["vrf_name"] = vrf_name
    if af_type:
        proposed["af_type"] = af_type
    if remote_address:
        proposed["remote_address"] = remote_address
    if advertise_irb != 'no_use':
        proposed["advertise_irb"] = advertise_irb
    if advertise_arp != 'no_use':
        proposed["advertise_arp"] = advertise_arp
    if advertise_remote_nexthop != 'no_use':
        proposed["advertise_remote_nexthop"] = advertise_remote_nexthop
    if advertise_community != 'no_use':
        proposed["advertise_community"] = advertise_community
    if advertise_ext_community != 'no_use':
        proposed["advertise_ext_community"] = advertise_ext_community
    if discard_ext_community != 'no_use':
        proposed["discard_ext_community"] = discard_ext_community
    if allow_as_loop_enable != 'no_use':
        proposed["allow_as_loop_enable"] = allow_as_loop_enable
    if allow_as_loop_limit:
        proposed["allow_as_loop_limit"] = allow_as_loop_limit
    if keep_all_routes != 'no_use':
        proposed["keep_all_routes"] = keep_all_routes
    if nexthop_configure:
        proposed["nexthop_configure"] = nexthop_configure
    if preferred_value:
        proposed["preferred_value"] = preferred_value
    if public_as_only != 'no_use':
        proposed["public_as_only"] = public_as_only
    if public_as_only_force != 'no_use':
        proposed["public_as_only_force"] = public_as_only_force
    if public_as_only_limited != 'no_use':
        proposed["public_as_only_limited"] = public_as_only_limited
    if public_as_only_replace != 'no_use':
        proposed["public_as_only_replace"] = public_as_only_replace
    if public_as_only_skip_peer_as != 'no_use':
        proposed["public_as_only_skip_peer_as"] = public_as_only_skip_peer_as
    if route_limit:
        proposed["route_limit"] = route_limit
    if route_limit_percent:
        proposed["route_limit_percent"] = route_limit_percent
    if route_limit_type:
        proposed["route_limit_type"] = route_limit_type
    if route_limit_idle_timeout:
        proposed["route_limit_idle_timeout"] = route_limit_idle_timeout
    if rt_updt_interval:
        proposed["rt_updt_interval"] = rt_updt_interval
    if redirect_ip != 'no_use':
        proposed["redirect_ip"] = redirect_ip
    if redirect_ip_vaildation != 'no_use':
        proposed["redirect_ip_vaildation"] = redirect_ip_vaildation
    if reflect_client != 'no_use':
        proposed["reflect_client"] = reflect_client
    if substitute_as_enable != 'no_use':
        proposed["substitute_as_enable"] = substitute_as_enable
    if import_rt_policy_name:
        proposed["import_rt_policy_name"] = import_rt_policy_name
    if export_rt_policy_name:
        proposed["export_rt_policy_name"] = export_rt_policy_name
    if import_pref_filt_name:
        proposed["import_pref_filt_name"] = import_pref_filt_name
    if export_pref_filt_name:
        proposed["export_pref_filt_name"] = export_pref_filt_name
    if import_as_path_filter:
        proposed["import_as_path_filter"] = import_as_path_filter
    if export_as_path_filter:
        proposed["export_as_path_filter"] = export_as_path_filter
    if import_as_path_name_or_num:
        proposed["import_as_path_name_or_num"] = import_as_path_name_or_num
    if export_as_path_name_or_num:
        proposed["export_as_path_name_or_num"] = export_as_path_name_or_num
    if import_acl_name_or_num:
        proposed["import_acl_name_or_num"] = import_acl_name_or_num
    if export_acl_name_or_num:
        proposed["export_acl_name_or_num"] = export_acl_name_or_num
    if ipprefix_orf_enable != 'no_use':
        proposed["ipprefix_orf_enable"] = ipprefix_orf_enable
    if is_nonstd_ipprefix_mod != 'no_use':
        proposed["is_nonstd_ipprefix_mod"] = is_nonstd_ipprefix_mod
    if orftype:
        proposed["orftype"] = orftype
    if orf_mode:
        proposed["orf_mode"] = orf_mode
    if soostring:
        proposed["soostring"] = soostring
    if default_rt_adv_enable != 'no_use':
        proposed["default_rt_adv_enable"] = default_rt_adv_enable
    if default_rt_adv_policy:
        proposed["default_rt_adv_policy"] = default_rt_adv_policy
    if default_rt_match_mode:
        proposed["default_rt_match_mode"] = default_rt_match_mode
    if add_path_mode:
        proposed["add_path_mode"] = add_path_mode
    if adv_add_path_num:
        proposed["adv_add_path_num"] = adv_add_path_num
    if origin_as_valid != 'no_use':
        proposed["origin_as_valid"] = origin_as_valid
    if vpls_enable != 'no_use':
        proposed["vpls_enable"] = vpls_enable
    if vpls_ad_disable != 'no_use':
        proposed["vpls_ad_disable"] = vpls_ad_disable
    if update_pkt_standard_compatible != 'no_use':
        proposed["update_pkt_standard_compatible"] = update_pkt_standard_compatible

    if not ce_bgp_peer_af_obj:
        module.fail_json(msg='Error: Init module failed.')

    # Query current device state; each *_rst carries a need_cfg flag.
    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)

    # state exist bgp peer address family config
    exist_tmp = dict()
    for item in bgp_peer_af_rst:
        if item != "need_cfg":
            exist_tmp[item] = bgp_peer_af_rst[item]

    if exist_tmp:
        existing["bgp neighbor af"] = exist_tmp
    # state exist bgp peer address family other config
    exist_tmp = dict()
    for item in bgp_peer_af_other_rst:
        if item != "need_cfg":
            exist_tmp[item] = bgp_peer_af_other_rst[item]

    if exist_tmp:
        existing["bgp neighbor af other"] = exist_tmp

    if state == "present":
        if bgp_peer_af_rst["need_cfg"]:
            # A remote_address key means the peer AF node already exists
            # on the device, so merge; otherwise create it.
            if "remote_address" in bgp_peer_af_rst.keys():
                cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)
            else:
                cmd = ce_bgp_peer_af_obj.create_bgp_peer_af(module=module)
                changed = True
                for item in cmd:
                    updates.append(item)

        if bgp_peer_af_other_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.merge_bgp_peer_af_other(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

    else:
        if bgp_peer_af_rst["need_cfg"]:
            cmd = ce_bgp_peer_af_obj.delete_bgp_peer_af(module=module)
            changed = True
            for item in cmd:
                updates.append(item)

        if bgp_peer_af_other_rst["need_cfg"]:
            # Deleting the peer AF node removes its sub-options too.
            pass

    # state end bgp peer address family config
    bgp_peer_af_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_args(
        module=module)
    end_tmp = dict()
    for item in bgp_peer_af_rst:
        if item != "need_cfg":
            end_tmp[item] = bgp_peer_af_rst[item]

    if end_tmp:
        end_state["bgp neighbor af"] = end_tmp
    # state end bgp peer address family other config
    bgp_peer_af_other_rst = ce_bgp_peer_af_obj.check_bgp_neighbor_af_other(
        module=module)
    end_tmp = dict()
    for item in bgp_peer_af_other_rst:
        if item != "need_cfg":
            end_tmp[item] = bgp_peer_af_other_rst[item]

    if end_tmp:
        end_state["bgp neighbor af other"] = end_tmp

    results = dict()
    results['proposed'] = proposed
    results['existing'] = existing
    results['changed'] = changed
    results['end_state'] = end_state
    results['updates'] = updates

    module.exit_json(**results)
# Run the Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/networkx/algorithms/vitality.py
|
72
|
2488
|
"""
Vitality measures.
"""
# Copyright (C) 2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg ([email protected])',
'Renato Fabbri'])
__all__ = ['closeness_vitality']
def weiner_index(G, weight=None):
    """Return the sum of shortest-path lengths over all ordered node pairs.

    If ``weight`` is None every edge counts as distance 1 (BFS lengths);
    otherwise the named edge attribute is used as the edge weight
    (Dijkstra lengths).
    """
    # Pick the per-source shortest-path-length routine once, then fold the
    # distances from every source node into a float accumulator.
    if weight is None:
        lengths = (nx.single_source_shortest_path_length(G, node)
                   for node in G)
    else:
        lengths = (nx.single_source_dijkstra_path_length(G, node, weight=weight)
                   for node in G)
    total = 0.0
    for path_length in lengths:
        total += sum(path_length.values())
    return total
def closeness_vitality(G, weight=None):
    """Compute closeness vitality for nodes.

    Closeness vitality of a node is the change in the sum of distances
    between all node pairs when excluding that node.

    Parameters
    ----------
    G : graph

    weight : None or string (optional)
       The name of the edge attribute used as weight. If None the edge
       weights are ignored.

    Returns
    -------
    nodes : dictionary
       Dictionary with nodes as keys and closeness vitality as the value.

    Notes
    -----
    ``G`` is modified in place while the computation runs (the edges
    incident to each node are removed and then restored), so this
    function is not safe to call concurrently on the same graph object.

    Examples
    --------
    >>> G=nx.cycle_graph(3)
    >>> nx.closeness_vitality(G)
    {0: 4.0, 1: 4.0, 2: 4.0}

    See Also
    --------
    closeness_centrality()

    References
    ----------
    .. [1] Ulrik Brandes, Sec. 3.6.2 in
       Network Analysis: Methodological Foundations, Springer, 2005.
       http://books.google.com/books?id=TTNhSm7HYrIC
    """
    multigraph = G.is_multigraph()
    # Wiener index of the intact graph; each node's vitality is the drop
    # relative to this baseline.
    wig = weiner_index(G,weight)
    closeness_vitality = {}
    for n in G:
        # remove edges connected to node n and keep list of edges with data
        # could remove node n but it doesn't count anyway
        if multigraph:
            # keys=True so parallel edges can be restored exactly afterwards.
            edges = G.edges(n,data=True,keys=True)
            if G.in_edges(n,data=True,keys=True) if False else G.is_directed():
                edges += G.in_edges(n,data=True,keys=True)
        else:
            edges = G.edges(n,data=True)
            if G.is_directed():
                # NOTE(review): a self-loop at n appears in both edges() and
                # in_edges(); re-adding the duplicate looks harmless for simple
                # graphs but is worth confirming for directed multigraphs.
                edges += G.in_edges(n,data=True)
        G.remove_edges_from(edges)
        # Vitality of n = baseline index minus the index with n isolated.
        closeness_vitality[n] = wig - weiner_index(G,weight)
        # add edges and data back to graph
        G.add_edges_from(edges)
    return closeness_vitality
|
agpl-3.0
|
pawaranand/phrerp
|
erpnext/home/doctype/feed/feed.py
|
37
|
1545
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
class Feed(Document):
	# Controller for the Feed doctype. It carries no behaviour of its own;
	# all Feed logic lives in the module-level permission hooks in this file.
	pass
def on_doctype_update():
	"""Ensure the (doc_type, doc_name) composite index exists on `tabFeed`."""
	# `show index` returns rows only when the index already exists, so an
	# empty result means we still need to create it.
	if not frappe.db.sql("""show index from `tabFeed`
		where Key_name="feed_doctype_docname_index" """):
		# Commit first: ALTER TABLE is DDL and would implicitly end the
		# current transaction anyway.
		frappe.db.commit()
		frappe.db.sql("""alter table `tabFeed`
			add index feed_doctype_docname_index(doc_type, doc_name)""")
def get_permission_query_conditions(user):
	"""Return an SQL WHERE fragment limiting Feed rows to what *user* may read.

	Returns "" (no restriction) when user permissions don't apply, or when
	the user can read no doctype at all.
	"""
	user = user or frappe.session.user
	if not frappe.permissions.apply_user_permissions("Feed", "read", user):
		return ""

	user_permissions = frappe.defaults.get_user_permissions(user)
	can_read = frappe.get_user(user).get_can_read()

	# Doctypes readable without any per-document restriction.
	unrestricted = set(can_read) - set(user_permissions.keys())
	can_read_doctypes = ['"{}"'.format(dt) for dt in unrestricted]
	if not can_read_doctypes:
		return ""

	conditions = ["tabFeed.doc_type in ({})".format(", ".join(can_read_doctypes))]

	if user_permissions:
		# Doctypes with per-document permissions: match on "doctype|docname".
		can_read_docs = [
			'"{}|{}"'.format(doctype, docname)
			for doctype, names in user_permissions.items()
			for docname in names
		]
		if can_read_docs:
			conditions.append("concat_ws('|', tabFeed.doc_type, tabFeed.doc_name) in ({})".format(
				", ".join(can_read_docs)))

	return "(" + " or ".join(conditions) + ")"
def has_permission(doc, user):
	"""A Feed row is visible iff *user* can read the document it points at."""
	return frappe.has_permission(doc.doc_type, "read", doc.doc_name, user=user)
|
agpl-3.0
|
Chilledheart/googletest
|
googlemock/scripts/upload_gmock.py
|
770
|
2833
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = '[email protected]'
def main():
  """Re-invoke upload.py, making sure the Google Mock group is cc'd."""
  # upload.py is expected to live in the same directory as this script.
  script_dir = os.path.dirname(os.path.abspath(__file__))
  upload_py_path = os.path.join(script_dir, 'upload.py')

  new_argv = [upload_py_path]
  saw_cc_flag = False
  for flag in sys.argv[1:]:
    if not flag.startswith(CC_FLAG):
      # Anything that isn't a --cc= flag passes through unchanged.
      new_argv.append(flag)
      continue
    saw_cc_flag = True
    # Rebuild the cc list, appending the group address if it is missing.
    addresses = [addr for addr in flag[len(CC_FLAG):].split(',') if addr]
    if GMOCK_GROUP not in addresses:
      addresses.append(GMOCK_GROUP)
    new_argv.append(CC_FLAG + ','.join(addresses))
  if not saw_cc_flag:
    new_argv.append(CC_FLAG + GMOCK_GROUP)

  # Replace the current process with upload.py (does not return).
  os.execv(upload_py_path, new_argv)
# Only re-exec upload.py when run as a script, never on import.
if __name__ == '__main__':
  main()
|
bsd-3-clause
|
kamyu104/django
|
tests/aggregation_regress/tests.py
|
102
|
55724
|
from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Q, Avg, Count, Max, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = HardbackBook.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15), weight=4.5)
cls.b6 = HardbackBook.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15), weight=3.7)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in six.iteritems(kwargs):
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
# Oracle doesn't support subqueries in group by clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
self.assertObjectAttrs(
obj,
contact_id=self.a3.id,
isbn='067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=self.p2.id,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
manufacture_cost = obj['manufacture_cost']
self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
del obj['manufacture_cost']
self.assertEqual(obj, {
'id': self.b2.id,
'contact_id': self.a3.id,
'isbn': '067232959',
'mean_auth_age': 45.0,
'name': 'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal('23.09'),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': self.p2.id,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
self.assertEqual(obj, {
"name": 'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": 'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': 'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = (
Book.objects
.values('price')
.annotate(oldest=Max('authors__age'))
.order_by('oldest', 'price')
.annotate(Max('publisher__num_awards'))
)
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
def test_aggrate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = (
Book.objects
.all()
.annotate(num_authors=Count('authors__id'))
.aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
)
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
# Regression for #15624 - Missing SELECT columns when using values, annotate
# and aggregate in a single query
self.assertEqual(
Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
{'c__max': 3}
)
def test_decimal_aggregate_annotation_filter(self):
"""
Filtering on an aggregate annotation with Decimal values should work.
Requires special handling on SQLite (#18247).
"""
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))),
1
)
self.assertEqual(
len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))),
4
)
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
# Aliases are quoted to protected aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(
num_authors=Count('authors')).values().get(isbn='013790395')
self.assertEqual(obj, {
'contact_id': self.a8.id,
'id': self.b5.id,
'isbn': '013790395',
'name': 'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': self.p3.id,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(
Book.objects
.annotate(num_authors=Count('authors'))
.filter(num_authors__lt=3)
.exclude(num_authors__lt=2)
),
2
)
self.assertEqual(
len(
Book.objects
.annotate(num_authors=Count('authors'))
.exclude(num_authors__lt=2)
.filter(num_authors__lt=3)
),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.filter(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.exclude(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.filter(num_awards__gt=2 * F('num_books'))
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = (
Publisher.objects
.annotate(num_books=Count('book'))
.exclude(num_books__lt=F('num_awards') / 2)
.order_by('name')
.values('name', 'num_books', 'num_awards')
)
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': 'Apress', 'num_awards': 3},
{'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': 'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = (
Clues.objects
.values('EntryID__Entry')
.annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
)
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
# method. Refs #21126.
e = Entries.objects.create(Entry='foo')
c = Clues.objects.create(EntryID=e, Clue='bar')
qs = Clues.objects.select_related('EntryID').annotate(Count('ID'))
self.assertQuerysetEqual(
qs, [c], lambda x: x)
self.assertEqual(qs[0].EntryID, e)
self.assertIs(qs[0].EntryID.Exclude, False)
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = (
Book.objects
.filter(id__in=[])
.aggregate(
num_authors=Count('authors'),
avg_authors=Avg('authors'),
max_authors=Max('authors'),
max_price=Max('price'),
max_rating=Max('rating'),
)
)
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = (
Publisher.objects
.filter(name="Jonno's House of Books")
.annotate(
num_authors=Count('book__authors'),
avg_authors=Avg('book__authors'),
max_authors=Max('book__authors'),
max_price=Max('book__price'),
max_rating=Max('book__rating'),
).values()
)
self.assertQuerysetEqual(
qs,
[{
'max_authors': None,
'name': "Jonno's House of Books",
'num_awards': 0,
'max_price': None,
'num_authors': 0,
'max_rating': None,
'id': self.p5.id,
'avg_authors': None,
}],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs,
[
('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
(
'Python Web Development with Django',
Approximate(30.333, places=2),
'Prentice Hall',
'Jeffrey Forcier',
),
('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': self.b1.id, 'id__count': 2},
{'pub': self.b2.id, 'id__count': 1},
{'pub': self.b3.id, 'id__count': 2},
{'pub': self.b4.id, 'id__count': 1}
],
lambda b: b
)
qs = (
Book.objects
.extra(select={'pub': 'publisher_id', 'foo': 'pages'})
.values('pub')
.annotate(Count('id'))
.order_by('pub')
)
self.assertQuerysetEqual(
qs, [
{'pub': self.p1.id, 'id__count': 2},
{'pub': self.p2.id, 'id__count': 1},
{'pub': self.p3.id, 'id__count': 2},
{'pub': self.p4.id, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
# realiased when used in a subquery
ids = (
Book.objects
.filter(pages__gt=100)
.annotate(n_authors=Count('authors'))
.filter(n_authors__gt=2)
.order_by('n_authors')
)
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
# Check that there is just one GROUP BY clause (zero commas means at
# most one clause)
self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
    def test_duplicate_alias(self):
        # Regression for #11256 - duplicating a default alias raises ValueError.
        # Avg('authors__age') implicitly receives the alias 'authors__age__avg',
        # so passing that same name explicitly must be rejected.
        self.assertRaises(
            ValueError,
            Book.objects.all().annotate,
            Avg('authors__age'), authors__age__avg=Avg('authors__age')
        )
    def test_field_name_conflict(self):
        # Regression for #11256 - providing an aggregate name
        # that conflicts with a field name on the model raises ValueError
        # ('age' is a concrete field on Author).
        self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
    def test_m2m_name_conflict(self):
        # Regression for #11256 - providing an aggregate name
        # that conflicts with an m2m name on the model raises ValueError
        # ('friends' is a ManyToManyField on Author).
        self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
# age is a field on Author, so it shouldn't be allowed as an aggregate.
# But age isn't included in values(), so it is.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], 'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
    def test_reverse_relation_name_conflict(self):
        # Regression for #11256 - providing an aggregate name
        # that conflicts with a reverse-related name on the model raises ValueError
        # ('book_contact_set' is the reverse accessor of Book.contact).
        self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
'Artificial Intelligence: A Modern Approach',
'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
'Practical Django Projects',
'Python Web Development with Django',
'Sams Teach Yourself Django in 24 Hours',
'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with dates()
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.date(1995, 1, 15),
datetime.date(2007, 12, 6),
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = (
Book.objects
.annotate(mean_auth_age=Avg('authors__age'))
.extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2])
.order_by('sheets')
.values('sheets')
)
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
# Regression for 10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
# Note: intentionally no order_by(), that case needs tests, too.
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
sorted_publishers = sorted(publishers, key=lambda x: x.name)
self.assertEqual(
sorted_publishers[0].n_books,
2
)
self.assertEqual(
sorted_publishers[1].n_books,
1
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
# Regression for 10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs,
[
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{
'n_authors': 1,
'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs,
[
{'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
{
'n_authors': 1,
'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
}
],
lambda h: h,
)
# Regression for #10766 - Shouldn't be able to reference an aggregate
# fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
    def test_empty_filter_count(self):
        # An annotation over a queryset filtered with an empty id__in list
        # must still count() to zero rather than erroring out.
        self.assertEqual(
            Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
            0
        )
    def test_empty_filter_aggregate(self):
        # aggregate() over an annotated queryset whose filter matches nothing
        # yields None for the aggregate (empty grouped result), not 0.
        self.assertEqual(
            Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
            {"pk__count": None}
        )
    def test_none_call_before_aggregate(self):
        # Regression for #11789
        # aggregate() over .none() returns the aggregate key mapped to None.
        self.assertEqual(
            Author.objects.none().aggregate(Avg('age')),
            {'age__avg': None}
        )
    def test_annotate_and_join(self):
        # Annotating over a to-many relation and then excluding on it must
        # still return every author (count matches the unfiltered table).
        self.assertEqual(
            Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
            Author.objects.count()
        )
    def test_f_expression_annotation(self):
        # Books with less than 200 pages per author.
        qs = Book.objects.values("name").annotate(
            n_authors=Count("authors")
        ).filter(
            pages__lt=F("n_authors") * 200
        ).values_list("pk")
        # The annotated pk list is consumed as a nested subquery via pk__in.
        self.assertQuerysetEqual(
            Book.objects.filter(pk__in=qs), [
                "Python Web Development with Django"
            ],
            attrgetter("name")
        )
    def test_values_annotate_values(self):
        # values() -> annotate() -> values_list() still yields one row per
        # book, matching a plain pk listing of the whole table.
        qs = Book.objects.values("name").annotate(
            n_authors=Count("authors")
        ).values_list("pk", flat=True)
        self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
    def test_having_group_by(self):
        # Test that when a field occurs on the LHS of a HAVING clause that it
        # appears correctly in the GROUP BY clause
        qs = Book.objects.values_list("name").annotate(
            n_authors=Count("authors")
        ).filter(
            # Plain field compared against an aggregate alias -> HAVING.
            pages__gt=F("n_authors")
        ).values_list("name", flat=True)
        # Results should be the same, all Books have more pages than authors
        self.assertEqual(
            list(qs), list(Book.objects.values_list("name", flat=True))
        )
    def test_values_list_annotation_args_ordering(self):
        """
        Annotate *args ordering should be preserved in values_list results.
        **kwargs comes after *args.
        Regression test for #23659.
        """
        books = Book.objects.values_list("publisher__name").annotate(
            Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
        ).order_by("-publisher__name")
        # Tuple layout: publisher name, the three *args aggregates in
        # declaration order, then the avg_pgs kwarg last.
        self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
    def test_annotation_disjunction(self):
        # OR of an annotation predicate with a plain field predicate.
        qs = Book.objects.annotate(n_authors=Count("authors")).filter(
            Q(n_authors=2) | Q(name="Python Web Development with Django")
        )
        self.assertQuerysetEqual(
            qs, [
                "Artificial Intelligence: A Modern Approach",
                "Python Web Development with Django",
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Mixed AND/OR combining field and annotation predicates.
        qs = (
            Book.objects
            .annotate(n_authors=Count("authors"))
            .filter(
                Q(name="The Definitive Guide to Django: Web Development Done Right")
                | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
            )
        )
        self.assertQuerysetEqual(
            qs,
            [
                "The Definitive Guide to Django: Web Development Done Right",
            ],
            attrgetter("name")
        )
        # Disjunction that must also match rows whose aggregate is NULL.
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
        ).order_by('pk')
        self.assertQuerysetEqual(
            qs, [
                "Apress",
                "Prentice Hall",
                "Jonno's House of Books",
            ],
            attrgetter("name")
        )
        # Same shape but comparing two annotations and using `= None`.
        qs = Publisher.objects.annotate(
            rating_sum=Sum("book__rating"),
            book_count=Count("book")
        ).filter(
            Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
        ).order_by("num_awards")
        self.assertQuerysetEqual(
            qs, [
                "Jonno's House of Books",
                "Sams",
                "Apress",
                "Prentice Hall",
                "Morgan Kaufmann"
            ],
            attrgetter("name")
        )
    def test_quoting_aggregate_order_by(self):
        # order_by() on a camelCase annotation alias must survive SQL
        # generation (alias quoting), per the test name.
        qs = Book.objects.filter(
            name="Python Web Development with Django"
        ).annotate(
            authorCount=Count("authors")
        ).order_by("authorCount")
        self.assertQuerysetEqual(
            qs, [
                ("Python Web Development with Django", 3),
            ],
            lambda b: (b.name, b.authorCount)
        )
    @skipUnlessDBFeature('supports_stddev')
    def test_stddev(self):
        # Population (default) and sample (sample=True) standard deviation
        # and variance over each numeric Book field, checked approximately.
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages')),
            {'pages__stddev': Approximate(311.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating')),
            {'rating__stddev': Approximate(0.60, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price')),
            {'price__stddev': Approximate(24.16, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('pages', sample=True)),
            {'pages__stddev': Approximate(341.19, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('rating', sample=True)),
            {'rating__stddev': Approximate(0.66, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(StdDev('price', sample=True)),
            {'price__stddev': Approximate(26.46, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages')),
            {'pages__variance': Approximate(97010.80, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating')),
            {'rating__variance': Approximate(0.36, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price')),
            {'price__variance': Approximate(583.77, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('pages', sample=True)),
            {'pages__variance': Approximate(116412.96, 1)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('rating', sample=True)),
            {'rating__variance': Approximate(0.44, 2)}
        )
        self.assertEqual(
            Book.objects.aggregate(Variance('price', sample=True)),
            {'price__variance': Approximate(700.53, 2)}
        )
    def test_filtering_by_annotation_name(self):
        """Filtering on explicit and auto-generated annotation aliases (#14476)."""
        # Regression test for #14476
        # The name of the explicitly provided annotation name in this case
        # poses no problem
        qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
        # Neither in this case
        qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
        # This case used to fail because the ORM couldn't resolve the
        # automatically generated annotation name `book__count`
        qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Peter Norvig'],
            lambda b: b.name
        )
        # Referencing the auto-generated name in an aggregate() also works.
        self.assertEqual(
            Author.objects.annotate(Count('book')).aggregate(Max('book__count')),
            {'book__count__max': 2}
        )
    def test_annotate_joins(self):
        """
        Test that the base table's join isn't promoted to LOUTER. This could
        cause the query generation to fail if there is an exclude() for fk-field
        in the query, too. Refs #19087.
        """
        qs = Book.objects.annotate(n=Count('pk'))
        # The base table's alias_map entry carries no join type (None).
        self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
        # Check that the query executes without problems.
        self.assertEqual(len(qs.exclude(publisher=-1)), 6)
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns(self):
        """GROUP BY collapses to the pk when the backend allows it (#17144)."""
        # Regression test for #17144
        results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
        # There should only be one GROUP BY clause, for the `id` column.
        # `name` and `age` should not be grouped on.
        _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
        self.assertEqual(len(group_by), 1)
        self.assertIn('id', group_by[0][0])
        self.assertNotIn('name', group_by[0][0])
        self.assertNotIn('age', group_by[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns_only(self):
        # Works with only() too.
        # Same single-column GROUP BY expectation as the test above.
        results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
        _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
        self.assertEqual(len(grouping), 1)
        self.assertIn('id', grouping[0][0])
        self.assertNotIn('name', grouping[0][0])
        self.assertNotIn('age', grouping[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(a.name, a.num_contacts) for a in results.order_by('name')],
            [
                ('Adrian Holovaty', 1),
                ('Brad Dayley', 1),
                ('Jacob Kaplan-Moss', 0),
                ('James Bennett', 1),
                ('Jeffrey Forcier', 1),
                ('Paul Bissex', 0),
                ('Peter Norvig', 2),
                ('Stuart Russell', 0),
                ('Wesley J. Chun', 0),
            ]
        )
    @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
    def test_aggregate_duplicate_columns_select_related(self):
        # And select_related()
        results = Book.objects.select_related('contact').annotate(
            num_authors=Count('authors'))
        _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
        # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
        self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
        self.assertIn('id', grouping[0][0])
        self.assertNotIn('name', grouping[0][0])
        self.assertNotIn('contact', grouping[0][0])
        # Ensure that we get correct results.
        self.assertEqual(
            [(b.name, b.num_authors) for b in results.order_by('name')],
            [
                ('Artificial Intelligence: A Modern Approach', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Practical Django Projects', 1),
                ('Python Web Development with Django', 3),
                ('Sams Teach Yourself Django in 24 Hours', 1),
                ('The Definitive Guide to Django: Web Development Done Right', 2)
            ]
        )
    def test_reverse_join_trimming(self):
        # Annotating across a reverse relation must keep the JOIN in the
        # generated SQL (it must not be trimmed away).
        qs = Author.objects.annotate(Count('book_contact_set__contact'))
        self.assertIn(' JOIN ', str(qs.query))
    def test_aggregation_with_generic_reverse_relation(self):
        """
        Regression test for #10870: Aggregates with joins ignore extra
        filters provided by setup_joins

        tests aggregations with generic reverse relations
        """
        django_book = Book.objects.get(name='Practical Django Projects')
        ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
                               content_type=ContentType.objects.get_for_model(django_book))
        ItemTag.objects.create(object_id=django_book.id, tag='django',
                               content_type=ContentType.objects.get_for_model(django_book))
        # Assign a tag to model with same PK as the book above. If the JOIN
        # used in aggregation doesn't have content type as part of the
        # condition the annotation will also count the 'hi mom' tag for b.
        wmpk = WithManualPK.objects.create(id=django_book.pk)
        ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
                               content_type=ContentType.objects.get_for_model(wmpk))
        ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
        ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
                               content_type=ContentType.objects.get_for_model(ai_book))

        # Only the three Book-typed tags may be counted ('hi mom' excluded).
        self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
        results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
        self.assertEqual(
            [(b.name, b.tags__count) for b in results],
            [
                ('Practical Django Projects', 2),
                ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
                ('Artificial Intelligence: A Modern Approach', 0),
                ('Python Web Development with Django', 0),
                ('Sams Teach Yourself Django in 24 Hours', 0),
                ('The Definitive Guide to Django: Web Development Done Right', 0)
            ]
        )
    def test_negated_aggregation(self):
        # exclude() over annotation predicates (ANDed and ORed) must match
        # the equivalent pk__in-based exclusion.
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
            Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
        expected_results = Author.objects.exclude(
            pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
        ).order_by('name')
        expected_results = [a.name for a in expected_results]
        qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
        self.assertQuerysetEqual(
            qs,
            expected_results,
            lambda b: b.name
        )
    def test_name_filters(self):
        # Disjunction of an auto-generated annotation alias filter with a
        # plain field filter.
        qs = Author.objects.annotate(Count('book')).filter(
            Q(book__count__exact=2) | Q(name='Adrian Holovaty')
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_name_expressions(self):
        # Test that aggregates are spotted correctly from F objects.
        # Note that Adrian's age is 34 in the fixtures, and he has one book
        # so both conditions match one author.
        qs = Author.objects.annotate(Count('book')).filter(
            Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
        ).order_by('name')
        self.assertQuerysetEqual(
            qs,
            ['Adrian Holovaty', 'Peter Norvig'],
            lambda b: b.name
        )
    def test_ticket_11293(self):
        # Disjunction of a plain-field predicate with an aggregate-alias
        # predicate (#11293).
        q1 = Q(price__gt=50)
        q2 = Q(authors__count__gt=1)
        query = Book.objects.annotate(Count('authors')).filter(
            q1 | q2).order_by('pk')
        self.assertQuerysetEqual(
            query, [1, 4, 5, 6],
            lambda b: b.pk)
    def test_ticket_11293_q_immutable(self):
        """
        Check that splitting a q object to parts for where/having doesn't alter
        the original q-object.
        """
        q1 = Q(isbn='')
        q2 = Q(authors__count__gt=1)
        query = Book.objects.annotate(Count('authors'))
        query.filter(q1 | q2)
        # q2 must still have exactly its one original child afterwards.
        self.assertEqual(len(q2.children), 1)
    def test_fobj_group_by(self):
        """
        Check that an F() object referring to related column works correctly
        in group by.
        """
        qs = Book.objects.annotate(
            acount=Count('authors')
        ).filter(
            # Aggregate compared to a related (publisher) column.
            acount=F('publisher__num_awards')
        )
        self.assertQuerysetEqual(
            qs, ['Sams Teach Yourself Django in 24 Hours'],
            lambda b: b.name)
    def test_annotate_reserved_word(self):
        """
        Regression #18333 - Ensure annotated column name is properly quoted.
        """
        # 'select' is an SQL keyword used here as the annotation alias.
        vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
        self.assertEqual(vals, {
            'select__sum': 10,
            'select__avg': Approximate(1.666, places=2),
        })
    def test_annotate_on_relation(self):
        # Mixing an aggregate annotation with an F() annotation over a
        # related field on the same queryset.
        book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk)
        self.assertEqual(book.avg_price, 30.00)
        self.assertEqual(book.publisher_name, "Apress")
    def test_aggregate_on_relation(self):
        # A query with an existing annotation aggregation on a relation should
        # succeed.
        qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
            publisher_awards=Sum('publisher__num_awards')
        )
        self.assertEqual(qs['publisher_awards'], 30)
    def test_annotate_distinct_aggregate(self):
        # There are three books with rating of 4.0 and two of the books have
        # the same price. Hence, the distinct removes one rating of 4.0
        # from the results.
        vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
        # Equivalent total computed without distinct, minus the duplicate.
        vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
        self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
    """Checks around promotion/demotion of INNER vs LEFT OUTER joins."""

    def test_ticket_21150(self):
        b = Bravo.objects.create()
        c = Charlie.objects.create(bravo=b)
        # select_related over a nullable fk combined with an annotation.
        qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertIs(qs[0].alfa, None)
        a = Alfa.objects.create()
        c.alfa = a
        c.save()
        # Force re-evaluation
        qs = qs.all()
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertEqual(qs[0].alfa, a)

    def test_existing_join_not_promoted(self):
        # No promotion for existing joins
        qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for already
        # promoted join.
        qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable first use by annotate will be LOUTER
        qs = Charlie.objects.annotate(Count('alfa__name'))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_non_nullable_fk_not_promoted(self):
        # A non-nullable fk keeps its INNER JOIN under annotation.
        qs = Book.objects.annotate(Count('contact__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
    """Aggregation across a self-referential foreign key (refs #24748)."""

    def test_ticket_24748(self):
        t1 = SelfRefFK.objects.create(name='t1')
        SelfRefFK.objects.create(name='t2', parent=t1)
        SelfRefFK.objects.create(name='t3', parent=t1)
        # The parent counts its two children; the leaves count zero.
        self.assertQuerysetEqual(
            SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
            [('t1', 2), ('t2', 0), ('t3', 0)],
            lambda x: (x.name, x.num_children)
        )
|
bsd-3-clause
|
mick-d/nipype
|
doc/sphinxext/numpy_ext/docscrape.py
|
10
|
15677
|
# -*- coding: utf-8 -*-
"""Extract reference documentation from the NumPy source tree.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import object
import inspect
import textwrap
import re
import pydoc
from warnings import warn
from io import StringIO
class Reader(object):
    """A cursor-based reader over newline-separated text."""

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str or list of str
            Newline-separated text, or an already-split list of lines.
        """
        # Normalise to a list of lines; a list input is stored as-is.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Rewind the cursor to the first line.
        self._l = 0

    def eof(self):
        """True once the cursor has moved past the last line."""
        return self._l >= len(self._str)

    def read(self):
        """Return the current line and advance, or '' at end of input."""
        if self.eof():
            return ''
        current = self._str[self._l]
        self._l += 1
        return current

    def seek_next_non_empty_line(self):
        # Advance over blank lines without consuming the first non-blank one.
        for candidate in self._str[self._l:]:
            if candidate.strip():
                break
            else:
                self._l += 1

    def read_to_condition(self, condition_func):
        """Consume lines until condition_func(line) is true; return them."""
        start = self._l
        for candidate in self._str[start:]:
            if condition_func(candidate):
                return self._str[start:self._l]
            self._l += 1
            if self.eof():
                # Ran off the end: hand back everything from start.
                return self._str[start:self._l + 1]
        return []

    def read_to_next_empty_line(self):
        """Skip leading blanks, then consume lines up to the next blank."""
        self.seek_next_non_empty_line()

        def is_empty(line):
            return not line.strip()

        return self.read_to_condition(is_empty)

    def read_to_next_unindented_line(self):
        """Consume lines until one that is non-blank and unindented."""
        return self.read_to_condition(
            lambda line: line.strip() and (len(line.lstrip()) == len(line)))

    def peek(self, n=0):
        """Return the line n ahead of the cursor without consuming it."""
        pos = self._l + n
        return self._str[pos] if pos < len(self._str) else ''

    def is_empty(self):
        # True when the whole input is blank.
        return not ''.join(self._str).strip()
class NumpyDocString(object):
    """Parse a NumPy-format docstring into named sections.

    Parsed content is exposed dict-style (e.g. ``doc['Parameters']``).
    Parameter-like sections hold ``(name, type, description_lines)`` tuples,
    'See Also' holds ``(name, description_lines, role)`` tuples, 'index' is
    a dict, and the remaining sections are lists of raw lines.
    """

    def __init__(self, docstring, config={}):
        # `config` is accepted for API compatibility; it is neither read nor
        # mutated here, so its mutable default is harmless.
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        # Known section names; __setitem__ warns on anything else.
        self._parsed_data = {
            'Signature': '',
            'Summary': [''],
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Attributes': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'Warnings': [],
            'References': '',
            'Examples': '',
            'index': {}
        }

        self._parse()

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        if key not in self._parsed_data:
            # Tolerate unknown sections with a warning instead of failing.
            warn("Unknown section %s" % key)
        else:
            self._parsed_data[key] = val

    def _is_at_section(self):
        """Return True if the reader is positioned at a section header."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))

    def _strip(self, doc):
        """Drop leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc) - j]

    def _read_to_next_section(self):
        """Consume and return all lines up to the next section header."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield (section_name, content_lines) pairs until input ends."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # Too short to be a header plus underline: stop cleanly.
                # BUGFIX: this used to `yield StopIteration`, which yields
                # the exception *class* as a value and breaks the tuple
                # unpacking in _parse(); per PEP 479, ending the generator
                # with a bare `return` is the correct behavior.
                return
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content):
        """Parse 'name : type' entries with indented description lines."""
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                # No explicit type was given.
                arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)

            params.append((arg_name, arg_type, desc))

        return params

    # Matches ':role:`name`' or a plain 'name'.
    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        Returns a list of (name, description_lines, role) tuples.
        """
        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)

        def push_item(name, rest):
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        current_func = None
        rest = []

        for line in content:
            if not line.strip():
                continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # 'name : description' -- start a new item.
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # Unindented line: one or more bare names.
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        if func.strip():
                            push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # Indented continuation of the current description.
                rest.append(line.strip())
        push_item(current_func, rest)
        return items

    def _parse_index(self, section, content):
        """
        .. index: default
           :refguide: something, else, and more
        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        summary = self._doc.read_to_next_empty_line()
        summary_str = " ".join([s.strip() for s in summary]).strip()
        # A summary that looks like a call expression is the signature.
        # (Raw string: the original pattern used unescaped backslashes,
        # which are invalid escape sequences on modern Python.)
        if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                self['Summary'] = self._doc.read_to_next_empty_line()
        else:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        """Drive the full parse: summary first, then named sections."""
        self._doc.reset()
        self._parse_summary()

        for (section, content) in self._read_sections():
            if not section.startswith('..'):
                # Normalize capitalization, e.g. 'see also' -> 'See Also'.
                section = ' '.join([s.capitalize() for s in section.split(' ')])
            if section in ('Parameters', 'Returns', 'Raises', 'Warns',
                           'Other Parameters', 'Attributes', 'Methods'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name) * symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            # Escape '*' so reST does not treat it as emphasis markup.
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            for param, param_type, desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # Chain undescribed names onto the previous line.
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in list(idx.items()):
            if section == 'default':
                continue
            out += ['   :%s: %s' % (section, ', '.join(references))]
        return out

    def __str__(self, func_role=''):
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        for param_list in ('Attributes', 'Methods'):
            out += self._str_param_list(param_list)
        out += self._str_index()
        return '\n'.join(out)
def indent(str, indent=4):
    """Indent every line of *str* by *indent* spaces.

    ``None`` yields just the indent string.  (The first parameter keeps its
    historic name even though it shadows the builtin ``str``.)
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
def dedent_lines(lines):
    """Deindent a list of lines maximally (common leading whitespace removed)."""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
def header(text, style='-'):
    """Return *text* underlined with *style* characters, newline-terminated."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
class FunctionDoc(NumpyDocString):
    """NumpyDocString for a function or method.

    When the docstring carries no signature, one is derived by
    introspecting ``func``.
    """

    def __init__(self, func, role='func', doc=None, config={}):
        # `func` may be None when `doc` is supplied directly; `config` is
        # unused here (kept for interface parity with ClassDoc).
        self._f = func
        self._role = role  # e.g. "func" or "meth"

        if doc is None:
            if func is None:
                raise ValueError("No function or docstring given")
            doc = inspect.getdoc(func) or ''
        NumpyDocString.__init__(self, doc)

        if not self['Signature'] and func is not None:
            func, func_name = self.get_func()
            try:
                # try to read signature
                # NOTE(review): inspect.getargspec / formatargspec were
                # removed in Python 3.11 / 3.13 respectively -- confirm the
                # supported interpreter range before relying on this path.
                argspec = inspect.getargspec(func)
                argspec = inspect.formatargspec(*argspec)
                argspec = argspec.replace('*', '\*')
                signature = '%s%s' % (func_name, argspec)
            except TypeError as e:
                # Non-introspectable callables (e.g. builtins).
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return (callable, display_name) for the wrapped object."""
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            # For classes, document the call interface (__call__/__init__).
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''

        func, func_name = self.get_func()
        signature = self['Signature'].replace('*', '\*')

        # Map the role abbreviation to the Sphinx directive name.
        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role, ''),
                                             func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
class ClassDoc(NumpyDocString):
    """NumpyDocString for a class: also collects its methods and attributes."""

    # Dunder methods that are still documented as public.
    extra_public_methods = ['__call__']

    def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
                 config={}):
        # NOTE(review): `func_doc` is unused in this implementation; `config`
        # is only read (never mutated), so its mutable default is harmless.
        if not inspect.isclass(cls) and cls is not None:
            raise ValueError("Expected a class or None, but got %r" % cls)
        self._cls = cls

        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename

        if doc is None:
            if cls is None:
                raise ValueError("No class or documentation string given")
            doc = pydoc.getdoc(cls)

        NumpyDocString.__init__(self, doc)

        if config.get('show_class_members', True):
            # Auto-populate Methods/Attributes when the docstring did not
            # list them explicitly.
            if not self['Methods']:
                self['Methods'] = [(name, '', '')
                                   for name in sorted(self.methods)]
            if not self['Attributes']:
                self['Attributes'] = [(name, '', '')
                                      for name in sorted(self.properties)]

    @property
    def methods(self):
        # Public callables (plus whitelisted dunders) defined on the class.
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if ((not name.startswith('_') or
                     name in self.extra_public_methods) and
                    callable(func))]

    @property
    def properties(self):
        # NOTE(review): selects public members whose *value* is None, which
        # looks intended to catch data attributes -- confirm against actual
        # ClassDoc usage before changing this predicate.
        if self._cls is None:
            return []
        return [name for name, func in inspect.getmembers(self._cls)
                if not name.startswith('_') and func is None]
|
bsd-3-clause
|
gundalow/ansible
|
lib/ansible/utils/vars.py
|
10
|
10215
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import keyword
import random
import uuid
from json import dumps
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils.six import iteritems, string_types, PY3
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence
from ansible.parsing.splitter import parse_kv
# Names that are keywords only on Python 3; presumably consumed by the Py2
# identifier check further down this module -- confirm before removing.
ADDITIONAL_PY2_KEYWORDS = frozenset(("True", "False", "None"))

_MAXSIZE = 2 ** 32  # upper bound for the per-process random component
cur_id = 0  # monotonically increasing counter consumed by get_unique_id()
node_mac = ("%012x" % uuid.getnode())[:12]  # this host's MAC address, hex-encoded
random_int = ("%08x" % random.randint(0, _MAXSIZE))[:8]  # per-process random salt
def get_unique_id():
    """Return a UUID-shaped identifier unique within this process.

    Combines the host MAC, a per-process random salt, and a monotonically
    increasing counter.
    """
    global cur_id
    cur_id += 1
    parts = (
        node_mac[0:8],
        node_mac[8:12],
        random_int[0:4],
        random_int[4:8],
        ("%012x" % cur_id)[:12],
    )
    return "-".join(parts)
def _validate_mutable_mappings(a, b):
"""
Internal convenience function to ensure arguments are MutableMappings
This checks that all arguments are MutableMappings or raises an error
:raises AnsibleError: if one of the arguments is not a MutableMapping
"""
# If this becomes generally needed, change the signature to operate on
# a variable number of arguments instead.
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
myvars = []
for x in [a, b]:
try:
myvars.append(dumps(x))
except Exception:
myvars.append(to_native(x))
raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
)
def combine_vars(a, b, merge=None):
    """
    Return a copy of dictionaries of variables based on configured hash behavior
    """
    # merge=None defers to the configured hash behaviour; merge=True forces
    # a deep merge; anything falsy selects plain replacement.
    use_merge = merge or (merge is None and C.DEFAULT_HASH_BEHAVIOUR == "merge")
    if use_merge:
        return merge_hash(a, b)

    # HASH_BEHAVIOUR == 'replace'
    _validate_mutable_mappings(a, b)
    combined = a.copy()
    combined.update(b)
    return combined
def merge_hash(x, y, recursive=True, list_merge='replace'):
    """
    Return a new dictionary result of the merges of y into x,
    so that keys from y take precedence over keys from x.
    (x and y aren't modified)

    :arg x: lower-priority mapping (the one being "patched").
    :arg y: higher-priority mapping whose entries win on conflict.
    :kwarg recursive: when True, nested dicts are merged instead of replaced.
    :kwarg list_merge: one of 'replace', 'keep', 'append', 'prepend',
        'append_rp', 'prepend_rp', controlling how two lists are combined.
    :raises AnsibleError: on an invalid list_merge value or non-dict inputs.
    """
    if list_merge not in ('replace', 'keep', 'append', 'prepend', 'append_rp', 'prepend_rp'):
        raise AnsibleError("merge_hash: 'list_merge' argument can only be equal to 'replace', 'keep', 'append', 'prepend', 'append_rp' or 'prepend_rp'")

    # verify x & y are dicts
    _validate_mutable_mappings(x, y)

    # to speed things up: if x is empty or equal to y, return y
    # (this `if` can be remove without impact on the function
    # except performance)
    if x == {} or x == y:
        return y.copy()

    # in the following we will copy elements from y to x, but
    # we don't want to modify x, so we create a copy of it
    x = x.copy()

    # to speed things up: use dict.update if possible
    # (this `if` can be remove without impact on the function
    # except performance)
    if not recursive and list_merge == 'replace':
        x.update(y)
        return x

    # insert each element of y in x, overriding the one in x
    # (as y has higher priority)
    # we copy elements from y to x instead of x to y because
    # there is a high probability x will be the "default" dict the user
    # want to "patch" with y
    # therefore x will have much more elements than y
    for key, y_value in iteritems(y):
        # if `key` isn't in x
        # update x and move on to the next element of y
        if key not in x:
            x[key] = y_value
            continue
        # from this point we know `key` is in x

        x_value = x[key]

        # if both x's element and y's element are dicts
        # recursively "combine" them or override x's with y's element
        # depending on the `recursive` argument
        # and move on to the next element of y
        if isinstance(x_value, MutableMapping) and isinstance(y_value, MutableMapping):
            if recursive:
                x[key] = merge_hash(x_value, y_value, recursive, list_merge)
            else:
                x[key] = y_value
            continue

        # if both x's element and y's element are lists
        # "merge" them depending on the `list_merge` argument
        # and move on to the next element of y
        if isinstance(x_value, MutableSequence) and isinstance(y_value, MutableSequence):
            if list_merge == 'replace':
                # replace x value by y's one as it has higher priority
                x[key] = y_value
            elif list_merge == 'append':
                x[key] = x_value + y_value
            elif list_merge == 'prepend':
                x[key] = y_value + x_value
            elif list_merge == 'append_rp':
                # append all elements from y_value (high prio) to x_value (low prio)
                # and remove x_value elements that are also in y_value
                # we don't remove elements from x_value nor y_value that were already in double
                # (we assume that there is a reason if there where such double elements)
                # _rp stands for "remove present"
                x[key] = [z for z in x_value if z not in y_value] + y_value
            elif list_merge == 'prepend_rp':
                # same as 'append_rp' but y_value elements are prepend
                x[key] = y_value + [z for z in x_value if z not in y_value]
            # else 'keep'
            #   keep x value even if y it's of higher priority
            #   it's done by not changing x[key]
            continue

        # else just override x's element with y's one
        x[key] = y_value

    return x
def load_extra_vars(loader):
    """Parse the -e/--extra-vars CLI options into a single dictionary.

    Each option value may be:
      * ``@filename``            -> loaded as a YAML file (JSON is a subset of YAML)
      * inline YAML/JSON         -> value starting with ``[`` or ``{``
      * ``key=value`` pairs      -> parsed with parse_kv()
    Later options override earlier ones via combine_vars().

    :arg loader: DataLoader used to read ``@file`` arguments.
    :returns: dict with all extra vars combined.
    :raises AnsibleOptionsError: if a filename is given without the ``@``
        prefix, or an option does not resolve to a dictionary.
    """
    extra_vars = {}
    for extra_vars_opt in context.CLIARGS.get('extra_vars', tuple()):
        extra_vars_opt = to_text(extra_vars_opt, errors='surrogate_or_strict')
        # skip empty values; `not` already covers None so no separate
        # `is None` test is needed (the old double check was redundant)
        if not extra_vars_opt:
            continue
        if extra_vars_opt.startswith(u"@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            data = loader.load_from_file(extra_vars_opt[1:])
        elif extra_vars_opt[0] in [u'/', u'.']:
            raise AnsibleOptionsError("Please prepend extra_vars filename '%s' with '@'" % extra_vars_opt)
        elif extra_vars_opt[0] in [u'[', u'{']:
            # Arguments as YAML
            data = loader.load(extra_vars_opt)
        else:
            # Arguments as Key-value
            data = parse_kv(extra_vars_opt)
        if isinstance(data, MutableMapping):
            extra_vars = combine_vars(extra_vars, data)
        else:
            raise AnsibleOptionsError("Invalid extra vars data supplied. '%s' could not be made into a dictionary" % extra_vars_opt)
    return extra_vars
def load_options_vars(version):
    """Return the 'magic' ansible_* variables describing this CLI run.

    :arg version: ansible version string, or None (reported as 'Unknown').
    :returns: dict mapping ``ansible_<alias>`` names to the CLI option
        values that were actually set (None-valued options are omitted).
    """
    options_vars = {'ansible_version': 'Unknown' if version is None else version}
    # (CLI attribute name, exported variable alias) pairs
    attr_aliases = (
        ('check', 'check_mode'),
        ('diff', 'diff_mode'),
        ('forks', 'forks'),
        ('inventory', 'inventory_sources'),
        ('skip_tags', 'skip_tags'),
        ('subset', 'limit'),
        ('tags', 'run_tags'),
        ('verbosity', 'verbosity'),
    )
    for attr, alias in attr_aliases:
        value = context.CLIARGS.get(attr)
        if value is not None:
            options_vars['ansible_%s' % alias] = value
    return options_vars
def _isidentifier_PY3(ident):
    """Python 3 variant: valid identifier, ASCII-only, not a keyword."""
    if not isinstance(ident, string_types):
        return False
    # Python 2 does not allow non-ascii characters in identifiers, so
    # reject them here as well to keep both implementations in sync.
    # NOTE: Python 3.7 offers str.isascii(); switch over once 3.5/3.6
    # support on the controller is dropped.
    try:
        ident.encode('ascii')
    except UnicodeEncodeError:
        return False
    return ident.isidentifier() and not keyword.iskeyword(ident)
def _isidentifier_PY2(ident):
    """Python 2 variant: non-empty string, no invalid chars, not a keyword
    (including the Python-3-only keywords True/False/None)."""
    if not isinstance(ident, string_types) or not ident:
        return False
    if C.INVALID_VARIABLE_NAMES.search(ident):
        return False
    return not (keyword.iskeyword(ident) or ident in ADDITIONAL_PY2_KEYWORDS)
# Pick the interpreter-appropriate implementation at import time and expose
# it under the public name `isidentifier`; the shared docstring is attached
# below so both variants document the same contract.
if PY3:
    isidentifier = _isidentifier_PY3
else:
    isidentifier = _isidentifier_PY2
isidentifier.__doc__ = """Determine if string is valid identifier.
The purpose of this function is to be used to validate any variables created in
a play to be valid Python identifiers and to not conflict with Python keywords
to prevent unexpected behavior. Since Python 2 and Python 3 differ in what
a valid identifier is, this function unifies the validation so playbooks are
portable between the two. The following changes were made:
    * disallow non-ascii characters (Python 3 allows for them as opposed to Python 2)
    * True, False and None are reserved keywords (these are reserved keywords
      on Python 3 as opposed to Python 2)
:arg ident: A text string of identifier to check. Note: It is callers
    responsibility to convert ident to text if it is not already.
Originally posted at http://stackoverflow.com/a/29586366
"""
|
gpl-3.0
|
raymondnijssen/QGIS
|
tests/src/python/test_authmanager_pki_postgres.py
|
11
|
9727
|
# -*- coding: utf-8 -*-
"""
Tests for auth manager PKI access to postgres.
This is an integration test for QGIS Desktop Auth Manager postgres provider that
checks if QGIS can use a stored auth manager auth configuration to access
a PKI protected postgres.
Configuration from the environment:
* QGIS_POSTGRES_SERVER_PORT (default: 55432)
* QGIS_POSTGRES_EXECUTABLE_PATH (default: /usr/lib/postgresql/9.4/bin)
From build dir, run: ctest -R PyQgsAuthManagerPKIPostgresTest -V
or, if your PostgreSQL path differs from the default:
QGIS_POSTGRES_EXECUTABLE_PATH=/usr/lib/postgresql/<your_version_goes_here>/bin \
ctest -R PyQgsAuthManagerPKIPostgresTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import time
import signal
import stat
import subprocess
import tempfile
import glob
from shutil import rmtree
from utilities import unitTestDataPath
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsDataSourceUri,
QgsWkbTypes,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.PyQt.QtCore import QFile
from qgis.testing import (
start_app,
unittest,
)
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
QGIS_POSTGRES_SERVER_PORT = os.environ.get('QGIS_POSTGRES_SERVER_PORT', '55432')
QGIS_POSTGRES_EXECUTABLE_PATH = os.environ.get('QGIS_POSTGRES_EXECUTABLE_PATH', '/usr/lib/postgresql/9.4/bin')
assert os.path.exists(QGIS_POSTGRES_EXECUTABLE_PATH)
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
# Postgres test path
QGIS_PG_TEST_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
qgis_app = start_app()
QGIS_POSTGRES_CONF_TEMPLATE = """
hba_file = '%(tempfolder)s/pg_hba.conf'
listen_addresses = '*'
port = %(port)s
max_connections = 100
unix_socket_directories = '%(tempfolder)s'
ssl = true
ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
ssl_cert_file = '%(server_cert)s'
ssl_key_file = '%(server_key)s'
ssl_ca_file = '%(sslrootcert_path)s'
password_encryption = on
"""
QGIS_POSTGRES_HBA_TEMPLATE = """
hostssl all all 0.0.0.0/0 cert clientcert=1
hostssl all all ::1/0 cert clientcert=1
host all all 127.0.0.1/32 trust
host all all ::1/32 trust
"""
class TestAuthManager(unittest.TestCase):
    """Integration test for PKI access to PostgreSQL.

    setUpClass spins up a throw-away PostgreSQL instance configured for SSL
    client-certificate auth, then the tests check that a postgres layer is
    valid only when opened with the stored PKI auth configuration.
    """

    @classmethod
    def setUpAuth(cls):
        """Run before all tests and set up authentication"""
        authm = QgsApplication.authManager()
        assert (authm.setMasterPassword('masterpassword', True))
        cls.pg_conf = os.path.join(cls.tempfolder, 'postgresql.conf')
        cls.pg_hba = os.path.join(cls.tempfolder, 'pg_hba.conf')
        # Client side
        cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
        cls.sslcert = os.path.join(cls.certsdata_path, 'gerardus_cert.pem')
        cls.sslkey = os.path.join(cls.certsdata_path, 'gerardus_key.pem')
        assert os.path.isfile(cls.sslcert)
        assert os.path.isfile(cls.sslkey)
        assert os.path.isfile(cls.sslrootcert_path)
        # Restrict key/cert permissions; postgres refuses world-readable keys.
        os.chmod(cls.sslcert, stat.S_IRUSR)
        os.chmod(cls.sslkey, stat.S_IRUSR)
        os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
        # Store a PKI-Paths auth config pointing at the client cert/key pair.
        cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
        cls.auth_config.setConfig('certpath', cls.sslcert)
        cls.auth_config.setConfig('keypath', cls.sslkey)
        cls.auth_config.setName('test_pki_auth_config')
        cls.username = 'Gerardus'
        cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
        assert cls.sslrootcert is not None
        authm.storeCertAuthorities(cls.sslrootcert)
        authm.rebuildCaCertsCache()
        authm.rebuildTrustedCaCertsCache()
        authm.rebuildCertTrustCache()
        assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
        assert cls.auth_config.isValid()
        # Server side
        cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem')
        cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem')
        cls.server_rootcert = cls.sslrootcert_path
        os.chmod(cls.server_cert, stat.S_IRUSR)
        os.chmod(cls.server_key, stat.S_IRUSR)
        os.chmod(cls.server_rootcert, stat.S_IRUSR)
        # Place conf in the data folder
        with open(cls.pg_conf, 'w+') as f:
            f.write(QGIS_POSTGRES_CONF_TEMPLATE % {
                'port': cls.port,
                'tempfolder': cls.tempfolder,
                'server_cert': cls.server_cert,
                'server_key': cls.server_key,
                'sslrootcert_path': cls.sslrootcert_path,
            })
        with open(cls.pg_hba, 'w+') as f:
            f.write(QGIS_POSTGRES_HBA_TEMPLATE)

    @classmethod
    def setUpClass(cls):
        """Run before all tests:
        Creates an auth configuration"""
        cls.port = QGIS_POSTGRES_SERVER_PORT
        cls.dbname = 'test_pki'
        cls.tempfolder = QGIS_PG_TEST_PATH
        cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
        cls.hostname = 'localhost'
        cls.data_path = os.path.join(cls.tempfolder, 'data')
        os.mkdir(cls.data_path)
        cls.setUpAuth()
        # Initialise a fresh cluster, then start postgres with our conf file.
        subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'initdb'), '-D', cls.data_path])
        cls.server = subprocess.Popen([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'postgres'), '-D',
                                       cls.data_path, '-c',
                                       "config_file=%s" % cls.pg_conf],
                                      env=os.environ,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
        # Wait max 10 secs for the server to start
        end = time.time() + 10
        while True:
            line = cls.server.stderr.readline()
            print(line)
            if line.find(b"database system is ready to accept") != -1:
                break
            if time.time() > end:
                raise Exception("Timeout connecting to PostgreSQL")
        # Create a DB
        subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'createdb'), '-h', 'localhost', '-p', cls.port, 'test_pki'])
        # Inject test SQL from test path
        test_sql = os.path.join(unitTestDataPath('provider'), 'testdata_pg.sql')
        subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-f', test_sql, cls.dbname])
        # Create a role
        subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-c', 'CREATE ROLE "%s" WITH SUPERUSER LOGIN' % cls.username, cls.dbname])

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        # SIGABRT after terminate() forces immediate shutdown of the server.
        cls.server.terminate()
        os.kill(cls.server.pid, signal.SIGABRT)
        del cls.server
        time.sleep(2)
        rmtree(QGIS_AUTH_DB_DIR_PATH)
        rmtree(cls.tempfolder)

    def setUp(self):
        """Run before each test."""
        pass

    def tearDown(self):
        """Run after each test."""
        pass

    @classmethod
    def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=None):
        """
        PG layer factory
        """
        if layer_name is None:
            layer_name = 'pg_' + type_name
        uri = QgsDataSourceUri()
        uri.setWkbType(QgsWkbTypes.Point)
        # No username/password: authentication comes from authcfg (if given).
        uri.setConnection("localhost", cls.port, cls.dbname, "", "", QgsDataSourceUri.SslVerifyFull, authcfg)
        uri.setKeyColumn('pk')
        uri.setSrid('EPSG:4326')
        uri.setDataSource('qgis_test', 'someData', "geom", "", "pk")
        # Note: do not expand here!
        layer = QgsVectorLayer(uri.uri(False), layer_name, 'postgres')
        return layer

    def testValidAuthAccess(self):
        """
        Access the protected layer with valid credentials
        """
        pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(pg_layer.isValid())

    def testInvalidAuthAccess(self):
        """
        Access the protected layer with not valid credentials
        """
        pg_layer = self._getPostGISLayer('testlayer_èé')
        self.assertFalse(pg_layer.isValid())

    def testRemoveTemporaryCerts(self):
        """
        Check that no temporary cert remain after connection with
        postgres provider
        """
        def cleanTempPki():
            # Temporary PKI files look like tmp*_{*}.pem in the system tmp dir.
            pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
            for fn in pkies:
                f = QFile(fn)
                f.setPermissions(QFile.WriteOwner)
                f.remove()
        # remove any temppki in temprorary path to check that no
        # other pki remain after connection
        cleanTempPki()
        # connect using postgres provider
        pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(pg_layer.isValid())
        # do test no certs remained
        pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
        self.assertEqual(len(pkies), 0)
# Allow running this test module directly (outside of ctest).
if __name__ == '__main__':
    unittest.main()
|
gpl-2.0
|
csmanjuvijay/usb-next
|
Documentation/target/tcm_mod_builder.py
|
497
|
22865
|
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    # Print the error message and abort the whole script with exit code 1.
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    # Generate <mod>_base.h for an FC fabric: tpg/lport structs keyed by WWPN.
    # Side effect: sets the globals fabric_mod_port/"lport" and
    # fabric_mod_init_port/"nport" used by the other generators.
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"
    # NOTE(review): file.write() returns None, so this error branch is dead
    # code -- a failed write would raise IOError instead.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    # Generate <mod>_base.h for a SAS fabric: tpg/tport structs keyed by WWPN.
    # Side effect: sets the globals fabric_mod_port/"tport" and
    # fabric_mod_init_port/"iport".
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # NOTE(review): write() returns None, so this check never triggers.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    # Generate <mod>_base.h for an iSCSI fabric: tpg/tport structs keyed by
    # an IQN TargetName (no binary WWPN, unlike FC/SAS).
    # Side effect: sets the globals fabric_mod_port/"tport" and
    # fabric_mod_init_port/"iport".
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    # NOTE(review): write() returns None, so this check never triggers.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    # Generate <mod>_configfs.c: the configfs glue (make/drop tpg and wwn
    # callbacks), the target_core_fabric_ops table and module init/exit.
    # Relies on the fabric_mod_port global set by the *_include generators.
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Header includes for the generated file.
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
    # <mod>_make_tpg(): parse "tpgt_N", allocate and register the tpg.
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    # SCSI protocol identifier depends on the chosen fabric type.
    if proto_ident == "FC":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
    elif proto_ident == "SAS":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
    elif proto_ident == "iSCSI":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"
    # <mod>_make_<port>() / <mod>_drop_<port>(): wwn allocation callbacks.
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    # The fabric ops table wires the callbacks emitted by
    # tcm_mod_dump_fabric_ops() into target core.
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .module = THIS_MODULE,\n"
    buf += " .name = \"" + fabric_mod_name + "\",\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "};\n\n"
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    # NOTE(review): write() returns None, so this check never triggers.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    # Scan include/target/target_core_fabric.h and collect every function
    # pointer declaration line of struct target_core_fabric_ops into the
    # global `fabric_ops` list (consumed by tcm_mod_dump_fabric_ops()).
    # Note: the parameter shadows the module-level `tcm_dir` global.
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')
    line = p.readline()
    while line:
        # Skip lines until the struct definition is seen, then flip
        # process_fo and start collecting '(*name)(...)' member lines.
        # NOTE(review): process_fo is set to 1 on the first line that does
        # NOT match the struct header, and scanning never stops at the
        # closing brace -- it collects every '(*' line to end of file.
        # This mirrors the header's actual layout; confirm before reuse.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    # Generate <mod>_fabric.c (stub implementations, accumulated in `buf`)
    # and <mod>_fabric.h (matching prototypes, accumulated in `bufi`) for
    # every fabric op collected by tcm_mod_scan_fabric_ops().
    buf = ""
    bufi = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi
    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)
    # Common includes plus the always-present check_true/check_false helpers.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_common.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
    # Emit one stub (and prototype) per recognised op signature; unmatched
    # entries in fabric_ops are silently skipped.
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo
        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search('queue_tm_rsp\)\(', fo):
            buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search('aborted_task\)\(', fo):
            buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
    # NOTE(review): write() returns None, so these checks never trigger.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    # Generate the module's Makefile listing the two generated objects.
    buf = ""
    f = fabric_mod_dir_var + "/Makefile"
    print "Writing file: " + f
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += " " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
    # NOTE(review): write() returns None, so this check never triggers.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    # Generate the module's Kconfig entry (tristate, default n).
    buf = ""
    f = fabric_mod_dir_var + "/Kconfig"
    print "Writing file: " + f
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
    # NOTE(review): write() returns None, so this check never triggers.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    # Append the new module's obj-$(CONFIG_...) entry to the target Makefile.
    kbuild = tcm_dir + "/drivers/target/Makefile"
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    fh = open(kbuild, 'a')
    fh.write(entry)
    fh.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    # Append a source line for the new module to drivers/target/Kconfig.
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    fh = open(kconfig, 'a')
    fh.write(entry)
    fh.close()
    return
def main(modname, proto_ident):
	"""Generate a skeleton TCM fabric module and optionally wire it into
	the kernel build files (drivers/target/Makefile and Kconfig).

	modname: name of the new fabric module (also its directory name).
	proto_ident: transport protocol identifier; must be FC, SAS or iSCSI.
	Exits with status 1 on a bad protocol or a pre-existing module dir.
	"""
	# proto_ident = "FC"
	# proto_ident = "SAS"
	# proto_ident = "iSCSI"
	# NOTE(review): assumes the script is run from two levels below the
	# kernel tree root (tools/ style layout) -- TODO confirm.
	tcm_dir = os.getcwd();
	tcm_dir += "/../../"
	print "tcm_dir: " + tcm_dir
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print "Set fabric_mod_name: " + fabric_mod_name
	print "Set fabric_mod_dir: " + fabric_mod_dir
	print "Using proto_ident: " + proto_ident
	if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
		print "Unsupported proto_ident: " + proto_ident
		sys.exit(1)
	# Refuse to clobber an existing module directory.
	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print "tcm_mod_create_module_subdir() failed because module already exists!"
		sys.exit(1)
	# Emit the skeleton sources, then the per-module Makefile and Kconfig.
	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
	# Interactively offer to hook the new module into the parent build files.
	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
	input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
	if input == "yes" or input == "y":
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
	return
# Command-line interface: -m/--modulename and -p/--protoident are both
# mandatory; missing options print usage and exit(-1).
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
# optparse has no required-option support, so enforce it by hand.
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
|
gpl-2.0
|
rpavlik/chromium
|
progs/packertest/packertest.py
|
4
|
45218
|
#!/usr/bin/python
# Copyright (c) 2004, Red Hat.
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This script generates the packertest*.c files from the APIspec.txt file.
import sys, string, re
import copy
sys.path.append( "../../glapi_parser" )
import apiutil
"""
Various ways of permuting a list of lists (a.k.a. Cartesian product).
The permutation is returned as a list of tuples.
For example:
permute( [ [1,2], [3,4,5] ] ) gives
[(1,3), (2,3), (1,4), (2,4), (1,5), (2,5)]
"""
#
# Global flags (0 = off, 1 = on) consulted by the generator below.
# Only `verbose` is exercised in this part of the file (it adds crDebug()
# tracing to the generated tests); the others are presumably toggled by
# command-line handling elsewhere in the script -- TODO confirm.
#
omit = 0
nopack = 0
allfuncs = 0
stub = 0
verbose = 0
debug = 0
def permute(Lists):
	"""Return the Cartesian product of a list of lists, as a list of tuples.

	permute([[1, 2], [3, 4, 5]]) ->
	    [(1, 3), (2, 3), (1, 4), (2, 4), (1, 5), (2, 5)]
	(the first list varies fastest, matching permute2() below and the
	module docstring above).

	The previous version had two defects: `result = curr` sat inside the
	inner item loop, so already-extended tuples were extended again and the
	output contained over-long compound tuples, contradicting the example
	in the module docstring; and it depended on Python 2's map() returning
	a list (it also shadowed the builtin name `list`).
	"""
	if not Lists:
		return []
	result = [(item,) for item in Lists[0]]
	for seq in Lists[1:]:
		# Each later list varies more slowly than the ones before it.
		result = [prev + (item,) for item in seq for prev in result]
	return result
# This version was written by Tim Peters and is somewhat faster,
# especially for large numbers of small lists.
def permute2(seqs):
	"""Cartesian product of a list of sequences (divide-and-conquer).

	Produces the same tuples in the same order as permute(): the first
	sequence varies fastest.  Splits the input near the square root of the
	total product size, recurses on both halves, then merges.

	Fixed to also work under Python 3: the old code assumed map() returned
	a list (it called len() on and repeated the result with *), which
	breaks with Python 3's lazy map().
	"""
	n = len(seqs)
	if n == 0:
		return []
	if n == 1:
		return [(item,) for item in seqs[0]]
	# find good splitting point: first prefix whose product of lengths
	# is at least the square root of the total product
	prods = []
	prod = 1
	for x in seqs:
		prod = prod * len(x)
		prods.append(prod)
	for i in range(n):
		if prods[i] ** 2 >= prod:
			break
	n = min(i + 1, n - 1)
	a = permute2(seqs[:n])
	b = permute2(seqs[n:])
	# Spray each tuple of b across len(a) consecutive slots, then pair
	# element-wise with a cycled len(b) times: a varies fastest, b slowest.
	sprayb = []
	lena = len(a)
	for x in b:
		sprayb[len(sprayb):] = [x] * lena
	return [left + right for (left, right) in zip(a * len(b), sprayb)]
#
# Debug funcs
#
def PrintRecord(record):
argList = apiutil.MakeDeclarationString(record.params)
if record.category == "Chromium":
prefix = "cr"
else:
prefix = "gl"
print '%s %s%s(%s);' % (record.returnType, prefix, record.name, argList )
if len(record.props) > 0:
print ' /* %s */' % string.join(record.props, ' ')
def PrintGet(record):
argList = apiutil.MakeDeclarationString(record.params)
if record.category == "Chromium":
prefix = "cr"
else:
prefix = "gl"
if 'get' in record.props:
print '%s %s%s(%s);' % (record.returnType, prefix, record.name, argList )
def PrintSetClient(record):
argList = apiutil.MakeDeclarationString(record.params)
if record.category == "Chromium":
prefix = "cr"
else:
prefix = "gl"
if 'setclient' in record.props:
print '%s %s%s(%s);' % (record.returnType, prefix, record.name, argList )
def PrintEnum(record):
paramList = apiutil.MakeDeclarationString(record.params)
if record.category == "Chromium":
prefix = "cr"
else:
prefix = "gl"
for (name, type, vecSize) in record.params:
if type == "GLenum" :
for i in range(len(record.paramprop)):
(name,enums) = record.paramprop[i]
print 'name = %s' % name
print 'enums = %s' % enums
#evec = string.split(enums,' ')
#for j in range(len(evec)):
#print 'evec%d = %s' % (j, evec[j])
def DumpTest():
	"""Run each debug printer over every record in APIspec.txt."""
	spec = "../../glapi_parser/APIspec.txt"
	for printer in (PrintRecord, PrintGet, PrintSetClient, PrintEnum):
		apiutil.ProcessSpecFile(spec, printer)
#======================================================================
def CopyrightC(f ):
	# Emit the Stanford copyright banner as a C block comment into the
	# generated .c file.
	f.write( """/* Copyright (c) 2001, Stanford University
	All rights reserved.
	See the file LICENSE.txt for information on redistributing this software. */
""")
def CopyrightDef(f):
	# Emit the same copyright banner using ';' comments (for .def files).
	f.write( """; Copyright (c) 2001, Stanford University
; All rights reserved.
;
; See the file LICENSE.txt for information on redistributing this software.
""")
#======================================================================
# Map GL scalar types to (printf format specifier, C cast) pairs used when
# stringifying generated call arguments.  An empty cast string means the
# value is substituted verbatim (GLenum/GLboolean names print symbolically).
printf_mapping = {
	'GLint': ('%d','int'),
	'GLshort': ('%hd','short'),
	'GLbyte': ('%d','int'),
	'GLubyte': ('%u','unsigned'),
	'GLuint': ('%u','unsigned'),
	'GLushort': ('%hu','unsigned short'),
	'GLenum': ('%s','' ),
	'GLfloat': ('%f','float'),
	'GLclampf': ('%f','float'),
	'GLdouble': ('%f','float'),
	'GLclampd': ('%f','float'),
	'GLbitfield': ('0x%x','int'),
	'GLboolean': ('%s',''),
	'GLsizei': ('%u','unsigned'),
	'GLsizeiptrARB': ('%u','unsigned'),
	'GLintptrARB': ('%u','unsigned')
}
# currently not used
# Same idea for pointer-typed parameters.
printf_pointer_mapping = {
	'GLint *': ('%d','int'),
	'GLshort *': ('%hd','short'),
	'GLbyte *': ('%d','int'),
	'GLubyte *': ('%u','unsigned'),
	'GLuint *': ('%u','unsigned'),
	'GLushort *': ('%hu','unsigned short'),
	'GLenum *': ('%s','' ),
	'GLfloat *': ('%f','float'),
	'GLclampf *': ('%f','float'),
	'GLdouble *': ('%f','float'),
	'GLclampd *': ('%f','float'),
	'GLbitfield *': ('0x%x','int'),
	'GLboolean *': ('%s',''),
	'GLsizeiptrARB': ('%u','unsigned'),
	'GLintptrARB': ('%u','unsigned'),
	'GLsizei *': ('%u','unsigned')
}
#
# Not used but will work if renamed range_mapping
#
# Full numeric limits per GL type (extreme-value testing candidate set).
limit_mapping = {
	'GLuint': (['0', '4294967294u']),
	'GLsizei': ([0, 65535]),
	'GLfloat': ([-3.40282347e+38, 3.40282347e+38]),
	'GLbyte': ([-128, 127]),
	'GLvoid': ([0]),
	'GLubyte': ([0, 255]),
	'GLdouble': ([-1.7976931348623157e+308, 1.7976931348623157e+308]),
	'GLshort': ([-32768, 32767]),
	'GLint': ([-2147483647, 2147483646]),
	'GLbitfield': ([0, 0xffffffff]),
	'GLushort': ([0, 65535]),
	'GLclampf': ([-3.40282347e+38, 3.40282347e+38]),
	'GLclampd': ([-1.7976931348623157e+308, 1.7976931348623157e+308]),
	'GLsizeiptrARB': ([0]),
	'GLintptrARB': ([0]),
	'GLboolean': ['GL_FALSE', 'GL_TRUE']
}
#
# Not used but will work if renamed range_mapping
#
# Moderate two-value ranges per GL type (alternative candidate set).
range_mapping1 = {
	'GLuint': (['0', '1024']),
	'GLsizei': ([0, 1024]),
	'GLfloat': ([-3.40282347e+2, 3.40282347e+2]),
	'GLbyte': ([-20, 100]),
	'GLvoid': ([0]),
	'GLubyte': ([0, 64]),
	'GLdouble': ([-1.7976931348623157e+3, 1.7976931348623157e+3]),
	'GLshort': ([-250, 230]),
	'GLint': ([-2147, 500]),
	'GLbitfield': ([0, 0xffffffff]),
	'GLushort': ([0, 400]),
	'GLclampf': ([-3.40282347e+3, 3.40282347e+8]),
	'GLclampd': ([-1.7976931348623157e+3, 1.7976931348623157e+3]),
	'GLsizeiptrARB': ([0]),
	'GLintptrARB': ([0]),
	'GLboolean': ['GL_FALSE', 'GL_TRUE']
}
# The active mapping: a single representative value per GL type, used by
# GenParmLists() when a parameter has no explicit enum/list/vec values.
# (GLboolean still contributes two values, doubling the permutations.)
range_mapping = {
	'GLuint': ([3]),
	'GLsizei': ([10]),
	'GLfloat': ([3.40]),
	'GLbyte': ([2]),
	'GLvoid': ([0]),
	'GLubyte': ([14]),
	'GLdouble': ([10.79]),
	'GLshort': ([2]),
	'GLint': ([1]),
	'GLbitfield': ([0xffffff]),
	'GLushort': ([5]),
	'GLclampf': ([ 245.66]),
	'GLclampd': ([1234.33]),
	'GLsizeiptrARB': ([0]),
	'GLintptrARB': ([0]),
	'GLboolean': ['GL_FALSE', 'GL_TRUE']
}
#
# Special names that trigger a separate file since the
# output for these GL calls are huge
# CombinerOutputNV generates a massive file
# TexImage3DEXT isn't used much anymore
#
special_keys = [
	'BlendFuncSeparateEXT',
	'ColorTable',
	'ColorTableEXT',
	'CombinerInputNV',
	#'CombinerOutputNV',
	'TexSubImage1D',
	'TexSubImage2D',
	'TexSubImage3D',
	'TexImage1D',
	'TexImage2D',
	#'TexImage3DEXT',
	'TexImage3D'
]
#
# special casing
#
# Functions excluded from automatic test generation; presumably they need
# hand-written tests or cannot be driven by simple permuted arguments
# (window/context management, queries, program/buffer objects, etc.).
#
# 'CreateContext',
# 'DestroyContext',
# 'MakeCurrent',
# 'WindowDestroy',
# 'WindowPosition',
# 'WindowShow',
# 'WindowSize',
# 'Writeback',
# 'WindowCreate',
# 'SwapBuffers',
special_funcs = [
	'Begin',
	'End',
	'BoundsInfoCR',
	'BarrierCreateCR',
	'BarrierDestroyCR',
	'BarrierExecCR',
	'SemaphoreCreateCR',
	'SemaphoreDestroyCR',
	'SemaphorePCR',
	'SemaphoreVCR',
	'AreTexturesResident',
	'CallLists',
	'EndList',
	'DeleteTextures',
	'PointParameterfvARB',
	'PointParameteriv',
	'PrioritizeTextures',
	'PushAttrib',
	'PopAttrib',
	'AreProgramsResidentNV',
	'DeleteProgramsARB',
	'DeleteProgramsNV',
	'ExecuteProgramNV',
	'GenProgramsARB',
	'GenProgramsNV',
	'GetProgramEnvParameterdvARB',
	'GetProgramEnvParameterfvARB',
	'GetProgramivARB',
	'GetProgramivNV',
	'GetProgramLocalParameterdvARB',
	'GetProgramLocalParameterfvARB',
	'GetProgramNamedParameterdvNV',
	'GetProgramNamedParameterfvNV',
	'GetProgramParameterdvNV',
	'GetProgramParameterfvNV',
	'GetProgramStringARB',
	'GetProgramStringNV',
	'LoadProgramNV',
	'ProgramEnvParameter4dARB',
	'ProgramEnvParameter4dvARB',
	'ProgramEnvParameter4fARB',
	'ProgramEnvParameter4fvARB',
	'ProgramLocalParameter4dARB',
	'ProgramLocalParameter4dvARB',
	'ProgramLocalParameter4fARB',
	'ProgramLocalParameter4fvARB',
	'ProgramNamedParameter4dNV',
	'ProgramNamedParameter4dvNV',
	'ProgramNamedParameter4fNV',
	'ProgramNamedParameter4fvNV',
	'ProgramParameter4dNV',
	'ProgramParameter4dvNV',
	'ProgramParameter4fNV',
	'ProgramParameter4fvNV',
	'ProgramParameters4dvNV',
	'ProgramParameters4fvNV',
	'ProgramStringARB',
	'RequestResidentProgramsNV',
	'DeleteQueriesARB',
	'GenQueriesARB',
	'BufferSubDataARB',
	'GetBufferSubDataARB',
	'BufferDataARB',
	'GenBuffersARB',
	'DeleteBuffersARB',
	'GenFencesNV',
	'IsFenceNV',
	'TestFenceNV',
	'GetFenceivNV',
	'DeleteFencesNV',
	'GetVertexAttribPointervNV',
	'CompressedTexImage1DARB',
	'CompressedTexImage2DARB',
	'CompressedTexImage3DARB',
	'CompressedTexSubImage1DARB',
	'CompressedTexSubImage2DARB',
	'CompressedTexSubImage3DARB',
	'GetCompressedTexImageARB',
	#'TexParameterfv',
	#'TexParameteriv',
	'GetVertexAttribPointervARB',
	'ReadPixels',
	'ChromiumParametervCR',
	'GetChromiumParametervCR',
	#'GetTexImage'
]
#
def enableTex(f):
	# Emit a C helper that enables texturing, depth testing and all
	# evaluator maps, so subsequently generated GL calls have state to
	# act on.  The string below is written verbatim into the output file.
	f.write( """
void enableTex(void)
{
	glEnable(GL_TEXTURE_1D);
	glEnable(GL_TEXTURE_2D);
	glEnable(GL_TEXTURE_3D);
	glEnable(GL_DEPTH_TEST);
	glEnable(GL_MAP1_COLOR_4);
	glEnable(GL_MAP1_INDEX);
	glEnable(GL_MAP1_NORMAL);
	glEnable(GL_MAP1_TEXTURE_COORD_1);
	glEnable(GL_MAP1_TEXTURE_COORD_2);
	glEnable(GL_MAP1_TEXTURE_COORD_3);
	glEnable(GL_MAP1_TEXTURE_COORD_4);
	glEnable(GL_MAP1_VERTEX_3);
	glEnable(GL_MAP1_VERTEX_4);
	glEnable(GL_MAP2_COLOR_4);
	glEnable(GL_MAP2_INDEX);
	glEnable(GL_MAP2_NORMAL);
	glEnable(GL_MAP2_TEXTURE_COORD_1);
	glEnable(GL_MAP2_TEXTURE_COORD_2);
	glEnable(GL_MAP2_TEXTURE_COORD_3);
	glEnable(GL_MAP2_TEXTURE_COORD_4);
	glEnable(GL_MAP2_VERTEX_3);
	glEnable(GL_MAP2_VERTEX_4);
	glFrontFace(GL_CCW);
	glActiveTextureARB(GL_TEXTURE0_ARB);
}
""")
def makeStripeImage(f):
	# Emit a C helper that builds a red/green striped RGBA image and
	# uploads it as 1D/2D/3D textures bound to fresh texture ids.
	f.write( """
static GLuint mid1,mid2,mid3;
void makeStripeImage( GLubyte *stripeImage)
{
	int j;
	glGenTextures(1, &mid1);
	glGenTextures(1, &mid2);
	glGenTextures(1, &mid3);
	glBindTexture(GL_TEXTURE_1D, mid1);
	glBindTexture(GL_TEXTURE_2D, mid2);
	glBindTexture(GL_TEXTURE_3D, mid3);
	for (j = 0; j < 32; j++) {
		stripeImage[4*j] = (GLubyte) ((j<=4) ? 255 : 0);
		stripeImage[4*j+1] = (GLubyte) ((j>4) ? 255 : 0);
		stripeImage[4*j+2] = (GLubyte) 0;
		stripeImage[4*j+3] = (GLubyte) 255;
	}
	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	glPixelStorei(GL_PACK_ALIGNMENT, 1);
	glEnable(GL_TEXTURE_1D);
	glEnable(GL_TEXTURE_2D);
	glEnable(GL_TEXTURE_3D);
	glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
	glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB, 8, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
}
""")
def makeGenTexture(f):
	# Emit a C genTexture() helper; near-identical to makeStripeImage()
	# but with its own static texture ids (sid1..sid3).
	f.write( """
static GLuint sid1,sid2,sid3;
void genTexture( GLubyte *stripeImage)
{
	int j;
	glGenTextures(1, &sid1);
	glGenTextures(1, &sid2);
	glGenTextures(1, &sid3);
	glBindTexture(GL_TEXTURE_1D, sid1);
	glBindTexture(GL_TEXTURE_2D, sid2);
	glBindTexture(GL_TEXTURE_3D, sid3);
	for (j = 0; j < 32; j++) {
		stripeImage[4*j] = (GLubyte) ((j<=4) ? 255 : 0);
		stripeImage[4*j+1] = (GLubyte) ((j>4) ? 255 : 0);
		stripeImage[4*j+2] = (GLubyte) 0;
		stripeImage[4*j+3] = (GLubyte) 255;
	}
	glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
	glPixelStorei(GL_PACK_ALIGNMENT, 1);
	glEnable(GL_TEXTURE_1D);
	glEnable(GL_TEXTURE_2D);
	glEnable(GL_TEXTURE_3D);
	glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
	glTexImage3D(GL_TEXTURE_3D, 0, GL_RGB, 8, 8, 8, 0, GL_RGB, GL_UNSIGNED_BYTE, (const GLvoid *)stripeImage);
}
""")
def genIdentf(f):
	# Emit a 4x4 identity matrix declaration as GLfloat (used by the
	# 'identf' action for matrix-taking GL calls).
	f.write( """
	GLfloat m[16] = {
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	};
""")
def genIdentd(f):
	# Emit a 4x4 identity matrix declaration as GLdouble (used by the
	# 'identd' action for matrix-taking GL calls).
	f.write( """
	GLdouble m[16] = {
		1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	};
""")
def initMap1(f):
	# Emit a C helper that fills a caller-supplied array with 10 control
	# points (x,y,z,w) for glMap1 evaluator tests.
	f.write( """
void initMap1(double *parray)
{
	int i;
	static double points[10 * 4] = {
		-0.5, 0.0, 0.0, 1.0,
		-0.4, 0.5, 0.0, 1.0,
		-0.3, -0.5, 0.0, 1.0,
		-0.2, 0.5, 0.0, 1.0,
		-0.1, -0.5, 0.0, 1.0,
		0.0, 0.5, 0.0, 1.0,
		0.1, -0.5, 0.0, 1.0,
		0.2, 0.5, 0.0, 1.0,
		0.3, -0.5, 0.0, 1.0,
		0.4, 0.0, 0.0, 1.0,
	};
	glFrontFace(GL_CCW);
	glEnable(GL_DEPTH_TEST);
	for (i = 0; i < 40; i++)
		parray[i] = points[i];
}
""")
def initBuffer(f):
	"""Emit the C initBuffer() helper: binds ARB buffer object 3 and loads
	320 bytes of malloc'ed data with GL_STATIC_DRAW_ARB usage."""
	body = (
		"void initBuffer(void)\n{\n"
		"\tconst void *data = malloc(320);\n\n"
		"\tglBindBufferARB( GL_ARRAY_BUFFER_ARB, 3);\n"
		"\tglBufferDataARB( GL_ARRAY_BUFFER_ARB, 320, data, GL_STATIC_DRAW_ARB);\n"
		"}\n"
	)
	f.write(body)
#
# Generate the permuted parameter lists for a GL function
# Return a list of lists containing the arguments
# A definition in APUutil.txt looks like this
# name ColorMaterial
# return void
# param face GLenum
# paramprop face GL_FRONT GL_BACK GL_FRONT_AND_BACK
# param mode GLenum
# paramprop mode GL_EMISSION GL_AMBIENT GL_DIFFUSE GL_SPECULAR GL_AMBIENT_AND_DIFFUSE
# category 1.0
# chromium pack
# The input to this function is read in like this:
#
# FUNC_NAME ColorMaterial PARAMS [('face', 'GLenum', 0), ('mode', 'GLenum', 0)]
# ['GL_FRONT', 'GL_BACK', 'GL_FRONT_AND_BACK']
# ['GL_EMISSION', 'GL_AMBIENT', 'GL_DIFFUSE', 'GL_SPECULAR', 'GL_AMBIENT_AND_DIFFUSE']
#
# Other non enumerated args can be specified by using the paramlist keyword
# paramlist size 2 4 5 6
#
# After permuting all of the arguments the function we get:
#
# ('GL_FRONT', 'GL_EMISSION')
# ('GL_BACK', 'GL_EMISSION')
# ('GL_FRONT_AND_BACK', 'GL_EMISSION')
# ('GL_FRONT', 'GL_AMBIENT')
# ('GL_BACK', 'GL_AMBIENT')
# ('GL_FRONT_AND_BACK', 'GL_AMBIENT')
# ('GL_FRONT', 'GL_DIFFUSE')
# ('GL_BACK', 'GL_DIFFUSE')
# ('GL_FRONT_AND_BACK', 'GL_DIFFUSE')
# ('GL_FRONT', 'GL_SPECULAR')
# ('GL_BACK', 'GL_SPECULAR')
# ('GL_FRONT_AND_BACK', 'GL_SPECULAR')
# ('GL_FRONT', 'GL_AMBIENT_AND_DIFFUSE')
# ('GL_BACK', 'GL_AMBIENT_AND_DIFFUSE')
# ('GL_FRONT_AND_BACK', 'GL_AMBIENT_AND_DIFFUSE')
#
# paramset format
#
# paramset [format type] [GL_RED GL_GREEN GL_BLUE GL_ALPHA GL_BGR GL_LUMINANCE GL_LUMINANCE_ALPHA] [GL_UNSIGNED_BYTE GL_BYTE GL_UNSIGNED_SHORT GL_SHORT GL_UNSIGNED_INT GL_INT GL_FLOAT]
# paramset [format type] [GL_RGB] [GL_UNSIGNED_BYTE GL_BYTE GL_UNSIGNED_SHORT GL_SHORT GL_UNSIGNED_INT GL_INT GL_FLOAT GL_UNSIGNED_BYTE_3_3_2 GL_UNSIGNED_BYTE_2_3_3_REV GL_UNSIGNED_SHORT_5_6_5 GL_UNSIGNED_SHORT_5_6_5_REV]
# paramset [format type] [GL_RGBA GL_BGRA] [GL_UNSIGNED_BYTE GL_BYTE GL_UNSIGNED_SHORT GL_SHORT GL_UNSIGNED_INT GL_INT GL_FLOAT GL_UNSIGNED_SHORT_4_4_4_4 GL_UNSIGNED_SHORT_4_4_4_4_REV GL_UNSIGNED_SHORT_5_5_5_1 GL_UNSIGNED_SHORT_1_5_5_5_REV GL_UNSIGNED_INT_8_8_8_8 GL_UNSIGNED_INT_8_8_8_8_REV GL_UNSIGNED_INT_10_10_10_2 GL_UNSIGNED_INT_2_10_10_10_REV]
#
def GenParmSetLists(func_name):
	"""Build (param-name, value-set) pair lists from 'paramset' spec entries.

	Returns a list with one inner list of (name, values) tuples per
	disjoint paramset group, or [] when the spec declares no paramset.
	NOTE(review): the first loop over params has no effect, and the single
	`returnlist` object is appended to `rlist` once per group, so every
	group shares (and accumulates) the same pair list -- possibly relied
	on by GenParmLists(); confirm before changing.
	"""
	pset = apiutil.ParamSet(func_name)
	params = apiutil.Parameters(func_name)
	for index in range(len(params)):
		(name, type, vecSize) = params[index]
	returnlist = []
	rlist = []
	if pset != []:
		# number of disjoint sets
		for i in range(len(pset)):
			parset = pset[i]
			namelist = parset[0]
			setlist = parset[1:]
			for j in range(len(namelist)):
				returnlist.append((namelist[j],setlist[j]))
			rlist.append(returnlist)
	return rlist
def GenParmLists(func_name,f):
	"""Build the permuted argument lists for func_name's generated calls.

	Side effect: writes any needed local array declarations/initializers
	into f.  Returns a list of argLists -- one per disjoint paramset group
	(or a single one when the spec has no paramset) -- where each argList
	is the Cartesian product (via permute2) of the candidate values chosen
	for every parameter: paramprop enums, paramlist values, paramvec
	initializers, or the fallback range_mapping value for the type.
	"""
	# Fetch the parameter properties
	params = apiutil.Parameters(func_name)
	return_type = apiutil.ReturnType(func_name)
	if return_type != 'void':
		# Yet another gross hack for glGetString
		if string.find( return_type, '*' ) == -1:
			return_type = return_type + " *"
	#print ""
	#print "FUNC_NAME %s PARAMS %s" % (func_name,params)
	pvec = []
	tvec = []
	rlist = GenParmSetLists(func_name)
	#print "RETURNLIST"
	#print rlist
	#
	# At this point rlist is constructed as
	#
	#
	#
	fvec = copy.deepcopy(apiutil.ParamProps(func_name))
	vallist = copy.deepcopy(apiutil.ParamList(func_name))
	valinit = copy.deepcopy(apiutil.ParamVec(func_name))
	multiList = []
	if rlist != []:
		# One pass per disjoint paramset group.
		for kk in range(len(rlist)):
			#print "RLIST-KK"
			pvec = []
			#print rlist[kk]
			rvec = rlist[kk]
			params = apiutil.Parameters(func_name)
			for index in range(len(params)):
				(name, type, vecSize) = params[index]
				action = apiutil.ParamAction(func_name)
				#print "name = %s type = %s" % (name,type)
				if vecSize >= 1:
					#
					# Vector arg
					#
					#print "/* VECTOR */"
					f.write( "\t%s %s[%d]; /* VECTOR1 */\n" % (type[0:type.index('*')],name, vecSize))
					# TODO: Add dummy vars to vector/matrix
					tvec = name
				if type == "GLenum":
					#print "name = %s type = %s" % (name,type)
					if rvec != []:
						fvec = rvec
					else:
						fvec = copy.deepcopy(apiutil.ParamProps(func_name))
					#print "fvec = %s" % fvec
					for k in range(len(fvec)):
						d = fvec.pop(0)
						if d[0] == name:
							tvec = d[1]
							break
				elif vallist != []:
					#print "name = %s type = %s" % (name,type)
					vallist = copy.deepcopy(apiutil.ParamList(func_name))
					#print "vallist = %s" % vallist
					for k in range(len(vallist)):
						d = vallist[k]
						(dname,vec) = d
						#print d[0]
						if d[0] == name :
							#print "name = %s (dname,vec) = (%s) %s" % (name,dname,vec)
							tvec = vec
							break;
				elif valinit != []:
					#print "name = %s type = %s" % (name,type)
					valinit = copy.deepcopy(apiutil.ParamVec(func_name))
					#print "valinit = %s" % valinit
					cnt = 0
					# NOTE(review): indexes valinit with `k` left over from an
					# earlier loop rather than iterating valinit -- TODO confirm.
					d = valinit[k]
					(dname,vec) = d
					#print d[0]
					if d[0] == name :
						#print "name = %s (dname,vec) = (%s) %s" % (name,dname,vec)
						for nint in range(len(vec)):
							f.write("\t%s[%d] = %s;/* VA1 */\n" % (name,nint,vec[nint]))
							#print "\t%s[%d] = %s;\n" % (name,nint,vec[nint])
						break;
				elif range_mapping.has_key( type ):
					#print "name = %s type = %s" % (name,type)
					if rvec != []:
						#print "rvec = %s" % rvec
						fvec = rvec
						#print "fvec = %s" % fvec
						for k in range(len(fvec)):
							d = fvec.pop(0)
							if d[0] == name:
								tvec = d[1]
								break
					else:
						fvec = copy.deepcopy(apiutil.ParamProps(func_name))
				else:
					tvec = range_mapping[type]
				# else:  (fallback) single representative value
				pvec.append(tvec)
				#print "PVEC.APPEND(%s)" % tvec
			#print "PVEC %s" % pvec
			#print "PVECLEN = %d" % len(pvec)
			#for i in range(len(pvec)):
				#print pvec[i]
			argLists = permute2(pvec)
			#print argLists
			#print "ARGLIST = %d" % len(argLists)
			#for i in range(len(argLists)):
				#print argLists[i]
			multiList.append(argLists)
	else:
		# No paramset: a single pass; first emit vector declarations...
		for index in range(len(params)):
			(name, type, vecSize) = params[index]
			action = apiutil.ParamAction(func_name)
			#print "name = %s type = %s" % (name,type)
			#print valinit
			if vecSize >= 1:
				#
				# Vector arg
				#
				#print "/* VECTOR */"
				if type[0:5] == "const" :
					f.write( "\t%s %s[%d]; /* VECTOR2a */\n" % (type[6:type.index('*')],name, vecSize))
				else:
					f.write( "\t%s %s[%d]; /* VECTOR2 */\n" % (type[0:type.index('*')],name, vecSize))
				# TODO: Add dummy vars to vector/matrix
				tvec = name
		# ...then pick the candidate value set for every parameter.
		for index in range(len(params)):
			(name, type, vecSize) = params[index]
			action = apiutil.ParamAction(func_name)
			if type == "GLenum":
				fvec = copy.deepcopy(apiutil.ParamProps(func_name))
				for k in range(len(fvec)):
					d = fvec.pop(0)
					if d[0] == name:
						tvec = d[1]
						break
			elif vallist != []:
				vallist = copy.deepcopy(apiutil.ParamList(func_name))
				for k in range(len(vallist)):
					d = vallist.pop(0)
					if d[0] == name:
						tvec = d[1]
						break;
			elif valinit != []:
				#print "name = %s type = %s" % (name,type)
				valinit = copy.deepcopy(apiutil.ParamVec(func_name))
				#print "valinit = %s" % valinit
				cnt = 0
				for k in range(len(valinit)):
					d = valinit[k]
					(dname,vec) = d
					#print d[0]
					if d[0] == name :
						#print "name = %s (dname,vec) = (%s) %s" % (name,dname,vec)
						for nint in range(len(vec)):
							f.write("\t%s[%d] = (%s)%s;/* VA2 */\n" % (name,nint,type[0:type.index('*')],vec[nint]))
							#print "\t%s[%d] = %s;\n" % (name,nint,vec[nint])
						break;
			elif range_mapping.has_key( type ):
				tvec = range_mapping[type]
			else:
				tvec = [0]
			pvec.append(tvec)
			#print tvec
			#print "PVEC %s" % pvec
		#print "PVEC %s" % pvec
		#print "PVECLEN = %d" % len(pvec)
		#for i in range(len(pvec)):
			#print pvec[i]
		argLists = permute2(pvec)
		#print argLists
		#print "ARGLIST = %d" % len(argLists)
		#for i in range(len(argLists)):
			#print argLists[i]
		multiList.append(argLists)
	return multiList
def PerformAction(func_name,f):
	"""Emit per-function setup C code (textures, evaluator maps, buffer
	objects, identity matrices) declared via the spec's 'action' entries
	into the generated test body.  Only the first action entry is used."""
	action = apiutil.ParamAction(func_name)
	if action != []:
		#
		# Actions are performed here such as limiting the number
		# of textures that are being enumerated
		#
		(name,doit) = action[0]
		#print "func_name = " + func_name + " action = %s" % action
		if doit == ['makeStripeImage']:
			f.write("\tmakeStripeImage((GLubyte *)%s);\n" % name)
		elif doit == ['enableTex']:
			f.write("\tenableTex();\n")
		elif doit == ['initMap1']:
			f.write("\tinitMap1((double *)%s);\n" % name)
		elif doit == ['maxTex']:
			# Not implemented: only logs to this script's stdout.
			print "Limit textures"
		elif doit == ['genTex']:
			print "Gen textures"
			f.write("\tgenTexture((GLubyte *)%s);\n" % name)
		elif doit == ['pixelStore']:
			# Reset pack/unpack state before generating the texture.
			f.write("glPixelStorei( GL_PACK_LSB_FIRST, 0);\n")
			f.write("glPixelStorei( GL_UNPACK_LSB_FIRST, 0);\n")
			f.write("glPixelStorei( GL_PACK_SWAP_BYTES, 0);\n")
			f.write("glPixelStorei( GL_UNPACK_SWAP_BYTES, 0);\n")
			f.write("\tgenTexture((GLubyte *)%s);\n" % name)
		elif doit == ['initBuffer']:
			f.write("\tinitBuffer();\n")
		elif doit == ['identf']:
			genIdentf(f)
		elif doit == ['identd']:
			genIdentd(f)
#
# write the test function
#
def PrintTableFunc( func_name, params, can_have_pointers,f):
	"""Emit a packer test function, table-driven variant.

	Writes into f: a `struct <name>_params` definition, a static table of
	permuted argument values (one row per permutation, built via
	GenParmLists), and a crPackTest<name>() function that loops over the
	table calling gl<name>() with each row, optionally checking errors.
	"""
	#print "PrintTableFunc (%s,%s,%d,f)" % (func_name,str(params),can_have_pointers)
	# Save original function name
	orig_func_name = func_name
	# Convert to a non-vector version of the function if possible
	#func_name = apiutil.NonVectorFunction( func_name )
	if not func_name:
		func_name = orig_func_name
	# Check if there are any pointer parameters.
	# That's usually a problem so we'll emit an error function.
	nonVecParams = apiutil.Parameters(func_name)
	return_type = apiutil.ReturnType(func_name)
	# Emit the params struct: one member per non-pointer parameter.
	f.write("\nstruct %s_params {\n" % func_name)
	for (name, type, vecSize) in nonVecParams:
		if not apiutil.IsPointer(type) :
			f.write("\t%s %s_%s;\n" % (type,func_name,name))
	if verbose:
		f.write( '\tchar *prgstr;\n')
	f.write( '} %s_tab[] = {\n' % func_name)
	bail_out = 0
	for (name, type, vecSize) in nonVecParams:
		if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
			bail_out = 1
	counter = 0
	# generate the calls
	if len(params) == 0:
		argstr = ""
	else:
		argstr = ", "
	printfstr = ""
	argstr = ""
	if return_type != 'void':
		# Yet another gross hack for glGetString
		if string.find( return_type, '*' ) == 1:
			return_type = return_type + " *"
		f.write( '\t%s return_val;\n' % return_type)
	# At this point all of the declarations have been generated
	# The argument lists with the various parameter values have to
	# be permuted
	# Generate the lists to be permuted
	argLists = GenParmLists(func_name,f)
	#print argLists
	#print len(argLists)
	#
	# Now iterate over the permuted argument lists generating gl calls with
	# the permuted arguments
	#
	for ki in range(len(argLists)):
		argList = argLists[ki]
		ncount = 0
		if len(argList) > 0:
			allargstr = ""
			for i in range(len(argList)):
				#print argList[i]
				q = argList[i]
				ll = 0
				f.write("{ ")
				ncount = ncount + 1
				# Emit one table row: each scalar value, comma-separated.
				for k in range(len(q)):
					(name, type, vecSize) = params[k]
					#
					# ordinary typed parameter, or vector of unknown size
					#
					# TODO Vector arg
					#
					#if vecSize >= 1:
						#f.write('%s /* VEC1 */' % name)
						#f.write( "\t%s %s[%d]; /* VECTOR1 */\n" % (type[0:type.index('*')],name, vecSize))
					#if apiutil.IsPointer ( type ):
						# POINTER
						#f.write( "\t(%s)%s /* VEC3 */" % (type,name))
					if printf_mapping.has_key( type ):
						(format_str, cast) = printf_mapping[type]
						printfstr += format_str
						cast_str = ''
						if cast != '':
							cast_str = '(%s)' % cast
						if type == 'GLenum':
							argstr += "%s" % q[k]
						elif type == 'GLboolean':
							argstr += "%s" % q[k]
						else:
							argstr += '%s %s' % (cast_str,q[k])
					#elif type.find( "*" ):
						#printfstr += "%p"
						#argstr += "(void *)"
						#argstr += '%s' % q[k]
					else:
						argstr = ""
						printfstr = "???"
						break
					if ll != len(params):
						printfstr += ", "
						argstr += ", "
					ll += 1
					f.write( '\t%s' % argstr)
					allargstr = allargstr + argstr
					argstr = ""
					printfstr = ""
				if verbose:
					# Row also carries its own source text for crDebug tracing.
					f.write( '\n\t\"%s\"' % (allargstr))
					#f.write( '\n\t\"%s_tab.%s\"' % (func_name,allargstr))
				allargstr = ""
				f.write( '},\n')
	# finish up
	f.write( '};\n\n' )
	f.write( '/* COUNT = %d */\n' % ncount)
	f.write( 'void crPackTest%s(void)\n' % (func_name))
	f.write( '{\n')
	f.write( '\tint i;\n')
	# Declare big scratch arrays for every pointer-typed parameter.
	for (name, type, vecSize) in nonVecParams:
		if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
			if vecSize == 0:
				if type == "GLvoid *" or type == "GLvoid **" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *":
					f.write( "\t%s %s[100000];/* VECP2b (%s,%s)*/\n" % (type,name,type,name))
				elif type == "const GLfloat *" or type == "GLfloat *" or type == "const GLint *" or type == "GLint *" or type == "GLdouble *" or type == "const GLdouble *" or type == "const GLuint *" or type == "GLuint *" or type == "const GLushort *" or type == "GLushort *":
					f.write( "\t%s %s[100000];/* VECP2a (%s,%s)*/\n" % (type[0:type.index('*')],name,type,name))
					#f.write( "\t%s %s[100000];/* VECP2a */\n" % (type,name))
			bail_out = 1
		elif apiutil.IsPointer(type) :
			if vecSize == 0:
				if type == "GLvoid *" or type == "GLvoid **" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *" or type == "GLint *" or type == "const GLint *" or type == "const GLfloat *" or type == "GLfloat *" or type == "GLdouble *" or type == "const GLdouble *" or type == "const GLuint *" or type == "GLuint *" or type == "const GLushort *" or type == "GLushort *":
					f.write( "\t%s %s[100000];/* VECP9 (%s,%s)*/\n" % (type[0:type.index('*')],name,type,name))
				else:
					f.write( "\tGLubyte %s[100000];/* VECP7 */\n" % name)
	PerformAction(func_name,f)
	# The loop body: call gl<name>() with the i-th row of the table.
	f.write('\tfor ( i = 0; i < %d; i++) {\n' % ncount)
	if verbose:
		f.write('\tif (verbose)\n\tcrDebug(\"gl%s( %%s )\",%s_tab[i].prgstr);\n' % (func_name,func_name))
	if return_type != 'void':
		f.write( '\t\treturn_val = gl%s(' % func_name)
	else:
		f.write( '\t\tgl%s(' % func_name)
	ll = 0
	for (name, type, vecSize) in nonVecParams:
		if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
			if vecSize == 0:
				if type == "GLvoid *" or type == "GLvoid **" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *":
					f.write( "%s /* VECS2 */\n" % name)
			bail_out = 1
		elif apiutil.IsPointer(type) :
			if vecSize == 0:
				if type == "GLvoid *" or type == "GLvoid **" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *" or type == "GLint *" or type == "const GLint *" or type == "GLfloat *" or type == "GLdouble *" or type == "const GLdouble *" or type == "const GLuint *" or type == "GLuint *":
					f.write( "\t%s/* VECS4 */\n" % name)
				else:
					f.write( "\t%s/* VECS5 */\n" % name)
		else:
			f.write("\t%s_tab[i].%s_%s" % (func_name,func_name,name))
		if ll != len(nonVecParams) -1:
			f.write(', ')
		ll = ll + 1
	#
	f.write(');\n')
	if return_type != 'void':
		f.write('\n\t\tif(errChk) {\n')
		f.write('\t\t\tchar buf[1024];\n')
		f.write('\t\t\tsprintf(buf,\"gl%s( %%s )\",%s_tab[i].prgstr);\n' % (func_name,func_name))
		f.write('\t\t\tprintError(buf);\n')
		f.write( '\t}\n')
	else:
		f.write('\n\t\tif(errChk) {\n')
		f.write('\t\t\tchar buf[1024];\n')
		f.write('\t\t\tsprintf(buf,\"gl%s( %%s )\",%s_tab[i].prgstr);\n' % (func_name,func_name))
		f.write('\t\t\tprintError(buf);\n')
		f.write( '\t}\n')
	f.write( '\t}\n' )
	f.write( '}\n' )
def PrintFunc( func_name, params, can_have_pointers,f):
	"""Emit a packer test function, inline-call variant.

	Unlike PrintTableFunc(), this writes one literal gl<name>(...) call per
	argument permutation directly into crPackTest<name>(), each followed by
	an optional printError() check and crDebug() trace.
	"""
	f.write( 'void crPackTest%s(void)\n' % (func_name))
	f.write( '{\n')
	# Save original function name
	orig_func_name = func_name
	# Convert to a non-vector version of the function if possible
	#func_name = apiutil.NonVectorFunction( func_name )
	if not func_name:
		func_name = orig_func_name
	# Check if there are any pointer parameters.
	# That's usually a problem so we'll emit an error function.
	nonVecParams = apiutil.Parameters(func_name)
	return_type = apiutil.ReturnType(func_name)
	bail_out = 0
	# Declare big scratch arrays for every pointer-typed parameter.
	for (name, type, vecSize) in nonVecParams:
		if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
			if vecSize == 0:
				if type == "GLvoid *" or type == "GLvoid **" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *":
					f.write( "\t%s %s[100000];/* VECP7b (%s,%s)*/\n" % (type,name,type,name))
				elif type == "GLfloat *" or type == "GLint *" or type == "GLdouble *" or type == "GLuint *" or type == "GLushort *":
					f.write( "\t%s %s[100000];/* VECP7a (%s,%s)*/\n" % (type[0:type.index('*')],name,type,name))
				elif type == "const GLfloat *" or type == "const GLint *" or type == "const GLdouble *" or type == "const GLuint *" or type == "const GLushort *":
					f.write( "\t%s %s[100000];/* VECP7b (%s,%s)*/\n" % (type[6:type.index('*')],name,type,name))
					#f.write( "\t%s %s[100000];/* VECP7a */\n" % (type,name))
			bail_out = 1
		elif apiutil.IsPointer(type) :
			if vecSize == 0:
				if type == "GLsizei *" or type == "GLubyte *" or type == "const GLvoid *" or type == "const GLubyte *" or type == "GLint *" or type == "const GLint *" or type == "const GLfloat *" or type == "GLfloat *" or type == "GLdouble *" or type == "const GLdouble *" or type == "const GLuint *" or type == "GLuint *":
					f.write( "\t%s %s[100000];/* VECP5a (%s,%s)*/\n" % (type[0:type.index('*')],name,type,name))
				else:
					f.write( "\t%s %s[100000];/* VECP5 */\n" % (type,name))
	PerformAction(func_name,f)
#	if bail_out:
#		for (name, type, vecSize) in nonVecParams:
#			print '\t(void)%s;' % (name)
#
# Special casing indicates that some arbitrary data appropriate to
# the call must be supplied
#		f.write( '\tcrDebug ( "%s needs to be special cased %d %d!");\n' % (func_name, vecSize, can_have_pointers))
	if "extpack" in apiutil.ChromiumProps(func_name):
		is_extended = 1
	else:
		is_extended = 0
	counter = 0
	# generate the calls
	if len(params) == 0:
		argstr = ""
	else:
		argstr = ", "
	printfstr = ""
	argstr = ""
	if return_type != 'void':
		# Yet another gross hack for glGetString
		if string.find( return_type, '*' ) == 1:
			return_type = return_type + " *"
		f.write( '\t%s return_val;\n' % return_type)
	# At this point all of the declarations have been generated
	# The argument lists with the various parameter values have to
	# be permuted
	# Generate the lists to be permuted
	argLists = GenParmLists(func_name,f)
	#print argLists
	#print len(argLists)
	#
	# Now iterate over the permuted argument lists generating gl calls with
	# the permuted arguments
	#
	for ki in range(len(argLists)):
		argList = argLists[ki]
		if len(argList) > 0:
			allargstr = ""
			for i in range(len(argList)):
				#print argList[i]
				q = argList[i]
				if return_type != 'void':
					f.write( '\treturn_val = gl%s(' % func_name)
				else:
					f.write( '\tgl%s(' % func_name)
				ll = 0
				nparms = len(params)
				if len(q) > nparms:
					nparms = len(q)
				#for k in range(len(q)):
				for k in range(nparms):
					(name, type, vecSize) = params[k]
					# ordinary typed parameter, or vector of unknown size
					#
					# TODO Vector arg
					#
					if vecSize >= 1:
						f.write("%s /* VEC2a */" % name)
					elif apiutil.IsPointer ( type ):
						# POINTER
						f.write( "\t(%s)%s /* VEC3a */\n" % (type,name))
#						(format_str, cast) = printf_mapping[type]
#						printfstr += format_str
#						cast_str = ''
#						if cast != '':
#							cast_str = '(%s)' % cast
#						if type == 'GLenum':
#							argstr += "%s" % q[k]
#						elif type == 'GLboolean':
#							argstr += "%s" % q[k]
#						else:
#							argstr += '%s %s' % (cast_str,q[k])
					elif printf_mapping.has_key( type ):
						(format_str, cast) = printf_mapping[type]
						printfstr += format_str
						cast_str = ''
						if cast != '':
							cast_str = '(%s)' % cast
						if type == 'GLenum':
							argstr += "%s" % q[k]
						elif type == 'GLboolean':
							argstr += "%s" % q[k]
						else:
							argstr += '%s %s' % (cast_str,q[k])
					elif type.find( "*" ):
						printfstr += "%p"
						argstr += "(void *)"
						argstr += '%s' % q[k]
					else:
						argstr = ""
						printfstr = "???"
						break;
					if ll != len(params) - 1:
						printfstr += ", "
						argstr += ", "
					ll += 1
					f.write( '%s' % argstr)
					allargstr = allargstr + argstr
					argstr = ""
					printfstr = ""
				f.write( ');\n\tif(errChk)\n\t\tprintError(\"gl%s(%s)\");\n' % (func_name,allargstr))
				if verbose:
					f.write('\tif (verbose)\n\t\tcrDebug(\"gl%s( %s )\");\n' % (func_name,allargstr))
				allargstr = ""
				#f.write( ');\n')
		else:
			# Zero-argument function: a single plain call.
			if return_type != 'void':
				f.write( '\treturn_val = gl%s();\n\tif(errChk)\n\t\tprintError(\"gl(%s)\");\n' % (func_name,func_name))
			else:
				f.write( '\tgl%s();\n\tif(errChk)\n\t\tprintError(\"gl(%s)\");\n' % (func_name,func_name))
			if verbose:
				f.write('\tif (verbose)\n\t\tcrDebug(\"gl%s( )\");\n' % (func_name))
	# finish up
	f.write( '}\n' )
#
# Print the header portion of the test program files.
#
def PrintHeaders(f):
    """Emit the shared C preamble of every generated test file: the
    copyright banner, a do-not-edit marker, the GL/GLUT includes and the
    externs shared by all packertest translation units."""
    CopyrightC(f)
    # One combined write for the banner (same bytes as the original
    # sequence of small writes).
    f.write("\n/* DO NOT EDIT - THIS FILE GENERATED BY THE packertest.py SCRIPT */\n\n\n")
    f.write("""
#define GL_GLEXT_PROTOTYPES
#include <GL/gl.h>
#include <GL/glext.h>
#include <GL/glut.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "chromium.h"
#include "cr_error.h"
#include "packertest.h"
extern int errChk;
extern int verbose;
void printError(char *name);
""")
def PrintDynProto(func_name, f):
    """Emit, for a Chromium-extension entry point, the typedef of its
    function-pointer type and a static pointer variable that will later be
    filled in via crGetProcAddress().

    Skips special-cased functions and (unless generating for all
    functions) any function without a pack opcode.
    """
    if apiutil.FindSpecial("packertest", func_name):
        return
    if not allfuncs:
        if not apiutil.HasPackOpcode(func_name):
            return
    params = apiutil.Parameters(func_name)
    # BUG FIX: return_type was never assigned in this function, so hitting
    # any "Chromium"-category function raised NameError at generation time.
    # Derive it from the API spec exactly as the sibling PrintProto() does.
    return_type = apiutil.ReturnType(func_name)
    if "Chromium" == apiutil.Category(func_name):
        f.write("typedef %s (APIENTRY *gl%s_t) (%s);\n" %
                (return_type, func_name, apiutil.MakeDeclarationString(params)))
        f.write("static gl%s_t %s_func;\n" % (func_name, func_name))
#
# Generate function prototypes
#
def PrintProto(func_name,f,no_special):
    """Emit the 'void crPackTest<Func> (void);' prototype for one test
    function into the generated header.

    no_special == 1 applies the normal filters (special-cased functions
    and, unless allfuncs is set, functions without a pack opcode are
    skipped); no_special == 0 emits unconditionally.
    """
    if no_special == 1:
        if apiutil.FindSpecial( "packertest", func_name ):
            return
        if not allfuncs:
            if not apiutil.HasPackOpcode(func_name):
                return
    pointers_ok = 0
    return_type = apiutil.ReturnType(func_name)
    params = apiutil.Parameters(func_name)
    if "Chromium" == apiutil.Category(func_name):
        is_extended = 1
    else:
        is_extended = 0
    # NOTE(review): emitting a crGetProcAddress assignment from a
    # prototype-generating routine looks like copied-over code; it writes
    # into the header for every Chromium-extension function.  Confirm
    # against the generated output before removing.
    if is_extended:
        f.write( "gl%s gl%s_func = (gl%s_t)crGetProcAddress(\"gl%s\");\n" % (func_name,func_name,func_name,func_name))
    if return_type != 'void':
        # Yet another gross hack for glGetString
        # NOTE(review): return_type is adjusted here but never used again
        # in this function; pointers_ok is likewise set but unused.
        if string.find( return_type, '*' ) == -1:
            return_type = return_type + " *"
    #if "get" in apiutil.Properties(func_name):
    #pointers_ok = 1
    if func_name == 'Writeback':
        pointers_ok = 1
    f.write( 'void crPackTest%s (void);\n' % func_name)
#
# Generate the body of the test functions
#
def PrintBodies(func_name, f, no_special, gentables):
    """Emit the body of one crPackTest<Func> test function.

    With no_special == 1 the usual filters apply (special-cased functions
    and, unless allfuncs is set, functions lacking a pack opcode are
    skipped).  gentables selects the table-driven emitter.
    """
    if no_special == 1:
        if apiutil.FindSpecial("packertest", func_name):
            return
        if not allfuncs and not apiutil.HasPackOpcode(func_name):
            return
    # Pointer arguments are only permitted for "get"-style queries and the
    # special Writeback entry point.
    allow_pointers = 0
    if "get" in apiutil.Properties(func_name) or func_name == 'Writeback':
        allow_pointers = 1
    return_type = apiutil.ReturnType(func_name)  # queried for parity with siblings; unused here
    arg_specs = apiutil.Parameters(func_name)
    emitter = PrintTableFunc if gentables == 1 else PrintFunc
    emitter(func_name, arg_specs, allow_pointers, f)
def PrintMiddle(f):
    """Emit the fixed C glue between the generated test bodies and the
    generated calls: the selection buffer, Init(), and the opening of the
    Draw() function whose body the callers fill with crPackTest calls."""
    middle_section = """
#define BUFSIZE 2048
static GLuint sbuffer[BUFSIZE];
static void Init( void )
{
glSelectBuffer (BUFSIZE, sbuffer);
glDepthMask(GL_TRUE);
glStencilMask(0xffffffff);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_FLAT);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
}
static void Draw(void)
{
"""
    f.write(middle_section)
def printTail(f):
    """Emit the fixed tail of a generated test program: the printError()
    helper, the GLUT keyboard handler, usage text, and the C main() that
    parses flags and enters the GLUT main loop.

    NOTE(review): inside the generated main(), the "-r" handler does
    'mode |= GLUT_RGBA; mode &= ~GLUT_RGBA;' which clears the bit it just
    set -- presumably it was meant to clear GLUT_INDEX.  Left as-is;
    confirm against upstream before changing the generated C.
    """
    f.write ("""
void printError(char *name)
{
GLenum ret = GL_NO_ERROR;
while((ret = glGetError()) != GL_NO_ERROR)
switch (ret) {
case GL_NO_ERROR:
break;
case GL_INVALID_ENUM:
crWarning("%s: GL_INVALID_ENUM",name);
break;
case GL_INVALID_VALUE:
crWarning("%s: GL_INVALID_VALUE",name);
break;
case GL_INVALID_OPERATION:
crWarning("%s: GL_INVALID_OPERATION",name);
break;
case GL_STACK_OVERFLOW:
crWarning("%s: GL_STACK_OVERFLOW",name);
break;
case GL_STACK_UNDERFLOW:
crWarning("%s: GL_STACK_UNDERFLOW",name);
break;
case GL_OUT_OF_MEMORY:
crWarning("%s: GL_OUT_OF_MEMORY",name);
break;
case GL_TABLE_TOO_LARGE:
crWarning("%s: GL_TABLE_TOO_LARGE",name);
break;
default:
crWarning("%s: Unknown GL Error",name);
break;
}
}
static void Key( unsigned char key, int x, int y )
{
(void) x;
(void) y;
switch (key) {
case 'q':
case 27:
exit(0);
break;
}
glutPostRedisplay();
}
static void
PrintHelp(void)
{
printf(\"Usage: packertest [options]\\n\");
printf(\"Options:\\n\");
printf(\" -a enable accum buffer mode (default)\\n\");
printf(\" -A enable all information\\n\");
printf(\" -d double buffer mode (default)\\n\");
printf(\" -error do error check after each call (slow)\\n\");
printf(\" -h print this information\\n\");
printf(\" -i index mode\\n\");
printf(\" -m multisample mode\\n\");
printf(\" -r rgba mode (default)\\n\");
printf(\" -S enable stencil buffer mode (default)\\n\");
printf(\" -s stereo mode\\n\");
printf(\" -v verbose output\\n\");
}
int main(int argc, char *argv[])
{
int i;
int mode;
setbuf(stdout,NULL);
setbuf(stderr,NULL);
mode = GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH | GLUT_ACCUM | GLUT_STENCIL | GLUT_MULTISAMPLE;
for (i = 1; i < argc; i++) {
if (!strcmp( argv[i], \"-error\")) {
errChk = 1;
}
else if (!strcmp( argv[i], \"-A\")) {
mode = GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH | GLUT_ACCUM | GLUT_STENCIL | GLUT_MULTISAMPLE;
}
else if (strcmp(argv[i], \"-s\") == 0) {
mode |= GLUT_STEREO;
}
else if (strcmp(argv[i], \"-i\") == 0) {
mode |= GLUT_INDEX;
mode &= ~GLUT_RGBA;
}
else if (strcmp(argv[i], \"-r\") == 0) {
mode |= GLUT_RGBA;
mode &= ~GLUT_RGBA;
}
else if (strcmp(argv[i], \"-d\") == 0) {
mode |= GLUT_DOUBLE;
}
else if (strcmp(argv[i], \"-D\") == 0) {
mode |= GLUT_DEPTH;
}
else if (strcmp(argv[i], \"-a\") == 0) {
mode |= GLUT_ACCUM;
}
else if (strcmp(argv[i], \"-S\") == 0) {
mode |= GLUT_STENCIL;
}
else if (strcmp(argv[i], \"-m\") == 0) {
mode |= GLUT_MULTISAMPLE;
}
else if (strcmp(argv[i], \"-v\") == 0) {
verbose = 1;
}
else if (!strcmp( argv[i], \"-h\" ) || !strcmp(argv[i], \"--help\"))
{
PrintHelp();
exit(0);
}
}
glutInit( &argc, argv );
glutInitWindowPosition(0, 0);
glutInitWindowSize(400, 300);
glutInitDisplayMode(mode);
glutCreateWindow(argv[0]);
glutDisplayFunc( Draw );
glutKeyboardFunc( Key );
Init();
""")
    # Single-quoted so the embedded double quotes need no escaping.
    f.write( 'printf("Press q or <ESC> to exit\\n");')
    f.write( """
glutMainLoop();
return 0;
}
"""
)
    f.write("\n\n")
#
# Generate calls to the test functions
#
def GenCalls(func_name,f,no_special):
    """Emit, into the body of the generated Draw(), a call to the
    crPackTest<Func>() test function for one GL entry point.

    no_special == 1 applies the usual filters (special-cased functions and,
    unless allfuncs is set, functions without a pack opcode are skipped).
    """
    if no_special == 1:
        if apiutil.FindSpecial( "packertest", func_name ):
            return
        if not allfuncs:
            if not apiutil.HasPackOpcode(func_name):
                return
    pointers_ok = 0
    return_type = apiutil.ReturnType(func_name)
    params = apiutil.Parameters(func_name)
    if "Chromium" == apiutil.Category(func_name):
        is_extended = 1
    else:
        is_extended = 0
    # NOTE(review): this writes a crGetProcAddress declaration into the
    # middle of the caller's generated function body -- presumably copied
    # from the prototype generator.  Confirm against the generated C
    # before removing.
    if is_extended:
        f.write( "gl%s gl%s_func = (gl%s_t)crGetProcAddress(\"gl%s\");\n" % (func_name,func_name,func_name,func_name))
    if return_type != 'void':
        # Yet another gross hack for glGetString
        # NOTE(review): return_type is adjusted here but never used again;
        # pointers_ok is set but unused.
        if string.find( return_type, '*' ) == -1:
            return_type = return_type + " *"
    #if "get" in apiutil.Properties(func_name):
    #pointers_ok = 1
    if func_name == 'Writeback':
        pointers_ok = 1
    #print "func_name = %s pointers_ok = %d" % (func_name, pointers_ok)
    #print params
    f.write( '\tcrPackTest%s ();\n' % func_name)
    #f.write( '\tglutSwapBuffers ();\n')
def EndGenCalls(f):
    """Close the generated Draw() body: flush GL, report completion, and
    terminate the test program."""
    closing = (
        '\tglFinish();\n'
        '\tif(errChk)\n'
        '\t\tprintError("glFinish()");\n'
        '\tcrDebug("DONE\\n");\n'
        '\texit(0);\n'
        '}\n'
        '\n'
    )
    f.write(closing)
def PrintPart1(file):
    # Thin wrapper: emit the shared C preamble (copyright + includes).
    PrintHeaders(file)
def PrintPart2(file, gentables):
    """Emit the crPackTest<Func> bodies for every dispatched function
    (uses the module-level `keys` list); gentables selects the
    table-driven emitter."""
    for fn_name in keys:
        PrintBodies(fn_name, file, 1, gentables)
def PrintPart3(file):
    """Emit the helper setup code, the Draw() body with one call per test
    function (and a glutSwapBuffers every 20 calls), then the C main()
    and error-reporting tail."""
    for emit_helper in (enableTex, makeStripeImage, makeGenTexture,
                        initMap1, initBuffer, PrintMiddle):
        emit_helper(file)
    # (name-list, no_special flag) in the original emission order.
    call_plan = ((special_funcs, 0), (keys, 1), (special_keys, 0))
    emitted = 0
    for names, no_special in call_plan:
        for name in names:
            GenCalls(name, file, no_special)
            emitted = emitted + 1
            if emitted % 20 == 0:
                file.write('\tglutSwapBuffers ();\n')
    file.write('\tglutSwapBuffers ();\n')
    EndGenCalls(file)
    printTail(file)
def PrintAll(file, gentables):
    # Emit a complete standalone test program: preamble, every generated
    # test body, then the Draw()/main() boilerplate with the calls.
    PrintPart1(file)
    PrintPart2(file, gentables)
    PrintPart3(file)
def GenSpecial(func_name,file, gentables):
file = open(file,"w")
PrintHeaders(file)
print "Writing %s" % func_name
PrintBodies(func_name,file,0, gentables)
file.close()
if __name__ == "__main__":
# Parse command line
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "adtnosv")
except getopt.error, msg:
print msg
exit
# Process options
gentables = 0
addpath = []
exclude = []
for (o, a) in opts:
if o == '-a':
allfuncs = 1
if o == '-d':
debug = debug + 1
if o == '-t':
gentables = 1
if o == '-o':
omit = 1
if o == '-n':
nopack = 1
if o == '-s':
stub = 1
if o == '-v':
verbose = 1
# Provide default arguments
if not args:
script = "hello.py"
else:
script = args[0]
d = apiutil.GetFunctionDict("../../glapi_parser/APIspec.txt")
for func in d.keys():
rec = d[func]
if "stub" in rec.chromium:
print "%s is a stub in Chromium, no test function generated." % func
print ""
for func in d.keys():
rec = d[func]
if "omit" in rec.chromium:
print "%s is not handled by Chromium, no test function generated." % func
print ""
d = []
if allfuncs == 1:
funcs = apiutil.GetFunctionDict("../../glapi_parser/APIspec.txt")
keys = []
for key in funcs.keys():
keys.append(key)
keys.sort()
else:
keys = apiutil.GetDispatchedFunctions("../../glapi_parser/APIspec.txt")
nkeys = len(keys)
assert nkeys > 0
file = open("packertest.h","w")
#
# Generate prototypes for dynanimically loaded funcs
#
for fname1 in keys:
PrintDynProto(fname1,file)
#
# Generate prototypes
#
for func_name in keys:
PrintProto(func_name,file,1)
for fname in special_funcs:
PrintProto(fname,file,0)
for fname in special_keys:
PrintProto(fname,file,0)
file.write("void makeStripeImage( GLubyte *stripeImage);\n")
file.write("void enableTex( void );\n")
file.write("void genTexture( GLubyte *stripeImage);\n")
file.write("void initMap1( double *points);\n")
file.write("void initBuffer( void );\n")
file.close()
file = open("packertest100.c","w")
i = 0
for func_name in keys:
if debug:
print "Generating for %s" % func_name
if i % 25 == 0:
file.close()
fname = "packertest" + str(i) + ".c"
file = open(fname,"w")
print "Writing %s" % fname
PrintPart1(file)
PrintBodies(func_name,file,1, gentables)
else:
PrintBodies(func_name,file,1, gentables)
i = i + 1
file = open("packertest.c","w")
PrintPart1(file)
PrintPart3(file)
file.close()
for fname in special_keys:
filename = "packertest" + fname + ".c"
GenSpecial(fname,filename, 1)
|
bsd-3-clause
|
sachintyagi22/spark
|
examples/src/main/python/streaming/flume_wordcount.py
|
83
|
2032
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in UTF8 encoded, '\n' delimited text received from the network every second.
Usage: flume_wordcount.py <hostname> <port>
To run this on your local machine, you need to setup Flume first, see
https://flume.apache.org/documentation.html
and then run the example
`$ bin/spark-submit --jars \
external/flume-assembly/target/scala-*/spark-streaming-flume-assembly-*.jar \
examples/src/main/python/streaming/flume_wordcount.py \
localhost 12345
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.flume import FlumeUtils
if __name__ == "__main__":
    # Require exactly <hostname> <port> on the command line.
    if len(sys.argv) != 3:
        print("Usage: flume_wordcount.py <hostname> <port>", file=sys.stderr)
        exit(-1)

    spark = SparkContext(appName="PythonStreamingFlumeWordCount")
    streaming = StreamingContext(spark, 1)

    host, port = sys.argv[1:]
    # Flume events arrive as (header, body) pairs; keep only the body text.
    flume_stream = FlumeUtils.createStream(streaming, host, int(port))
    payloads = flume_stream.map(lambda event: event[1])
    word_counts = payloads.flatMap(lambda line: line.split(" ")) \
        .map(lambda word: (word, 1)) \
        .reduceByKey(lambda a, b: a + b)
    word_counts.pprint()

    streaming.start()
    streaming.awaitTermination()
|
apache-2.0
|
Petr-Kovalev/nupic-win32
|
py/regions/KNNClassifierRegion.py
|
2
|
42330
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the k Nearest Neighbor classifier region.
"""
import numpy
from nupic.bindings.math import Random
from PyRegion import PyRegion
from nupic.algorithms import KNNClassifier
#---------------------------------------------------------------------------------
class KNNClassifierRegion(PyRegion):
"""
KNNClassifierRegion implements the k Nearest Neighbor classification algorithm.
By default it will implement vanilla 1-nearest neighbor using the L2 (Euclidean)
distance norm. There are options for using different norms as well as
various ways of sparsifying the input.
"""
__VERSION__ = 1
#---------------------------------------------------------------------------------
@classmethod
def getSpec(cls):
    """Return the region Spec: the declared inputs, outputs, parameters,
    and commands of KNNClassifierRegion, as consumed by the network API.

    Fix applied: user-facing typo "ONLUY" -> "ONLY" in the
    justUseAuxiliary description.  Other garbled fragments in upstream
    description text (e.g. the stray 'colum ' in cellsPerCol) are kept
    verbatim and flagged inline.
    """
    ns = dict(
        description=KNNClassifierRegion.__doc__,
        singleNodeOnly=True,
        inputs=dict(
            categoryIn=dict(
                description='Category of the input sample',
                dataType='Real32',
                count=1,
                required=True,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
            bottomUpIn=dict(
                description='Belief values over children\'s groups',
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=False,
                isDefaultInput=True,
                requireSplitterMap=False),
            partitionIn=dict(
                description='Partition ID of the input sample',
                dataType='Real32',
                count=0,
                required=True,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False),
            auxDataIn=dict(
                description='Auxiliary data from the sensor',
                dataType='Real32',
                count=0,
                required=False,
                regionLevel=True,
                isDefaultInput=False,
                requireSplitterMap=False)
        ),
        outputs=dict(
            categoriesOut=dict(
                description='A vector representing, for each category '
                'index, the likelihood that the input to the node belongs '
                'to that category based on the number of neighbors of '
                'that category that are among the nearest K.',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=True),
            bestPrototypeIndices=dict(
                description='A vector that lists, in descending order of '
                'the match, the positions of the prototypes '
                'that best match the input pattern.',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=False),
            categoryProbabilitiesOut=dict(
                description='A vector representing, for each category '
                'index, the probability that the input to the node belongs '
                'to that category based on the distance to the nearest '
                'neighbor of each category.',
                dataType='Real32',
                count=0,
                regionLevel=True,
                isDefaultOutput=True),
        ),
        parameters=dict(
            learningMode=dict(
                description='Boolean (0/1) indicating whether or not a region '
                'is in learning mode.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=1,
                accessMode='ReadWrite'),
            inferenceMode=dict(
                description='Boolean (0/1) indicating whether or not a region '
                'is in inference mode.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='ReadWrite'),
            acceptanceProbability=dict(
                description='During learning, inputs are learned with '
                'probability equal to this parameter. '
                'If set to 1.0, the default, '
                'all inputs will be considered '
                '(subject to other tests).',
                dataType='Real32',
                count=1,
                constraints='',
                defaultValue=1.0,
                #accessMode='Create'),
                accessMode='ReadWrite'),  # and Create too
            confusion=dict(
                description='Confusion matrix accumulated during inference. '
                'Reset with reset(). This is available to Python '
                'client code only.',
                dataType='Handle',
                count=2,
                constraints='',
                defaultValue=None,
                accessMode='Read'),
            activeOutputCount=dict(
                description='The number of active elements in the '
                '"categoriesOut" output.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='Read'),
            categoryCount=dict(
                description='An integer indicating the number of '
                'categories that have been learned',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=None,
                accessMode='Read'),
            coincidenceCount=dict(
                description='Number of patterns learned by the classifier.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=None,
                accessMode='Read'),
            coincidenceMatrix=dict(
                description='The actual patterns learned by the classifier.',
                dataType='Handle',
                count=1,
                constraints='',
                defaultValue=None,
                accessMode='Read'),
            k=dict(
                description='The number of nearest neighbors to use '
                'during inference.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=1,
                accessMode='Create'),
            maxCategoryCount=dict(
                description='The maximal number of categories the '
                'classifier will distinguish between.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=2,
                accessMode='Create'),
            distanceNorm=dict(
                description='The norm to use for a distance metric (i.e., '
                'the "p" in Lp-norm)',
                dataType='Real32',
                count=1,
                constraints='',
                defaultValue=2.0,
                accessMode='ReadWrite'),
                #accessMode='Create'),
            distanceMethod=dict(
                description='Method used to compute distances between inputs and'
                'prototypes. Possible options are norm, rawOverlap, '
                'pctOverlapOfLarger, and pctOverlapOfProto',
                dataType="Byte",
                count=0,
                constraints='enum: norm, rawOverlap, pctOverlapOfLarger, '
                'pctOverlapOfProto',
                defaultValue='norm',
                accessMode='ReadWrite'),
            outputProbabilitiesByDist=dict(
                description='If True, categoryProbabilitiesOut is the probability of '
                'each category based on the distance to the nearest neighbor of '
                'each category. If False, categoryProbabilitiesOut is the '
                'percentage of neighbors among the top K that are of each category.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            distThreshold=dict(
                description='Distance Threshold. If a pattern that '
                'is less than distThreshold apart from '
                'the input pattern already exists in the '
                'KNN memory, then the input pattern is '
                'not added to KNN memory.',
                dataType='Real32',
                count=1,
                constraints='',
                defaultValue=0.0,
                #accessMode='Create'),
                accessMode='ReadWrite'),
            inputThresh=dict(
                description='Input binarization threshold, used if '
                '"doBinarization" is True.',
                dataType='Real32',
                count=1,
                constraints='',
                defaultValue=0.5,
                accessMode='Create'),
            doBinarization=dict(
                description='Whether or not to binarize the input vectors.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            useSparseMemory=dict(
                description='A boolean flag that determines whether or '
                'not the KNNClassifier will use sparse Memory',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=1,
                accessMode='Create'),
            sparseThreshold=dict(
                description='If sparse memory is used, input variables '
                'whose absolute value is less than this '
                'threshold will be stored as zero',
                dataType='Real32',
                count=1,
                constraints='',
                defaultValue=0.0,
                accessMode='Create'),
            relativeThreshold=dict(
                description='Whether to multiply sparseThreshold by max value '
                ' in input',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            winnerCount=dict(
                description='Only this many elements of the input are '
                'stored. All elements are stored if 0.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='Create'),
            doSphering=dict(
                description='A boolean indicating whether or not data should'
                'be "sphered" (i.e. each dimension should be normalized such'
                'that its mean and variance are zero and one, respectively.) This'
                ' sphering normalization would be performed after all training '
                'samples had been received but before inference was performed. '
                'The dimension-specific normalization constants would then '
                ' be applied to all future incoming vectors prior to performing '
                ' conventional NN inference.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            SVDSampleCount=dict(
                description='If not 0, carries out SVD transformation after '
                'that many samples have been seen.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='Create'),
            SVDDimCount=dict(
                description='Number of dimensions to keep after SVD if greater '
                'than 0. If set to -1 it is considered unspecified. '
                'If set to 0 it is consider "adaptive" and the number '
                'is chosen automatically.',
                dataType='Int32',
                count=1,
                constraints='',
                defaultValue=-1,
                accessMode='Create'),
            fractionOfMax=dict(
                description='The smallest singular value which is retained '
                'as a fraction of the largest singular value. This is '
                'used only if SVDDimCount==0 ("adaptive").',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='Create'),
            useAuxiliary=dict(
                description='Whether or not the classifier should use auxiliary '
                'input data.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            justUseAuxiliary=dict(
                # Typo fix: "ONLUY" -> "ONLY".
                description='Whether or not the classifier should ONLY use the '
                'auxiliary input data.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=0,
                accessMode='Create'),
            clVerbosity=dict(
                description='An integer that controls the verbosity level, '
                '0 means no verbose output, increasing integers '
                'provide more verbosity.',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='ReadWrite'),
            doSelfValidation=dict(
                description='A boolean flag that determines whether or'
                'not the KNNClassifier should perform partitionID-based'
                'self-validation during the finishLearning() step.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=None,
                accessMode='ReadWrite'),
            keepAllDistances=dict(
                description='Whether to store all the protoScores in an array, '
                'rather than just the ones for the last inference. '
                'When this parameter is changed from True to False, '
                'all the scores are discarded except for the most '
                'recent one.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=None,
                accessMode='ReadWrite'),
            replaceDuplicates=dict(
                description='A boolean flag that determines whether or'
                'not the KNNClassifier should replace duplicates'
                'during learning. This should be on when online'
                'learning.',
                dataType='UInt32',
                count=1,
                constraints='bool',
                defaultValue=None,
                accessMode='ReadWrite'),
            cellsPerCol=dict(
                # NOTE(review): the trailing 'colum ' fragment below is
                # garbled upstream text; kept verbatim.
                description='If >= 1, we assume the input is organized into columns, '
                'in the same manner as the temporal pooler AND '
                'whenever we store a new prototype, we only store the '
                'start cell (first cell) in any column which is bursting.'
                'colum ',
                dataType='UInt32',
                count=1,
                constraints='',
                defaultValue=0,
                accessMode='Create'),
            maxStoredPatterns=dict(
                description='Limits the maximum number of the training patterns '
                'stored. When KNN learns in a fixed capacity mode, '
                'the unused patterns are deleted once the number '
                'of stored patterns is greater than maxStoredPatterns'
                'columns. [-1 is no limit] ',
                dataType='Int32',
                count=1,
                constraints='',
                defaultValue=-1,
                accessMode='Create'),
        ),
        commands=dict()
    )
    return ns
#---------------------------------------------------------------------------------
def __init__(self,
             maxCategoryCount=0,
             bestPrototypeIndexCount=0,
             outputProbabilitiesByDist=False,
             k=1,
             distanceNorm=2.0,
             distanceMethod='norm',
             distThreshold=0,
             doBinarization=False,
             inputThresh=0.500,
             useSparseMemory=True,
             sparseThreshold=0.0,
             relativeThreshold=False,
             winnerCount=0,
             acceptanceProbability=1.0,
             seed=42,
             doSphering=False,
             SVDSampleCount=0,
             SVDDimCount=0,
             fractionOfMax=0,
             useAuxiliary=0,
             justUseAuxiliary=0,
             clVerbosity=0,
             doSelfValidation=False,
             replaceDuplicates=False,
             cellsPerCol=0,
             maxStoredPatterns=-1
             ):
    """Construct the region.

    Parameters mirror the entries declared in getSpec(); see that spec
    for per-parameter descriptions.  Several sentinel values (0 / -1) are
    translated below into the None/'adaptive' forms that the underlying
    KNNClassifier expects.
    """
    self.version = KNNClassifierRegion.__VERSION__
    # Convert various arguments to match the expectation
    # of the KNNClassifier
    if SVDSampleCount == 0:
        SVDSampleCount = None
    if SVDDimCount == -1:
        SVDDimCount = None
    elif SVDDimCount == 0:
        SVDDimCount = 'adaptive'
    if fractionOfMax == 0:
        fractionOfMax = None
    if useAuxiliary == 0:
        useAuxiliary = False
    if justUseAuxiliary == 0:
        justUseAuxiliary = False
    # KNN Parameters: kwargs passed verbatim to KNNClassifier in
    # _initEphemerals().
    self.knnParams = dict(
        k=k,
        distanceNorm=distanceNorm,
        distanceMethod=distanceMethod,
        distThreshold=distThreshold,
        doBinarization=doBinarization,
        binarizationThreshold=inputThresh,
        useSparseMemory=useSparseMemory,
        sparseThreshold=sparseThreshold,
        relativeThreshold=relativeThreshold,
        numWinners=winnerCount,
        numSVDSamples=SVDSampleCount,
        numSVDDims=SVDDimCount,
        fractionOfMax=fractionOfMax,
        verbosity=clVerbosity,
        replaceDuplicates=replaceDuplicates,
        cellsPerCol=cellsPerCol,
        maxStoredPatterns=maxStoredPatterns
    )
    # Initialize internal structures
    self.outputProbabilitiesByDist = outputProbabilitiesByDist
    self.learningMode = True
    self.inferenceMode = False
    self._epoch = 0
    self.acceptanceProbability = acceptanceProbability
    # Seeded RNG used for the acceptanceProbability coin flips.
    self._rgen = numpy.random.RandomState(seed)
    self.confusion = numpy.zeros((1, 1))
    self.keepAllDistances = False
    self._protoScoreCount = 0
    self._useAuxiliary = useAuxiliary
    self._justUseAuxiliary = justUseAuxiliary
    # Sphering normalization
    self._doSphering = doSphering
    self._normOffset = None
    self._normScale = None
    self._samples = None
    self._labels = None
    # Debugging
    self.verbosity = clVerbosity
    # Boolean controlling whether or not the
    # region should perform partitionID-based
    # self-validation during the finishLearning()
    # step.
    self.doSelfValidation = doSelfValidation
    # Taps (opened on demand by enableTap())
    self._tapFileIn = None
    self._tapFileOut = None
    # Creates self._knn and the other non-checkpointed attributes.
    self._initEphemerals()
    self.maxStoredPatterns = maxStoredPatterns
    self.maxCategoryCount = maxCategoryCount
    self._bestPrototypeIndexCount = bestPrototypeIndexCount
def _getEphemeralAttributes(self):
    """Names of attributes excluded from serialized (pickled) state."""
    return [
        '_firstComputeCall',
        '_accuracy',
        '_protoScores',
        '_categoryDistances',
    ]
def _initEphemerals(self):
    """Initialize attributes that are not saved with the checkpoint,
    including the underlying KNNClassifier instance."""
    self._firstComputeCall = True
    self._accuracy = None
    self._protoScores = None
    self._categoryDistances = None
    self._knn = KNNClassifier.KNNClassifier(**self.knnParams)
    # Backfill attributes that may be absent on instances restored from an
    # older checkpoint.  NOTE(review): '_useAuxialiary' looks like a typo
    # of '_useAuxiliary' (set in __init__); kept as-is for checkpoint
    # compatibility -- confirm before renaming.
    legacy_attrs = ('_partitions', '_useAuxialiary', '_doSphering',
                    '_scanInfo', '_protoScores', 'doSelfValidation')
    for attr in legacy_attrs:
        if not hasattr(self, attr):
            setattr(self, attr, None)
def __setstate__(self, state):
    """Set state from serialized state (pickle protocol)."""
    if 'version' not in state:
        # Pre-versioned checkpoint: take the dict wholesale.
        self.__dict__.update(state)
    else:
        if state['version'] != 1:
            raise RuntimeError("Invalid KNNClassifierRegion version for __setstate__")
        # The KNN sub-object is restored separately, after the ephemerals
        # (and hence self._knn) have been recreated.
        knnState = state.pop('_knn_state')
        self.__dict__.update(state)
        self._initEphemerals()
        self._knn.__setstate__(knnState)
    # Set to current version
    self.version = KNNClassifierRegion.__VERSION__
def __getstate__(self):
    """Return a picklable snapshot of this region (pickle protocol)."""
    snapshot = dict(self.__dict__)
    # Serialize the KNN sub-object through its own protocol and drop the
    # live instance plus all ephemeral attributes.
    snapshot['_knn_state'] = self._knn.__getstate__()
    del snapshot['_knn']
    for ephemeral in self._getEphemeralAttributes():
        del snapshot[ephemeral]
    return snapshot
#---------------------------------------------------------------------------------
def initialize(self, dims, splitterMaps):
    # This region operates as a single node: every dimension must be 1.
    assert all(d == 1 for d in dims)
#---------------------------------------------------------------------------------
def _getActiveOutputCount(self):
    """Number of category slots in use: highest learned index + 1."""
    categories = self._knn._categoryList
    if not categories:
        return 0
    return int(max(categories) + 1)

activeOutputCount = property(fget=_getActiveOutputCount)
#---------------------------------------------------------------------------------
def _getSeenCategoryCount(self):
    """Count of distinct categories the classifier has learned."""
    distinct = set(self._knn._categoryList)
    return len(distinct)

categoryCount = property(fget=_getSeenCategoryCount)
#---------------------------------------------------------------------------------
def _getCoincidenceMatrix(self):
    """Learned pattern matrix; prefers _M when present, else _Memory."""
    matrix = self._knn._M
    if matrix is None:
        matrix = self._knn._Memory
    return matrix
#---------------------------------------------------------------------------------
def _getAccuracy(self):
    """Return (correct, total) counts from the confusion matrix."""
    size = self.confusion.shape[0]
    assert size == self.confusion.shape[1], "Confusion matrix is non-square."
    # Correct classifications live on the diagonal.
    correct = self.confusion.diagonal().sum()
    return correct, self.confusion.sum()

accuracy = property(fget=_getAccuracy)
#---------------------------------------------------------------------------------
def clear(self):
    """Discard all stored prototypes from the underlying KNN classifier."""
    self._knn.clear()
#---------------------------------------------------------------------------------
def getParameter(self, name, index=-1):
    """
    Get the value of the parameter.

    @param name -- the name of the parameter to retrieve, as defined
    by the Node Spec.
    """
    knn = self._knn

    def zero_if_none(value):
        # Numeric spec parameters expose "unset" (None) as 0.
        return value if value is not None else 0

    getters = {
        "coincidenceCount": lambda: knn._numPatterns,
        "coincidenceMatrix": self._getCoincidenceMatrix,
        "k": lambda: knn.k,
        "distanceNorm": lambda: knn.distanceNorm,
        "distanceMethod": lambda: knn.distanceMethod,
        "distThreshold": lambda: knn.distThreshold,
        "inputThresh": lambda: knn.binarizationThreshold,
        "doBinarization": lambda: knn.doBinarization,
        "useSparseMemory": lambda: knn.useSparseMemory,
        "sparseThreshold": lambda: knn.sparseThreshold,
        "winnerCount": lambda: knn.numWinners,
        "relativeThreshold": lambda: knn.relativeThreshold,
        "SVDSampleCount": lambda: zero_if_none(knn.numSVDSamples),
        "SVDDimCount": lambda: zero_if_none(knn.numSVDDims),
        "fractionOfMax": lambda: zero_if_none(knn.fractionOfMax),
        "useAuxiliary": lambda: self._useAuxiliary,
        "justUseAuxiliary": lambda: self._justUseAuxiliary,
        "doSphering": lambda: self._doSphering,
        "cellsPerCol": lambda: knn.cellsPerCol,
        "maxStoredPatterns": lambda: self.maxStoredPatterns,
        "categoryRecencyList": lambda: knn._categoryRecencyList,
    }
    if name in getters:
        return getters[name]()
    # If any spec parameter name is the same as an attribute, this call
    # will get it automatically, e.g. self.learningMode
    return PyRegion.getParameter(self, name, index)
#---------------------------------------------------------------------------------
  def setParameter(self, name, index, value):
    """
    Set the value of the parameter.

    @param name -- the name of the parameter to update, as defined
            by the Node Spec.
    @param index -- unused here; forwarded to PyRegion for unknown names.
    @param value -- the value to which the parameter is to be set.
    """
    if name == "learningMode":
      # Re-entering learning after it was finished needs explicit support
      # from the underlying classifier (see _restartLearning).
      if int(value) and not self.learningMode:
        self._restartLearning()
      self.learningMode = bool(int(value))
      self._epoch = 0
    elif name == "inferenceMode":
      self._epoch = 0
      # The first switch into inference finalizes learning (sphering etc.).
      if int(value) and not self.inferenceMode:
        self._finishLearning()
      self.inferenceMode = bool(int(value))
    elif name == "distanceNorm":
      self._knn.distanceNorm = value
    elif name == "distanceMethod":
      self._knn.distanceMethod = value
    elif name == "keepAllDistances":
      self.keepAllDistances = bool(value)
      if not self.keepAllDistances:
        # Discard all distances except the latest
        if self._protoScores is not None and self._protoScores.shape[0] > 1:
          self._protoScores = self._protoScores[-1,:]
        if self._protoScores is not None:
          self._protoScoreCount = 1
        else:
          self._protoScoreCount = 0
    elif name == "clVerbosity":
      # Verbosity is mirrored into the wrapped classifier.
      self.verbosity = value
      self._knn.verbosity = value
    elif name == "doSelfValidation":
      self.doSelfValidation = value
    else:
      # Unknown names fall through to the generic attribute handling.
      return PyRegion.setParameter(self, name, index, value)
#---------------------------------------------------------------------------------
  def reset(self):
    """Reset accumulated state: start over with an empty 1x1 confusion matrix."""
    self.confusion = numpy.zeros((1, 1))
#---------------------------------------------------------------------------------
def doInference(self, activeInput):
"""Explicitly run inference on a vector that is passed in and return the
category id. Useful for debugging."""
prediction, inference, allScores = self._knn.infer(activeInput)
return inference
#---------------------------------------------------------------------------------
  def enableTap(self, tapPath):
    """
    Begin writing output tap files.

    @param tapPath -- base name of the output tap files to write; '.in'
            and '.out' suffixes are appended for the input and output
            streams respectively (see handleLogInput / handleLogOutput).
    """
    self._tapFileIn = open(tapPath + '.in', 'w')
    self._tapFileOut = open(tapPath + '.out', 'w')
#---------------------------------------------------------------------------------
def disableTap(self):
"""Disable writing of output tap files. """
if self._tapFileIn is not None:
self._tapFileIn.close()
self._tapFileIn = None
if self._tapFileOut is not None:
self._tapFileOut.close()
self._tapFileOut = None
#---------------------------------------------------------------------------------
  def handleLogInput(self, inputs):
    """Write each input vector to the '.in' tap file, one vector per line."""
    # No-op unless enableTap() has been called.
    if self._tapFileIn is not None:
      for input in inputs:
        for k in range(len(input)):
          # Trailing comma: elements are space-separated on one line (py2).
          print >> self._tapFileIn, input[k],
        print >> self._tapFileIn
#---------------------------------------------------------------------------------
  def handleLogOutput(self, output):
    """Write the inference output vector to the '.out' tap file."""
    # No-op unless enableTap() has been called.
    if self._tapFileOut is not None:
      for k in range(len(output)):
        # Trailing comma: elements are space-separated on one line (py2).
        print >> self._tapFileOut, output[k],
      print >> self._tapFileOut
#---------------------------------------------------------------------------------
  def _storeSample(self, inputVector, trueCatIndex, partition=0):
    """
    Store a training sample and associated category label.

    Used while sphering is enabled: samples are buffered here and only fed
    to the k-NN classifier once normalization constants are known
    (see _finishSphering).
    """
    # If this is the first sample, then allocate a numpy array
    # of the appropriate size in which to store all samples.
    if self._samples is None:
      self._samples = numpy.zeros((0, len(inputVector)), dtype=RealNumpyDType)
      assert self._labels is None
      self._labels = []

    # Add the sample vector and category label.
    self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=0)
    self._labels += [trueCatIndex]

    # Add the partition ID (None is normalized to 0).
    if self._partitions is None:
      self._partitions = []
    if partition is None:
      partition = 0
    self._partitions += [partition]
#---------------------------------------------------------------------------------
  def compute(self, inputs, outputs):
    """
    Process one input sample.
    This method is called by the runtime engine.

    Reads 'bottomUpIn' (and optionally 'auxDataIn', 'categoryIn',
    'partitionIn') from *inputs*. In inference mode, fills 'categoriesOut',
    'categoryProbabilitiesOut' and optionally 'bestPrototypeIndices' in
    *outputs* and updates the confusion matrix. In learning mode, feeds the
    sample (possibly subsampled) to the k-NN classifier or buffers it for
    sphering. Increments self._epoch on every call.
    """
    # For backward compatibility
    if self._useAuxiliary is None:
      self._useAuxiliary = False

    # If the first time being called, then print potential warning messages
    if self._firstComputeCall:
      self._firstComputeCall = False
      if self._useAuxiliary:
        if self._justUseAuxiliary == True:
          print " Warning: You have chosen to ignore the image data and instead just use the auxiliary data stream."

    # Format inputs
    inputVector = inputs['bottomUpIn']

    # Look for auxiliary input; it is either used instead of, or appended
    # to, the bottom-up input vector.
    if self._useAuxiliary==True:
      auxVector = inputs['auxDataIn']
      if auxVector.dtype != numpy.float32:
        raise RuntimeError, "KNNClassifierRegion expects numpy.float32 for the auxiliary data vector"
      if self._justUseAuxiliary == True:
        inputVector = inputs['auxDataIn']
      else:
        inputVector = numpy.concatenate([inputVector, inputs['auxDataIn']])

    # Logging (tap file, if enabled)
    self.handleLogInput([inputVector])

    # Read the category.
    category = -1
    assert "categoryIn" in inputs, "No linked category input."
    assert len(inputs["categoryIn"]) == 1, "Must have exactly one link to category input."
    catInput = inputs['categoryIn']
    assert len(catInput) == 1, "Category input element count must be exactly 1."
    category = catInput[0]

    # Read the partition ID.
    if "partitionIn" in inputs:
      assert len(inputs["partitionIn"]) == 1, "Must have exactly one link to partition input."
      partInput = inputs['partitionIn']
      assert len(partInput) == 1, "Partition input element count must be exactly 1."
      partition = int(partInput[0])
    else:
      partition = None

    # ---------------------------------------------------------------------
    # Inference (can be done simultaneously with learning)
    if self.inferenceMode:
      categoriesOut = outputs['categoriesOut']
      probabilitiesOut = outputs['categoryProbabilitiesOut']

      # If we are sphering, then apply normalization
      if self._doSphering:
        inputVector = (inputVector + self._normOffset) * self._normScale

      nPrototypes = 0
      if "bestPrototypeIndices" in outputs:
        bestPrototypeIndicesOut = outputs["bestPrototypeIndices"]
        nPrototypes = len(bestPrototypeIndicesOut)

      winner, inference, protoScores, categoryDistances = \
                  self._knn.infer(inputVector, partitionId=partition)

      if not self.keepAllDistances:
        self._protoScores = protoScores
      else:
        # Keep all prototype scores in an array, one row per compute() call.
        if self._protoScores is None:
          self._protoScores = numpy.zeros((1, protoScores.shape[0]),
                                          protoScores.dtype)
          self._protoScores[0,:] = protoScores
          self._protoScoreCount = 1
        else:
          if self._protoScoreCount == self._protoScores.shape[0]:
            # Double the size of the array (amortized growth)
            newProtoScores = numpy.zeros((self._protoScores.shape[0] * 2,
                                          self._protoScores.shape[1]),
                                          self._protoScores.dtype)
            newProtoScores[:self._protoScores.shape[0],:] = self._protoScores
            self._protoScores = newProtoScores
          # Store the new prototype score
          self._protoScores[self._protoScoreCount,:] = protoScores
          self._protoScoreCount += 1
      self._categoryDistances = categoryDistances

      # --------------------------------------------------------------------
      # Compute the probability of each category
      if self.outputProbabilitiesByDist:
        scores = 1.0 - self._categoryDistances
      else:
        scores = inference

      # Probability is simply the scores/scores.sum()
      total = scores.sum()
      if total == 0:
        # No information: fall back to a uniform distribution.
        numScores = len(scores)
        probabilities = numpy.ones(numScores) / numScores
      else:
        probabilities = scores / total

      # -------------------------------------------------------------------
      # Fill the output vectors with our results (truncated to output size)
      nout = min(len(categoriesOut), len(inference))
      categoriesOut.fill(0)
      categoriesOut[0:nout] = inference[0:nout]

      probabilitiesOut.fill(0)
      probabilitiesOut[0:nout] = probabilities[0:nout]

      if self.verbosity >= 1:
        print "KNNRegion: categoriesOut: ", categoriesOut[0:nout]
        print "KNNRegion: probabilitiesOut: ", probabilitiesOut[0:nout]

      if self._scanInfo is not None:
        self._scanResults = [tuple(inference[:nout])]

      # Update the stored confusion matrix, growing it if this sample
      # introduces a category index beyond its current size.
      if category >= 0:
        dims = max(category+1, len(inference))
        oldDims = len(self.confusion)
        if oldDims < dims:
          confusion = numpy.zeros((dims, dims))
          confusion[0:oldDims, 0:oldDims] = self.confusion
          self.confusion = confusion
        self.confusion[inference.argmax(), category] += 1

      # Calculate the best prototype indices (closest categories first)
      if nPrototypes > 1:
        bestPrototypeIndicesOut.fill(0)
        if categoryDistances is not None:
          indices = categoryDistances.argsort()
          nout = min(len(indices), nPrototypes)
          bestPrototypeIndicesOut[0:nout] = indices[0:nout]
      elif nPrototypes == 1:
        if (categoryDistances is not None) and len(categoryDistances):
          bestPrototypeIndicesOut[0] = categoryDistances.argmin()
        else:
          bestPrototypeIndicesOut[0] = 0

      # Logging (tap file, if enabled)
      self.handleLogOutput(inference)

    # ---------------------------------------------------------------------
    # Learning mode
    if self.learningMode:
      # Subsample the training stream when acceptanceProbability < 1.0.
      if (self.acceptanceProbability < 1.0) and \
            (self._rgen.uniform(0.0, 1.0) > self.acceptanceProbability):
        pass  # Skip.
      # Accept the input
      else:
        # If we are sphering, then we can't provide the data to the KNN
        # library until we have computed per-dimension normalization constants.
        # So instead, we'll just store each training sample.
        if self._doSphering:
          self._storeSample(inputVector, category, partition)

        # If we are not sphering, then we just go ahead and pass the raw
        # training sample directly to the KNN library.
        else:
          # NOTE(review): this bare-except retry re-calls learn() once if
          # the first call raises, for reasons not evident here — confirm
          # whether it masks a real error.
          try:
            self._knn.learn(inputVector, category, partition)
          except:
            self._knn.learn(inputVector, category, partition)

    self._epoch += 1
#---------------------------------------------------------------------------------
  def getCategoryList(self):
    """
    Public API for returning the category list.
    This is a required API of the NearestNeighbor inspector.

    It returns an array which has one entry per stored prototype. The value
    of the entry is the category # of that stored prototype.
    """
    return self._knn._categoryList
#---------------------------------------------------------------------------------
  def removeCategory(self, categoryToRemove):
    """Delegate removal of a category's prototypes to the wrapped k-NN
    classifier, returning whatever it returns."""
    return self._knn.removeCategory(categoryToRemove)
#---------------------------------------------------------------------------------
def getLatestDistances(self):
"""
Public API for returning the full scores
(distance to each prototype) from the last
compute() inference call.
This is a required API of the NearestNeighbor inspector.
It returns an array which has one entry per stored prototype. The value
of the entry is distance of the most recenty inferred input from the
stored prototype.
"""
if self._protoScores is not None:
if self.keepAllDistances:
return self._protoScores[self._protoScoreCount - 1,:]
else:
return self._protoScores
else:
return None
#---------------------------------------------------------------------------------
def getAllDistances(self):
"""
Return all the prototype distances from all computes available.
Like getLatestDistances, but returns all the scores if more than one set is
available. getLatestDistances will always just return one set of scores.
"""
if self._protoScores is None:
return None
return self._protoScores[:self._protoScoreCount, :]
#---------------------------------------------------------------------------------
def calculateProbabilities(self):
# Get the scores, from 0 to 1
scores = 1.0 - self._categoryDistances
# Probability is simply the score/scores.sum()
total = scores.sum()
if total == 0:
numScores = len(scores)
return numpy.ones(numScores) / numScores
return scores / total
#---------------------------------------------------------------------------------
  def _restartLearning(self):
    """
    Currently, we allow learning mode to be "re-started" after being
    ended, but only if PCA and sphering (if any) operations have
    already been completed (for the sake of simplicity.)
    """
    # Delegate to the wrapped classifier; it owns the learning state.
    self._knn.restartLearning()
#---------------------------------------------------------------------------------
  def _finishLearning(self):
    """Finalize learning: complete sphering (if enabled), tell the k-NN
    classifier training is over, and optionally compute leave-one-out
    validation accuracy into self._accuracy.

    (The previous docstring claimed this does nothing; it does not.)
    """
    if self._doSphering:
      self._finishSphering()

    self._knn.finishLearning()

    # Compute leave-one-out validation accuracy if
    # we actually received non-trivial partition info
    self._accuracy = None
    if self.doSelfValidation:
      if self._knn._partitionIdArray is not None:
        numSamples, numCorrect = self._knn.leaveOneOutTest()
        if numSamples:
          self._accuracy = float(numCorrect) / float(numSamples)
          print "Leave-one-out validation: %d of %d correct ==> %.3f%%" % \
                (numCorrect, numSamples, self._accuracy * 100.0)
#---------------------------------------------------------------------------------
  def _finishSphering(self):
    """
    Compute normalization constants for each feature dimension
    based on the collected training samples. Then normalize our
    training samples using these constants (so that each input
    dimension has mean and variance of zero and one, respectively.)
    Then feed these "sphered" training samples into the underlying
    classifier.
    """
    # First normalize the means (to zero)
    self._normOffset = self._samples.mean(axis=0) * -1.0
    self._samples += self._normOffset

    # Now normalize the variances (to one). However, we need to be
    # careful because the variance could conceivably be zero for one
    # or more dimensions.
    variance = self._samples.var(axis=0)
    variance[numpy.where(variance == 0.0)] = 1.0
    self._normScale = 1.0 / numpy.sqrt(variance)
    self._samples *= self._normScale

    # Now feed each "sphered" sample into the k-NN classifier
    # (buffered earlier by _storeSample).
    for sampleIndex in range(len(self._labels)):
      self._knn.learn(self._samples[sampleIndex],
                      self._labels[sampleIndex],
                      self._partitions[sampleIndex])
#---------------------------------------------------------------------------------
def _arraysToLists(self, samplesArray, labelsArray):
labelsList = list(labelsArray)
samplesList = [[float(y) for y in x] for x in [list(x) for x in samplesArray]]
return samplesList, labelsList
#---------------------------------------------------------------------------------
def getOutputElementCount(self, name):
"""This method will be called only when the node is used in nuPIC 2"""
if name == 'categoriesOut':
return self.maxCategoryCount
elif name == 'categoryProbabilitiesOut':
return self.maxCategoryCount
elif name == 'bestPrototypeIndices':
return self._bestPrototypeIndexCount if self._bestPrototypeIndexCount else 0
else:
raise Exception('Unknown output: ' + name)
#---------------------------------------------------------------------------------
if __name__=='__main__':
  # Smoke test: build a nuPIC network containing a single KNN classifier
  # region configured via a YAML-style parameter string.
  from nupic.engine import Network
  n = Network()
  classifier = n.addRegion(
      'classifier',
      'py.KNNClassifierRegion',
      '{ maxCategoryCount: 48, SVDSampleCount: 400, ' +
      '  SVDDimCount: 20, distanceNorm: 0.6 }')
|
gpl-3.0
|
yghannam/teuthology
|
teuthology/beanstalk.py
|
5
|
5121
|
import beanstalkc
import yaml
import logging
import pprint
import sys
from collections import OrderedDict
from .config import config
from . import report
log = logging.getLogger(__name__)
def connect():
    """Open a beanstalkd connection using host/port from teuthology config.

    Raises RuntimeError when the queue host or port is not configured.
    """
    host, port = config.queue_host, config.queue_port
    if host is None or port is None:
        message = 'Beanstalk queue information not found in {conf_path}'.format(
            conf_path=config.teuthology_yaml)
        raise RuntimeError(message)
    return beanstalkc.Connection(host=host, port=port)
def watch_tube(connection, tube_name):
    """
    Watch a given tube, potentially correcting to 'multi' if necessary.
    Returns the tube_name that was actually used.
    """
    # Multi-machine-type names contain commas; those jobs live on the
    # shared 'multi' tube.
    actual_name = 'multi' if ',' in tube_name else tube_name
    if actual_name != tube_name:
        log.debug("Correcting tube name to 'multi'")
    connection.watch(actual_name)
    connection.ignore('default')
    return actual_name
def walk_jobs(connection, tube_name, processor, pattern=None):
    """Iterate the ready jobs on a tube, feeding each to *processor*.

    Each job is reserved (not deleted), its YAML body parsed, and passed to
    processor.add_job(job_id, job_config, job); processor.complete() is
    called once the walk finishes.

    @param pattern: if given, only jobs whose name contains this substring
                    are passed on to the processor.
    """
    log.info("Checking Beanstalk Queue...")
    job_count = connection.stats_tube(tube_name)['current-jobs-ready']
    if job_count == 0:
        log.info('No jobs in Beanstalk Queue')
        return

    # Try to figure out a sane timeout based on how many jobs are in the queue
    timeout = job_count / 2000.0 * 60
    for i in range(1, job_count + 1):
        print_progress(i, job_count, "Loading")
        job = connection.reserve(timeout=timeout)
        # reserve() may time out (None) or return a bodiless job; skip both.
        if job is None or job.body is None:
            continue
        job_config = yaml.safe_load(job.body)
        job_name = job_config['name']
        job_id = job.stats()['id']
        if pattern is not None and pattern not in job_name:
            continue
        processor.add_job(job_id, job_config, job)
    end_progress()
    processor.complete()
def print_progress(index, total, message=None):
    """Render a one-line, carriage-return progress indicator on stderr."""
    prefix = "{m} ".format(m=message) if message else ''
    sys.stderr.write("{msg}{i}/{total}\r".format(msg=prefix, i=index, total=total))
    sys.stderr.flush()
def end_progress():
    """Terminate the carriage-return progress line started by print_progress."""
    sys.stderr.write('\n')
    sys.stderr.flush()
class JobProcessor(object):
    """Collect beanstalk jobs in insertion order, running a hook per job.

    Subclasses override process_job() (per job) and/or complete()
    (after the whole walk).
    """
    def __init__(self):
        self.jobs = OrderedDict()

    def add_job(self, job_id, job_config, job_obj=None):
        """Record one job (keyed by its stringified id), then process it."""
        key = str(job_id)
        entry = {
            'index': len(self.jobs) + 1,
            'job_config': job_config,
        }
        if job_obj:
            entry['job_obj'] = job_obj
        self.jobs[key] = entry
        self.process_job(key)

    def process_job(self, job_id):
        """Per-job hook; the default does nothing."""
        pass

    def complete(self):
        """End-of-walk hook; the default does nothing."""
        pass
class JobPrinter(JobProcessor):
    """Processor that prints each job as it is added, optionally with its
    description words or the full job config."""
    def __init__(self, show_desc=False, full=False):
        super(JobPrinter, self).__init__()
        self.show_desc = show_desc
        self.full = full

    def process_job(self, job_id):
        # (Python 2 print statements.)
        job_config = self.jobs[job_id]['job_config']
        job_index = self.jobs[job_id]['index']
        job_name = job_config['name']
        job_desc = job_config['description']
        print 'Job: {i:>4} {job_name}/{job_id}'.format(
            i=job_index,
            job_id=job_id,
            job_name=job_name,
            )
        if self.full:
            pprint.pprint(job_config)
        elif job_desc and self.show_desc:
            # One description token per indented line.
            for desc in job_desc.split():
                print '\t {desc}'.format(desc=desc)
class RunPrinter(JobProcessor):
    """Processor that prints each distinct run name exactly once."""
    def __init__(self):
        super(RunPrinter, self).__init__()
        self.runs = list()

    def process_job(self, job_id):
        run = self.jobs[job_id]['job_config']['name']
        # Only announce a run the first time one of its jobs appears.
        if run not in self.runs:
            self.runs.append(run)
            print run
class JobDeleter(JobProcessor):
    """Processor that deletes every job whose name contains *pattern*."""
    def __init__(self, pattern):
        # Set before the base __init__ so the filter is ready first.
        self.pattern = pattern
        super(JobDeleter, self).__init__()

    def add_job(self, job_id, job_config, job_obj=None):
        # Filter here so only matching jobs are recorded and processed.
        job_name = job_config['name']
        if self.pattern in job_name:
            super(JobDeleter, self).add_job(job_id, job_config, job_obj)

    def process_job(self, job_id):
        job_config = self.jobs[job_id]['job_config']
        job_name = job_config['name']
        print 'Deleting {job_name}/{job_id}'.format(
            job_id=job_id,
            job_name=job_name,
            )
        job_obj = self.jobs[job_id].get('job_obj')
        if job_obj:
            # Remove the job from the beanstalk queue itself.
            job_obj.delete()
        # Also ask the results server to forget the job.
        report.try_delete_jobs(job_name, job_id)
def main(args):
    """CLI entry point: list, show, or delete jobs on a beanstalk tube.

    @param args: docopt-style dict with '--machine_type', '--delete',
                 '--runs', '--description' and '--full' keys.
    """
    machine_type = args['--machine_type']
    delete = args['--delete']
    runs = args['--runs']
    show_desc = args['--description']
    full = args['--full']
    # Bug fix: 'connection' must be bound before the try block; previously a
    # failure inside connect() raised NameError from the finally clause.
    connection = None
    try:
        connection = connect()
        watch_tube(connection, machine_type)
        if delete:
            walk_jobs(connection, machine_type,
                      JobDeleter(delete))
        elif runs:
            walk_jobs(connection, machine_type,
                      RunPrinter())
        else:
            walk_jobs(connection, machine_type,
                      JobPrinter(show_desc=show_desc, full=full))
    except KeyboardInterrupt:
        log.info("Interrupted.")
    finally:
        if connection is not None:
            connection.close()
|
mit
|
TeamEOS/external_skia
|
tools/gen_bench_expectations_from_codereview.py
|
67
|
5806
|
#!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate new bench expectations from results of trybots on a code review."""
import collections
import compare_codereview
import os
import re
import shutil
import subprocess
import sys
BENCH_DATA_URL = 'gs://chromium-skia-gm/perfdata/%s/%s/*'
CHECKOUT_PATH = os.path.realpath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
TMP_BENCH_DATA_DIR = os.path.join(CHECKOUT_PATH, '.bench_data')
# Lightweight record describing one try job found on the code review:
# builder_name (str), build_number (str or None), is_finished (bool).
TryBuild = collections.namedtuple(
    'TryBuild', ['builder_name', 'build_number', 'is_finished'])
def find_all_builds(codereview_url):
  """Finds and returns information about trybot runs for a code review.

  Args:
      codereview_url: URL of the codereview in question.

  Returns:
      List of NamedTuples: (builder_name, build_number, is_finished)
  """
  results = compare_codereview.CodeReviewHTMLParser().parse(codereview_url)
  try_builds = []
  # NOTE(review): dict.iteritems() is Python 2 only.
  for builder, data in results.iteritems():
    # Only performance ('Perf...') builders matter for bench expectations.
    if builder.startswith('Perf'):
      build_num = data.url.split('/')[-1] if data.url else None
      # A build counts as finished once it has left the pending states and
      # has an assigned build number.
      is_finished = (data.status not in ('pending', 'try-pending') and
                     build_num is not None)
      try_builds.append(TryBuild(builder_name=builder,
                                 build_number=build_num,
                                 is_finished=is_finished))
  return try_builds
def _all_trybots_finished(try_builds):
  """Return True iff all of the given try jobs have finished.

  Args:
      try_builds: list of TryBuild instances.

  Returns:
      True if all of the given try jobs have finished, otherwise False.
  """
  # all() short-circuits on the first unfinished build, matching the
  # original early-return loop (and is vacuously True for an empty list).
  return all(build.is_finished for build in try_builds)
def all_trybots_finished(codereview_url):
  """Return True iff all of the try jobs on the given codereview have finished.

  Args:
      codereview_url: string; URL of the codereview.

  Returns:
      True if all of the try jobs have finished, otherwise False.
  """
  return _all_trybots_finished(find_all_builds(codereview_url))
def get_bench_data(builder, build_num, dest_dir):
  """Download the bench data for the given builder at the given build_num.

  Args:
      builder: string; name of the builder.
      build_num: string; build number.
      dest_dir: string; destination directory for the bench data.

  Raises:
      subprocess.CalledProcessError: if the gsutil copy fails.
  """
  url = BENCH_DATA_URL % (builder, build_num)
  # Recursive copy from Google Storage, with gsutil's output suppressed.
  # NOTE(review): piping stdout/stderr without reading them can block if
  # gsutil produces a lot of output — confirm this is not an issue here.
  subprocess.check_call(['gsutil', 'cp', '-R', url, dest_dir],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
def find_revision_from_downloaded_data(dest_dir):
  """Finds the revision at which the downloaded data was generated.

  Args:
      dest_dir: string; directory holding the downloaded data.

  Returns:
      The revision (git commit hash) at which the downloaded data was
      generated, or None if no revision can be found.
  """
  # Bench data files are named 'bench_<githash>_data...'; the first file
  # matching that pattern determines the revision.
  pattern = re.compile('bench_(?P<revision>[0-9a-fA-F]{2,40})_data.*')
  for data_file in os.listdir(dest_dir):
    match = pattern.match(data_file)
    if match:
      return match.group('revision')
  return None
class TrybotNotFinishedError(Exception):
  """Raised when expectations are requested before every trybot finished."""
  pass
def gen_bench_expectations_from_codereview(codereview_url,
                                           error_on_unfinished=True):
  """Generate bench expectations from a code review.

  Scans the given code review for Perf trybot runs. Downloads the results of
  finished trybots and uses them to generate new expectations for their
  waterfall counterparts.

  Args:
      codereview_url: string; URL of the code review.
      error_on_unfinished: bool; throw an error if any trybot has not
          finished.

  Raises:
      TrybotNotFinishedError: if error_on_unfinished and a trybot is still
          running.
      Exception: if data download or expectation generation failed for any
          builder (reported in aggregate at the end).
  """
  try_builds = find_all_builds(codereview_url)

  # Verify that all trybots have finished running.
  if error_on_unfinished and not _all_trybots_finished(try_builds):
    raise TrybotNotFinishedError('Not all trybots have finished.')

  failed_data_pull = []
  failed_gen_expectations = []

  # Start from a clean scratch directory.
  if os.path.isdir(TMP_BENCH_DATA_DIR):
    shutil.rmtree(TMP_BENCH_DATA_DIR)
  for try_build in try_builds:
    try_builder = try_build.builder_name
    # Map each trybot back to its waterfall counterpart.
    builder = try_builder.replace('-Trybot', '')

    # Download the data.
    dest_dir = os.path.join(TMP_BENCH_DATA_DIR, builder)
    os.makedirs(dest_dir)
    try:
      get_bench_data(try_builder, try_build.build_number, dest_dir)
    except subprocess.CalledProcessError:
      failed_data_pull.append(try_builder)
      continue

    # Find the revision at which the data was generated.
    revision = find_revision_from_downloaded_data(dest_dir)
    if not revision:
      # If we can't find a revision, then something is wrong with the data we
      # downloaded. Skip this builder.
      failed_data_pull.append(try_builder)
      continue

    # Generate new expectations.
    output_file = os.path.join(CHECKOUT_PATH, 'expectations', 'bench',
                               'bench_expectations_%s.txt' % builder)
    try:
      subprocess.check_call(['python',
                             os.path.join(CHECKOUT_PATH, 'bench',
                                          'gen_bench_expectations.py'),
                             '-b', builder, '-o', output_file,
                             '-d', dest_dir, '-r', revision])
    except subprocess.CalledProcessError:
      failed_gen_expectations.append(builder)

  # Report all failures at once instead of aborting on the first one.
  failure = ''
  if failed_data_pull:
    failure += 'Failed to load data for: %s\n\n' % ','.join(failed_data_pull)
  if failed_gen_expectations:
    failure += 'Failed to generate expectations for: %s\n\n' % ','.join(
        failed_gen_expectations)
  if failure:
    raise Exception(failure)
if __name__ == '__main__':
  # Usage: gen_bench_expectations_from_codereview.py <codereview_url>
  gen_bench_expectations_from_codereview(sys.argv[1])
|
bsd-3-clause
|
ZdenekM/artable
|
art_fake_nodes/src/grasping.py
|
6
|
6629
|
#!/usr/bin/env python
import rospy
from actionlib import SimpleActionServer
from art_msgs.msg import PickPlaceAction, PickPlaceGoal, PickPlaceResult, PickPlaceFeedback
import random
class FakeGrasping:
    """Fake pick/place action servers for testing without a real robot.

    Exposes the same SimpleActionServer interfaces as a real grasping node
    and simulates success/failure according to the ALWAYS / NEVER / RANDOM
    policies configured for object recognition, grasping and placing.

    Bug fixes vs. the previous version:
    * Four error-message expressions used
      ``prefix + "left" if left else "right" + "arm"``, which Python parses
      as ``(prefix + "left") if left else ("rightarm")`` — the conditional
      is now computed separately and parenthesized.
    * The place branch's failure message claimed the arm was "already
      holding" an object, but its condition fires when the arm is NOT
      holding anything; the message now matches the condition.
    """

    # Outcome policies for object recognition / grasping / placing.
    ALWAYS = 0
    NEVER = 1
    RANDOM = 2

    def __init__(self):
        self.left_server = SimpleActionServer('/art/robot/left_arm/pp', PickPlaceAction,
                                              execute_cb=self.pick_place_left_cb)
        self.right_server = SimpleActionServer('/art/robot/right_arm/pp', PickPlaceAction,
                                               execute_cb=self.pick_place_right_cb)
        # NOTE(review): self.server is rebound per callback; if both arms'
        # callbacks can run concurrently this is a data race — confirm the
        # action servers are serviced from a single thread.
        self.server = None
        self.objects = self.ALWAYS
        self.grasp = self.ALWAYS
        self.place = self.ALWAYS
        self.object_randomness = 0.8  # 80% of time object is known
        self.grasp_randomness = 0.4
        self.place_randomness = 0.4
        self.holding_left = False
        self.holding_right = False
        self.pick_length = 1  # how long (sec) it takes to pick an object
        self.place_length = 1  # how long (sec) it takes to place an object
        random.seed()

    def pick_place_left_cb(self, goal):
        """Left-arm action callback."""
        self.pickplace_cb(goal, True)

    def pick_place_right_cb(self, goal):
        """Right-arm action callback."""
        self.pickplace_cb(goal, False)

    def pickplace_cb(self, goal, left=True):
        """Simulate one pick or place request for the selected arm."""
        result = PickPlaceResult()
        feedback = PickPlaceFeedback()
        self.server = self.left_server if left else self.right_server
        arm = "left" if left else "right"

        if not (goal.operation == PickPlaceGoal.PICK_OBJECT_ID or
                goal.operation == PickPlaceGoal.PICK_FROM_FEEDER or
                goal.operation == PickPlaceGoal.PLACE_TO_POSE):
            result.result = PickPlaceResult.BAD_REQUEST
            rospy.logerr("BAD_REQUEST, Unknown operation")
            self.server.set_aborted(result, "Unknown operation")
            return

        # Simulated object-recognition outcome.
        if self.objects == self.ALWAYS:
            pass
        elif self.objects == self.NEVER:
            result.result = PickPlaceResult.BAD_REQUEST
            rospy.logerr("BAD_REQUEST, Unknown object id")
            self.server.set_aborted(result, "Unknown object id")
            return
        elif self.objects == self.RANDOM:
            if random.random() > self.object_randomness:
                result.result = PickPlaceResult.BAD_REQUEST
                rospy.logerr("BAD_REQUEST, Unknown object id")
                self.server.set_aborted(result, "Unknown object id")
                return

        grasped = False
        if goal.operation == PickPlaceGoal.PICK_OBJECT_ID or goal.operation == PickPlaceGoal.PICK_FROM_FEEDER:
            rospy.sleep(self.pick_length)
            if (left and self.holding_left) or (not left and self.holding_right):
                result.result = PickPlaceResult.FAILURE
                rospy.logerr("Failure, already holding object in " + arm + " arm")
                self.server.set_aborted(
                    result, "Already holding object in " + arm + " arm")
                return
            if self.grasp == self.ALWAYS:
                grasped = True
            elif self.grasp == self.NEVER:
                result.result = PickPlaceResult.FAILURE
                rospy.logerr("FAILURE, Pick Failed")
                self.server.set_aborted(result, "Pick Failed")
                return
            # Up to five attempts, publishing feedback on each.
            tries = 5
            max_attempts = 5
            while tries > 0:
                feedback.attempt = (max_attempts - tries) + 1
                tries -= 1
                self.server.publish_feedback(feedback)
                if self.grasp == self.RANDOM:
                    if random.random() < self.grasp_randomness:
                        grasped = True
                if grasped:
                    break
                if self.server.is_preempt_requested():
                    self.server.set_preempted(result, "Pick canceled")
                    rospy.logerr("Preempted")
                    return
            if not grasped:
                result.result = PickPlaceResult.FAILURE
                self.server.set_aborted(result, "Pick failed")
                rospy.logerr("FAILURE, Pick Failed")
                return
            if left:
                self.holding_left = True
            else:
                self.holding_right = True

        placed = False
        if goal.operation == PickPlaceGoal.PLACE_TO_POSE:
            rospy.sleep(self.place_length)
            if (left and not self.holding_left) or (not left and not self.holding_right):
                # Condition: the arm holds nothing — the message says so now.
                result.result = PickPlaceResult.FAILURE
                rospy.logerr("Failure, not holding any object in " + arm + " arm")
                self.server.set_aborted(
                    result, "Not holding any object in " + arm + " arm")
                return
            if self.place == self.ALWAYS:
                placed = True
            elif self.place == self.NEVER:
                result.result = PickPlaceResult.FAILURE
                self.server.set_aborted(result, "Place Failed")
                rospy.logerr("FAILURE, Place Failed")
                return
            # Up to five attempts, publishing feedback on each.
            tries = 5
            max_attempts = 5
            while tries > 0:
                feedback.attempt = (max_attempts - tries) + 1
                tries -= 1
                self.server.publish_feedback(feedback)
                if self.place == self.RANDOM:
                    if random.random() < self.place_randomness:
                        placed = True
                if placed:
                    break
            if not placed:
                result.result = PickPlaceResult.FAILURE
                self.server.set_aborted(result, "Place failed")
                rospy.logerr("FAILURE, Place Failed")
                return
            if left:
                self.holding_left = False
            else:
                self.holding_right = False

        result.result = PickPlaceResult.SUCCESS
        self.server.set_succeeded(result)
        rospy.loginfo("SUCCESS")
        print("Finished")
if __name__ == '__main__':
    rospy.init_node('fake_grasping')
    # NOTE(review): the string below looks like a leftover fragment of an
    # old init_node(..., log_level=rospy.DEBUG) call; it is a harmless
    # no-op expression — consider deleting it.
    ''',log_level=rospy.DEBUG'''
    try:
        node = FakeGrasping()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
|
lgpl-2.1
|
faizan-barmawer/openstack_ironic
|
ironic/openstack/common/apiclient/client.py
|
8
|
12937
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2013 Alessio Ababilov
# Copyright 2013 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
# E0202: An attribute inherited from %s hide this method
# pylint: disable=E0202
import logging
import time
try:
import simplejson as json
except ImportError:
import json
import requests
from ironic.openstack.common.apiclient import exceptions
from ironic.openstack.common.gettextutils import _
from ironic.openstack.common import importutils
_logger = logging.getLogger(__name__)
class HTTPClient(object):
"""This client handles sending HTTP requests to OpenStack servers.
Features:
- share authentication information between several clients to different
services (e.g., for compute and image clients);
- reissue authentication request for expired tokens;
- encode/decode JSON bodies;
- raise exceptions on HTTP errors;
- pluggable authentication;
- store authentication information in a keyring;
- store time spent for requests;
- register clients for particular services, so one can use
`http_client.identity` or `http_client.compute`;
- log requests and responses in a format that is easy to copy-and-paste
into terminal and send the same request with curl.
"""
user_agent = "ironic.openstack.common.apiclient"
    def __init__(self,
                 auth_plugin,
                 region_name=None,
                 endpoint_type="publicURL",
                 original_ip=None,
                 verify=True,
                 cert=None,
                 timeout=None,
                 timings=False,
                 keyring_saver=None,
                 debug=False,
                 user_agent=None,
                 http=None):
        """Set up an HTTP client bound to an authentication plugin.

        :param auth_plugin: pluggable authentication provider (see class
            docstring)
        :param region_name: preferred service region
        :param endpoint_type: endpoint flavor to select (e.g. "publicURL")
        :param original_ip: client IP reported via the Forwarded header
        :param verify: TLS verification flag or CA bundle (requests-style)
        :param cert: client certificate, passed through to requests
        :param timeout: default per-request timeout in seconds
        :param timings: when True, record (call, start, end) in self.times
        :param keyring_saver: object used to store auth info in a keyring
        :param debug: when True, log curl-style request/response dumps
        :param user_agent: override for the class-level User-Agent string
        :param http: pre-built requests.Session (one is created if None)
        """
        self.auth_plugin = auth_plugin
        self.endpoint_type = endpoint_type
        self.region_name = region_name
        self.original_ip = original_ip
        self.timeout = timeout
        self.verify = verify
        self.cert = cert
        self.keyring_saver = keyring_saver
        self.debug = debug
        self.user_agent = user_agent or self.user_agent
        self.times = []  # [("item", starttime, endtime), ...]
        self.timings = timings
        # requests within the same session can reuse TCP connections from pool
        self.http = http or requests.Session()
        self.cached_token = None
def _http_log_req(self, method, url, kwargs):
if not self.debug:
return
string_parts = [
"curl -i",
"-X '%s'" % method,
"'%s'" % url,
]
for element in kwargs['headers']:
header = "-H '%s: %s'" % (element, kwargs['headers'][element])
string_parts.append(header)
_logger.debug("REQ: %s" % " ".join(string_parts))
if 'data' in kwargs:
_logger.debug("REQ BODY: %s\n" % (kwargs['data']))
def _http_log_resp(self, resp):
if not self.debug:
return
_logger.debug(
"RESP: [%s] %s\n",
resp.status_code,
resp.headers)
if resp._content_consumed:
_logger.debug(
"RESP BODY: %s\n",
resp.text)
def serialize(self, kwargs):
if kwargs.get('json') is not None:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['json'])
try:
del kwargs['json']
except KeyError:
pass
def get_timings(self):
return self.times
def reset_timings(self):
self.times = []
def request(self, method, url, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around `requests.Session.request` to handle tasks such as
setting headers, JSON encoding/decoding, and error handling.
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
requests.Session.request (such as `headers`) or `json`
that will be encoded as JSON and used as `data` argument
"""
kwargs.setdefault("headers", kwargs.get("headers", {}))
kwargs["headers"]["User-Agent"] = self.user_agent
if self.original_ip:
kwargs["headers"]["Forwarded"] = "for=%s;by=%s" % (
self.original_ip, self.user_agent)
if self.timeout is not None:
kwargs.setdefault("timeout", self.timeout)
kwargs.setdefault("verify", self.verify)
if self.cert is not None:
kwargs.setdefault("cert", self.cert)
self.serialize(kwargs)
self._http_log_req(method, url, kwargs)
if self.timings:
start_time = time.time()
resp = self.http.request(method, url, **kwargs)
if self.timings:
self.times.append(("%s %s" % (method, url),
start_time, time.time()))
self._http_log_resp(resp)
if resp.status_code >= 400:
_logger.debug(
"Request returned failure status: %s",
resp.status_code)
raise exceptions.from_response(resp, method, url)
return resp
@staticmethod
def concat_url(endpoint, url):
"""Concatenate endpoint and final URL.
E.g., "http://keystone/v2.0/" and "/tokens" are concatenated to
"http://keystone/v2.0/tokens".
:param endpoint: the base URL
:param url: the final URL
"""
return "%s/%s" % (endpoint.rstrip("/"), url.strip("/"))
def client_request(self, client, method, url, **kwargs):
"""Send an http request using `client`'s endpoint and specified `url`.
If request was rejected as unauthorized (possibly because the token is
expired), issue one authorization attempt and send the request once
again.
:param client: instance of BaseClient descendant
:param method: method of HTTP request
:param url: URL of HTTP request
:param kwargs: any other parameter that can be passed to
`HTTPClient.request`
"""
filter_args = {
"endpoint_type": client.endpoint_type or self.endpoint_type,
"service_type": client.service_type,
}
token, endpoint = (self.cached_token, client.cached_endpoint)
just_authenticated = False
if not (token and endpoint):
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
pass
if not (token and endpoint):
self.authenticate()
just_authenticated = True
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
if not (token and endpoint):
raise exceptions.AuthorizationFailure(
_("Cannot find endpoint or token for request"))
old_token_endpoint = (token, endpoint)
kwargs.setdefault("headers", {})["X-Auth-Token"] = token
self.cached_token = token
client.cached_endpoint = endpoint
# Perform the request once. If we get Unauthorized, then it
# might be because the auth token expired, so try to
# re-authenticate and try again. If it still fails, bail.
try:
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
except exceptions.Unauthorized as unauth_ex:
if just_authenticated:
raise
self.cached_token = None
client.cached_endpoint = None
self.authenticate()
try:
token, endpoint = self.auth_plugin.token_and_endpoint(
**filter_args)
except exceptions.EndpointException:
raise unauth_ex
if (not (token and endpoint) or
old_token_endpoint == (token, endpoint)):
raise unauth_ex
self.cached_token = token
client.cached_endpoint = endpoint
kwargs["headers"]["X-Auth-Token"] = token
return self.request(
method, self.concat_url(endpoint, url), **kwargs)
def add_client(self, base_client_instance):
"""Add a new instance of :class:`BaseClient` descendant.
`self` will store a reference to `base_client_instance`.
Example:
>>> def test_clients():
... from keystoneclient.auth import keystone
... from openstack.common.apiclient import client
... auth = keystone.KeystoneAuthPlugin(
... username="user", password="pass", tenant_name="tenant",
... auth_url="http://auth:5000/v2.0")
... openstack_client = client.HTTPClient(auth)
... # create nova client
... from novaclient.v1_1 import client
... client.Client(openstack_client)
... # create keystone client
... from keystoneclient.v2_0 import client
... client.Client(openstack_client)
... # use them
... openstack_client.identity.tenants.list()
... openstack_client.compute.servers.list()
"""
service_type = base_client_instance.service_type
if service_type and not hasattr(self, service_type):
setattr(self, service_type, base_client_instance)
def authenticate(self):
self.auth_plugin.authenticate(self)
# Store the authentication results in the keyring for later requests
if self.keyring_saver:
self.keyring_saver.save(self)
class BaseClient(object):
    """Top-level object to access the OpenStack API.
    This client uses :class:`HTTPClient` to send requests. :class:`HTTPClient`
    will handle a bunch of issues such as authentication.
    """

    service_type = None
    endpoint_type = None  # "publicURL" will be used
    cached_endpoint = None

    def __init__(self, http_client, extensions=None):
        self.http_client = http_client
        # Register this client so the HTTP client can expose it by service type.
        http_client.add_client(self)
        # Attach any extension managers as attributes of this client.
        for ext in (extensions or ()):
            if ext.manager_class:
                setattr(self, ext.name, ext.manager_class(self))

    def client_request(self, method, url, **kwargs):
        """Delegate an HTTP call to the shared :class:`HTTPClient`."""
        return self.http_client.client_request(self, method, url, **kwargs)

    def head(self, url, **kwargs):
        return self.client_request("HEAD", url, **kwargs)

    def get(self, url, **kwargs):
        return self.client_request("GET", url, **kwargs)

    def post(self, url, **kwargs):
        return self.client_request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.client_request("PUT", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.client_request("DELETE", url, **kwargs)

    def patch(self, url, **kwargs):
        return self.client_request("PATCH", url, **kwargs)

    @staticmethod
    def get_class(api_name, version, version_map):
        """Returns the client class for the requested API version
        :param api_name: the name of the API, e.g. 'compute', 'image', etc
        :param version: the requested API version
        :param version_map: a dict of client classes keyed by version
        :rtype: a client class for the requested API version
        """
        key = str(version)
        if key in version_map:
            return importutils.import_class(version_map[key])
        msg = _("Invalid %(api_name)s client version '%(version)s'. "
                "Must be one of: %(version_map)s") % {
                    'api_name': api_name,
                    'version': version,
                    'version_map': ', '.join(version_map.keys())
                }
        raise exceptions.UnsupportedVersion(msg)
|
apache-2.0
|
abhattad4/Digi-Menu
|
digimenu2/tests/template_tests/filter_tests/test_dictsort.py
|
342
|
1477
|
from django.template.defaultfilters import dictsort
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):
    """Tests for the `dictsort` template filter."""

    def test_sort(self):
        """dictsort() orders a list of dicts by the given key."""
        sorted_dicts = dictsort(
            [{'age': 23, 'name': 'Barbara-Ann'},
             {'age': 63, 'name': 'Ra Ra Rasputin'},
             {'name': 'Jonny B Goode', 'age': 18}],
            'age',
        )
        self.assertEqual(
            # `d`, not `dict`: avoid shadowing the builtin.
            [sorted(d.items()) for d in sorted_dicts],
            [[('age', 18), ('name', 'Jonny B Goode')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 63), ('name', 'Ra Ra Rasputin')]],
        )

    def test_dictsort_complex_sorting_key(self):
        """
        Since dictsort uses template.Variable under the hood, it can sort
        on keys like 'foo.bar'.
        """
        data = [
            {'foo': {'bar': 1, 'baz': 'c'}},
            {'foo': {'bar': 2, 'baz': 'b'}},
            {'foo': {'bar': 3, 'baz': 'a'}},
        ]
        sorted_data = dictsort(data, 'foo.baz')
        self.assertEqual([d['foo']['bar'] for d in sorted_data], [3, 2, 1])

    def test_invalid_values(self):
        """
        If dictsort is passed something other than a list of dictionaries,
        fail silently.
        """
        self.assertEqual(dictsort([1, 2, 3], 'age'), '')
        self.assertEqual(dictsort('Hello!', 'age'), '')
        self.assertEqual(dictsort({'a': 1}, 'age'), '')
        self.assertEqual(dictsort(1, 'age'), '')
|
bsd-3-clause
|
petewarden/tensorflow
|
tensorflow/python/training/checkpoint_utils_test.py
|
12
|
19687
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import util as trackable_utils
def _create_checkpoints(sess, checkpoint_dir):
  """Create var1..var3 and useful_scope/var4, then save a checkpoint.

  Returns the numpy values of the four variables as saved.
  """
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  v1 = variable_scope.get_variable("var1", [1, 10])
  v2 = variable_scope.get_variable("var2", [10, 10])
  v3 = variable_scope.get_variable("var3", [100, 100])
  with variable_scope.variable_scope("useful_scope"):
    v4 = variable_scope.get_variable("var4", [9, 9])
  sess.run(variables.global_variables_initializer())
  v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
  """Save a checkpoint containing one partitioned variable, "scope/var1".

  The [100, 100] variable is split along axis 0 into at most 5 partitions.
  Returns the list of per-partition numpy values.
  """
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  with variable_scope.variable_scope("scope"):
    v1 = variable_scope.get_variable(
        name="var1",
        shape=[100, 100],
        initializer=init_ops.truncated_normal_initializer(0.5),
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
class CheckpointsTest(test.TestCase):
  """Tests for load_variable, list_variables and init_from_checkpoint."""

  def testNoCheckpoints(self):
    checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
    with self.assertRaises(errors_impl.OpError):
      self.assertAllEqual(
          checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])

  def testNoTensor(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
    with self.assertRaises(errors_impl.OpError):
      self.assertAllEqual(
          checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])

  def testGetTensor(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
    self.assertAllEqual(
        checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
    self.assertAllEqual(
        checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
    self.assertAllEqual(
        checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
    self.assertAllEqual(
        checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)

  def testGetAllVariables(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      _create_checkpoints(session, checkpoint_dir)
    self.assertEqual(
        checkpoint_utils.list_variables(checkpoint_dir),
        [("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
         ("var3", [100, 100])])

  def testInitFromCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable("my1", [1, 10])
          with variable_scope.variable_scope("some_other_scope"):
            my2 = variable_scope.get_variable("my2", [10, 10])
            with variable_scope.variable_scope("other_useful_scope"):
              my4 = variable_scope.get_variable("var4", [9, 9])
        my3 = variable_scope.get_variable("my3", [100, 100])
        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
            "var1": "some_scope/my1",
            "useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
        })
        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
            "var2": "some_scope/some_other_scope/my2",
            "var3": my3,
        })
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(my1.eval(session), v1)
        self.assertAllEqual(my2.eval(session), v2)
        self.assertAllEqual(my3.eval(session), v3)
        self.assertAllEqual(my4.eval(session), v4)
        # Check that tensors are not explicitly in the graph.
        self.assertLess(len(str(session.graph.as_graph_def())), 29000)

  def testInitialValueComesFromCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope(
            "some_scope", initializer=init_ops.zeros_initializer()):
          my1 = variable_scope.get_variable("my1", [1, 10])
        # `before` is captured prior to rewiring the initializer, `after`
        # afterwards; they must evaluate to different values.
        before = my1.initialized_value()
        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
        after = my1.initialized_value()
        self.assertAllEqual(session.run(before), [[0.0] * 10])
        self.assertAllEqual(session.run(after), v1)
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(session.run(my1), v1)
        self.assertAllEqual(session.run(my1.initialized_value()), v1)
        self.assertAllClose(session.run(before), v1)
        self.assertAllClose(session.run(after), v1)
        with self.assertRaises(AssertionError):
          self.assertAllClose(v1, [[0.0] * 10])

  def testInitWithScopeDoesNotCaptureSuffixes(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      _, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
    with ops.Graph().as_default() as g:
      with variable_scope.variable_scope("useful_scope"):
        my4 = variable_scope.get_variable("var4", [9, 9])
      # "useful_scope_1" shares only a prefix with "useful_scope" and must
      # not be initialized from the checkpoint mapping below.
      with variable_scope.variable_scope("useful_scope_1"):
        my5_init = [[1.0, 2.0], [3.0, 4.0]]
        my5 = variable_scope.get_variable("var5", initializer=my5_init)
      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"useful_scope/": "useful_scope/"})
      with self.session(graph=g) as session:
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(my4.eval(session), v4)
        self.assertAllEqual(my5.eval(session), my5_init)

  def testRestoreRunsOnSameDevice(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      _create_checkpoints(session, checkpoint_dir)
    with ops.Graph().as_default():
      with ops.device("/job:ps"):
        with variable_scope.variable_scope("useful_scope"):
          variable_scope.get_variable("var4", [9, 9])
      # Building the restore must not fail even though the variable is
      # placed on a remote device.
      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"useful_scope/": "useful_scope/"})

  def testInitFromRootCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable("var1", [1, 10])
          my2 = variable_scope.get_variable("var2", [10, 10])
          my3 = variable_scope.get_variable("var3", [100, 100])
          with variable_scope.variable_scope("useful_scope"):
            my4 = variable_scope.get_variable("var4", [9, 9])
        # Map the checkpoint root onto "some_scope/".
        checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                              {"/": "some_scope/",})
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(my1.eval(session), v1)
        self.assertAllEqual(my2.eval(session), v2)
        self.assertAllEqual(my3.eval(session), v3)
        self.assertAllEqual(my4.eval(session), v4)

  def testInitToRootCheckpoint(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        my1 = variable_scope.get_variable("var1", [1, 10])
        my2 = variable_scope.get_variable("var2", [10, 10])
        my3 = variable_scope.get_variable("var3", [100, 100])
        with variable_scope.variable_scope("useful_scope"):
          my4 = variable_scope.get_variable("var4", [9, 9])
        # Identity mapping: checkpoint root to graph root.
        checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                              {"/": "/",})
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(my1.eval(session), v1)
        self.assertAllEqual(my2.eval(session), v2)
        self.assertAllEqual(my3.eval(session), v3)
        self.assertAllEqual(my4.eval(session), v4)

  def testInitFromPartitionVar(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1 = _create_partition_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=init_ops.zeros_initializer(),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()
        # Create another variable with different partitions than the variable in
        # the checkpoint.
        with variable_scope.variable_scope("some_other_scope"):
          my2 = variable_scope.get_variable(
              name="var1",
              shape=[100, 100],
              initializer=init_ops.zeros_initializer(),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=16 << 10))
          my2_var_list = my2._get_variable_list()
        checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
            "scope/var1": "some_scope/my1",
            "scope/": "some_other_scope/"})
        session.run(variables.global_variables_initializer())
        my1_values = session.run(my1_var_list)
        self.assertAllEqual(my1_values, v1)
        my2_values = session.run(my2_var_list)
        # Verify we created different number of partitions.
        self.assertNotEqual(len(my2_values), len(v1))
        # Verify the values were correctly initialized in spite of different
        # partitions.
        full_my2_values = np.concatenate(my2_values, axis=0)
        full_v1_values = np.concatenate(v1, axis=0)
        self.assertAllEqual(full_my2_values, full_v1_values)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          my1 = variable_scope.get_variable(
              name="my1",
              shape=[100, 100],
              initializer=init_ops.truncated_normal_initializer(0.5),
              partitioner=partitioned_variables.min_max_variable_partitioner(
                  max_partitions=5, axis=0, min_slice_size=8 << 10))
          my1_var_list = my1._get_variable_list()
        checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                              {"scope/var1": my1_var_list,})
        session.run(variables.global_variables_initializer())
        my1_values = session.run(my1_var_list)
        self.assertAllEqual(my1_values, v1)

  def testInitFromCheckpointMissing(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      _, _, _, _ = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        with variable_scope.variable_scope("some_scope"):
          _ = variable_scope.get_variable("my1", [10, 10])
          _ = variable_scope.get_variable(
              "my2", [1, 10],
              dtype=dtypes.int64,
              initializer=init_ops.zeros_initializer())
        # No directory.
        with self.assertRaises(errors_impl.OpError):
          checkpoint_utils.init_from_checkpoint("no_dir",
                                                {"var1": "some_scope/my1"})
        # No variable in checkpoint.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"no_var": "some_scope/my1"})
        # No variable in the graph.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"var3": "some_scope/no_var"})
        # Shape mismatch.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"var1": "some_scope/my1"})
        # Variable 'my1' and 'my2' are missing in given checkpoint scope.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(
              checkpoint_dir, {"useful_scope/": "some_scope/"})
        # Mapping is not to scope name.
        with self.assertRaises(ValueError):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                                {"useful_scope": "some_scope/"})

  def testNoAdditionalReadOpsForResourceVariables(self):
    checkpoint_dir = self.get_temp_dir()
    with self.cached_session() as session:
      v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
    # New graph and session.
    with ops.Graph().as_default() as g:
      with self.session(graph=g) as session:
        my1 = resource_variable_ops.ResourceVariable([[0.0] * 10], name="my1")
        with ops.name_scope("init_from_checkpoint"):
          checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
        # Basic sanity checks:
        session.run(variables.global_variables_initializer())
        self.assertAllEqual(session.run(my1), v1)
    # Only the restore-assign machinery may appear in the scope; any other
    # op would mean extra reads were added for the resource variable.
    ops_in_init_from_checkpoint_scope = [
        op for op in g.get_operations()
        if (op.name.startswith("init_from_checkpoint/") and
            not op.name.startswith("init_from_checkpoint/checkpoint_initializer"
                                  ) and
            op.type != "AssignVariableOp" and
            op.type != "Identity")
    ]
    self.assertEqual(ops_in_init_from_checkpoint_scope, [])
class CheckpointIteratorTest(test.TestCase):
  """Tests for checkpoint_utils.checkpoints_iterator."""

  @test_util.run_in_graph_and_eager_modes
  def testReturnsEmptyIfNoCheckpointsFound(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), "no_checkpoints_found")
    num_found = 0
    # timeout=0 makes the iterator give up immediately instead of polling.
    for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
      num_found += 1
    self.assertEqual(num_found, 0)

  @test_util.run_in_graph_and_eager_modes
  def testReturnsSingleCheckpointIfOneCheckpointFound(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(), "one_checkpoint_found")
    if not gfile.Exists(checkpoint_dir):
      gfile.MakeDirs(checkpoint_dir)
    save_path = os.path.join(checkpoint_dir, "model.ckpt")
    a = resource_variable_ops.ResourceVariable(5)
    self.evaluate(a.initializer)
    checkpoint = trackable_utils.Checkpoint(a=a)
    checkpoint.save(file_prefix=save_path)
    num_found = 0
    for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
      num_found += 1
    self.assertEqual(num_found, 1)

  @test_util.run_v1_only("Tests v1-style checkpoint sharding")
  def testReturnsSingleCheckpointIfOneShardedCheckpoint(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  "one_checkpoint_found_sharded")
    if not gfile.Exists(checkpoint_dir):
      gfile.MakeDirs(checkpoint_dir)
    global_step = variables.Variable(0, name="v0")
    # This will result in 3 different checkpoint shard files.
    with ops.device("/cpu:0"):
      variables.Variable(10, name="v1")
    with ops.device("/cpu:1"):
      variables.Variable(20, name="v2")
    saver = saver_lib.Saver(sharded=True)
    with session_lib.Session(
        target="",
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as session:
      session.run(variables.global_variables_initializer())
      save_path = os.path.join(checkpoint_dir, "model.ckpt")
      saver.save(session, save_path, global_step=global_step)
    num_found = 0
    # The sharded checkpoint must still be reported as a single checkpoint.
    for _ in checkpoint_utils.checkpoints_iterator(checkpoint_dir, timeout=0):
      num_found += 1
    self.assertEqual(num_found, 1)

  @test_util.run_in_graph_and_eager_modes
  def testTimeoutFn(self):
    timeout_fn_calls = [0]

    def timeout_fn():
      # Mutable cell lets the closure count invocations; stop after 3 calls.
      timeout_fn_calls[0] += 1
      return timeout_fn_calls[0] > 3

    results = list(
        checkpoint_utils.checkpoints_iterator(
            "/non-existent-dir", timeout=0.1, timeout_fn=timeout_fn))
    self.assertEqual([], results)
    self.assertEqual(4, timeout_fn_calls[0])
@test_util.run_all_in_graph_and_eager_modes
class WaitForNewCheckpointTest(test.TestCase):
  """Tests for checkpoint_utils.wait_for_new_checkpoint."""

  def testReturnsNoneAfterTimeout(self):
    start = time.time()
    ret = checkpoint_utils.wait_for_new_checkpoint(
        "/non-existent-dir", "foo", timeout=1.0, seconds_to_sleep=0.5)
    end = time.time()
    self.assertIsNone(ret)
    # We've waited one second.
    self.assertGreater(end, start + 0.5)
    # The timeout kicked in.
    self.assertLess(end, start + 1.1)
# Run all tests when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
apache-2.0
|
Rfam/rfam-production
|
scripts/release/genome_download_engine.py
|
1
|
1660
|
import os
import sys
import subprocess
from config import gen_config as gc
from config import rfam_local as rl
# ----------------------------------------------------------------------
def launch_genome_download(project_dir, upid_list):
fp = open(upid_list, 'r')
upids = [x.strip() for x in fp]
fp.close()
if not os.path.exists(project_dir):
os.mkdir(project_dir)
os.chmod(project_dir, 0777)
for upid in upids:
# get subdir index and create if it does not exist
subdir_idx = upid[-3:]
subdir = os.path.join(project_dir, subdir_idx)
if not os.path.exists(subdir):
os.mkdir(subdir)
os.chmod(subdir, 0777)
prot_dir = os.path.join(subdir, upid)
if not os.path.exists(prot_dir):
os.mkdir(prot_dir)
os.chmod(prot_dir, 0777)
cmd = ("bsub -M %s "
"-R \"rusage[mem=%s,tmp=%s]\" "
"-o \"%s\" "
"-e \"%s\" "
"-u \"%s\" "
"-n 4 "
"-Ep \"rm -rf luigi\" "
"-g %s "
"python %s %s %s") % (
gc.MEM, gc.MEM, gc.TMP_MEM,
os.path.join(prot_dir, "download.out"),
os.path.join(prot_dir, "download.err"),
gc.USER_EMAIL, gc.LSF_GEN_GROUP,
rl.DWL_SCRIPT, prot_dir, upid)
subprocess.call(cmd, shell=True)
# ----------------------------------------------------------------------
# Usage: genome_download_engine.py <project_dir> <upid_list_file>
if __name__ == '__main__':
    project_dir = sys.argv[1]
    upid_list = sys.argv[2]
    launch_genome_download(project_dir, upid_list)
|
apache-2.0
|
Factr/maya
|
test_maya.py
|
1
|
2080
|
import pytest
from datetime import datetime
import maya
def test_rfc2822():
    # Round-trip: RFC 2822 string -> MayaDT -> RFC 2822 string.
    r = maya.now().rfc2822()
    d = maya.MayaDT.from_rfc2822(r)
    assert r == d.rfc2822()
def test_iso8601():
    # Round-trip: ISO 8601 string -> MayaDT -> ISO 8601 string.
    r = maya.now().iso8601()
    d = maya.MayaDT.from_iso8601(r)
    assert r == d.iso8601()
def test_human_when():
    """'yesterday' is exactly one calendar day before 'today'."""
    r1 = maya.when('yesterday')
    r2 = maya.when('today')
    # Comparing `.day` alone breaks across month boundaries (e.g. on the
    # 1st, yesterday's day-of-month is 28-31); compare actual dates.
    assert (r2.datetime().date() - r1.datetime().date()).days == 1
def test_machine_parse():
    # parse() handles explicit, machine-friendly date strings.
    r1 = maya.parse('August 14, 2015')
    assert r1.day == 14
    r2 = maya.parse('August 15, 2015')
    assert r2.day == 15
def test_dt_tz_translation():
    """datetime() yields UTC by default and translates to other zones."""
    d1 = maya.now().datetime()
    d2 = maya.now().datetime(to_timezone='US/Eastern')
    # The original asserted d1.hour - d2.hour == 5, which fails during DST
    # (Eastern is UTC-4 then) and wraps negative around midnight. Check the
    # UTC offsets of the aware datetimes instead.
    assert d1.utcoffset().total_seconds() == 0
    assert d2.utcoffset().total_seconds() in (-5 * 3600, -4 * 3600)
def test_dt_tz_naive():
    """naive=True strips tzinfo while still translating the wall clock."""
    d1 = maya.now().datetime(naive=True)
    assert d1.tzinfo is None
    d2 = maya.now().datetime(to_timezone='US/Eastern', naive=True)
    assert d2.tzinfo is None
    # Eastern wall clock lags UTC by 4 (EDT) or 5 (EST) hours; modular
    # arithmetic keeps the check valid around midnight.
    assert (d1.hour - d2.hour) % 24 in (4, 5)
def test_random_date():
    # A two-digit year ('11') is interpreted as 2011.
    d = maya.when('11-17-11 08:09:10')
    assert d.year == 2011
    assert d.month == 11
    assert d.day == 17
    assert d.hour == 8
    assert d.minute == 9
    assert d.second == 10
    assert d.microsecond == 0
def test_print_date(capsys):
    # str()/print of a MayaDT exposes its epoch timestamp.
    d = maya.when('11-17-11')
    print(d)
    out, err = capsys.readouterr()
    assert out == '<MayaDT epoch=1321488000.0>\n'
def test_invalid_date():
    # Unparseable human input raises ValueError.
    with pytest.raises(ValueError):
        maya.when('another day')
def test_slang_date():
    # slang_date() renders a human-friendly relative date.
    d = maya.when('tomorrow')
    assert d.slang_date() == 'tomorrow'
def test_slang_time():
    # slang_time() renders a human-friendly relative time.
    d = maya.when('one hour ago')
    assert d.slang_time() == 'an hour ago'
def test_parse():
    # parse() defaults to month-first; day_first=True flips the order.
    d = maya.parse('February 21, 1994')
    assert format(d) == '1994-02-21 00:00:00+00:00'
    d = maya.parse('01/05/2016')
    assert format(d) == '2016-01-05 00:00:00+00:00'
    d = maya.parse('01/05/2016', day_first=True)
    assert format(d) == '2016-05-01 00:00:00+00:00'
def test_datetime_to_timezone():
    # The translated datetime carries the requested tzinfo zone.
    dt = maya.when('2016-01-01').datetime(to_timezone='US/Eastern')
    assert dt.tzinfo.zone == 'US/Eastern'
|
mit
|
karyon/django
|
tests/admin_filters/models.py
|
39
|
2298
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Book(models.Model):
    # Fixture model with a spread of field types (char, int, FK, M2M,
    # null-boolean, date) for exercising admin list filters.
    title = models.CharField(max_length=50)
    year = models.PositiveIntegerField(null=True, blank=True)
    author = models.ForeignKey(
        User,
        models.SET_NULL,
        verbose_name="Verbose Author",
        related_name='books_authored',
        blank=True, null=True,
    )
    contributors = models.ManyToManyField(
        User,
        verbose_name="Verbose Contributors",
        related_name='books_contributed',
        blank=True,
    )
    employee = models.ForeignKey(
        'Employee',
        models.SET_NULL,
        verbose_name='Employee',
        blank=True, null=True,
    )
    # NullBooleanField: True/False/None; 0 is a falsy (False-like) default.
    is_best_seller = models.NullBooleanField(default=0)
    date_registered = models.DateField(null=True)
    # This field name is intentionally 2 characters long (#16080).
    no = models.IntegerField(verbose_name='number', blank=True, null=True)

    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Department(models.Model):
    # `code` is unique so it can be the FK target (to_field) from Employee.
    code = models.CharField(max_length=4, unique=True)
    description = models.CharField(max_length=50, blank=True, null=True)

    def __str__(self):
        return self.description
@python_2_unicode_compatible
class Employee(models.Model):
    # FK pointing at a non-pk field (Department.code) via to_field.
    department = models.ForeignKey(Department, models.CASCADE, to_field="code")
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
    tag = models.SlugField()
    # Generic relation: (content_type, object_id) addresses any model row.
    content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='tagged_items')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')

    def __str__(self):
        return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
    url = models.URLField()
    # Reverse side of TaggedItem's generic foreign key.
    tags = GenericRelation(TaggedItem)

    def __str__(self):
        return self.url
|
bsd-3-clause
|
heracek/django-nonrel
|
django/contrib/localflavor/ie/ie_counties.py
|
503
|
1127
|
"""
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
# The 32 traditional counties of the island of Ireland (Republic and
# Northern Ireland), as (value, lazily-translated label) form-field choices.
IE_COUNTY_CHOICES = (
    ('antrim', _('Antrim')),
    ('armagh', _('Armagh')),
    ('carlow', _('Carlow')),
    ('cavan', _('Cavan')),
    ('clare', _('Clare')),
    ('cork', _('Cork')),
    ('derry', _('Derry')),
    ('donegal', _('Donegal')),
    ('down', _('Down')),
    ('dublin', _('Dublin')),
    ('fermanagh', _('Fermanagh')),
    ('galway', _('Galway')),
    ('kerry', _('Kerry')),
    ('kildare', _('Kildare')),
    ('kilkenny', _('Kilkenny')),
    ('laois', _('Laois')),
    ('leitrim', _('Leitrim')),
    ('limerick', _('Limerick')),
    ('longford', _('Longford')),
    ('louth', _('Louth')),
    ('mayo', _('Mayo')),
    ('meath', _('Meath')),
    ('monaghan', _('Monaghan')),
    ('offaly', _('Offaly')),
    ('roscommon', _('Roscommon')),
    ('sligo', _('Sligo')),
    ('tipperary', _('Tipperary')),
    ('tyrone', _('Tyrone')),
    ('waterford', _('Waterford')),
    ('westmeath', _('Westmeath')),
    ('wexford', _('Wexford')),
    ('wicklow', _('Wicklow')),
)
|
bsd-3-clause
|
jeanpaul/bitbot
|
jsonrpc/serviceHandler.py
|
61
|
3239
|
"""
Copyright (c) 2007 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from jsonrpc import loads, dumps, JSONEncodeException
def ServiceMethod(fn):
    """Decorator marking *fn* as callable through the JSON-RPC service.

    ServiceHandler.findServiceEndpoint only dispatches to attributes that
    carry a truthy ``IsServiceMethod`` flag; the function itself is returned
    unchanged.
    """
    setattr(fn, 'IsServiceMethod', True)
    return fn
class ServiceException(Exception):
    """Base class for all errors raised while handling a JSON-RPC request."""
    pass
class ServiceRequestNotTranslatable(ServiceException):
    """Raised when the incoming request body cannot be decoded as JSON."""
    pass
class BadServiceRequest(ServiceException):
    """Raised when a decoded request lacks the id/method/params members."""
    pass
class ServiceMethodNotFound(ServiceException):
    """Raised when the requested name does not resolve to a service method
    (missing attribute, or attribute without the IsServiceMethod flag)."""

    def __init__(self, name):
        # Name of the method the client asked for.
        self.methodName=name
class ServiceHandler(object):
    """Dispatches JSON-RPC request strings to methods of a wrapped service
    object and renders the JSON-RPC response string.

    NOTE: this module is written in Python 2 syntax ("except X, e",
    ``err.message``).
    """

    def __init__(self, service):
        # Object whose @ServiceMethod-marked methods are exposed.
        self.service=service

    def handleRequest(self, json):
        """Decode *json*, invoke the requested method and return the encoded
        JSON-RPC response.

        Errors accumulate in ``err``: each stage runs only if all previous
        stages succeeded, and any captured exception is reported in the
        response instead of propagating to the caller.
        """
        err=None
        result = None
        id_=''
        try:
            req = self.translateRequest(json)
        except ServiceRequestNotTranslatable, e:
            err = e
            # Minimal fallback request so translateResult still has an id.
            req={'id':id_}
        if err==None:
            try:
                id_ = req['id']
                methName = req['method']
                args = req['params']
            except:
                # Missing id/method/params -> malformed JSON-RPC request.
                err = BadServiceRequest(json)
        if err == None:
            try:
                meth = self.findServiceEndpoint(methName)
            except Exception, e:
                err = e
        if err == None:
            try:
                result = self.invokeServiceEndpoint(meth, args)
            except Exception, e:
                err = e
        resultdata = self.translateResult(result, err, id_)
        return resultdata

    def translateRequest(self, data):
        """Decode the JSON body; raise ServiceRequestNotTranslatable if *data*
        is not valid JSON."""
        try:
            req = loads(data)
        except:
            raise ServiceRequestNotTranslatable(data)
        return req

    def findServiceEndpoint(self, name):
        """Return the service method named *name*; raise ServiceMethodNotFound
        if it is absent or not flagged with IsServiceMethod."""
        try:
            meth = getattr(self.service, name)
            if getattr(meth, "IsServiceMethod"):
                return meth
            else:
                raise ServiceMethodNotFound(name)
        except AttributeError:
            raise ServiceMethodNotFound(name)

    def invokeServiceEndpoint(self, meth, args):
        """Call the resolved method with the request's positional params."""
        return meth(*args)

    def translateResult(self, rslt, err, id_):
        """Encode the JSON-RPC response.  On error the result is nulled and a
        {name, message} error object is reported; a non-serializable result is
        itself reported as a JSONEncodeException error."""
        if err != None:
            err = {"name": err.__class__.__name__, "message":err.message}
            rslt = None
        try:
            data = dumps({"result":rslt,"id":id_,"error":err})
        except JSONEncodeException, e:
            err = {"name": "JSONEncodeException", "message":"Result Object Not Serializable"}
            data = dumps({"result":None, "id":id_,"error":err})
        return data
|
mit
|
retomerz/intellij-community
|
python/testData/refactoring/pullup/pyPullUpInfoModel.py
|
80
|
1827
|
# Deliberately empty base class (pull-up refactoring test fixture).
class EmptyParent:pass
class SomeParent:
    # Fixture parent: one class field, one instance field, one method, all
    # referenced by members of ChildWithDependencies below.
    PARENT_CLASS_FIELD = 42

    def __init__(self):
        self.parent_instance_field = "egg"

    def parent_func(self):
        pass
class ChildWithDependencies(SomeParent, EmptyParent):
    # Fixture child: members with assorted dependencies on parent members,
    # class fields, instance fields and properties (old- and new-style), used
    # to exercise the IDE's pull-up member-dependency analysis.
    CLASS_FIELD_FOO = 42
    CLASS_FIELD_DEPENDS_ON_CLASS_FIELD_FOO = CLASS_FIELD_FOO
    CLASS_FIELD_DEPENDS_ON_PARENT_FIELD = SomeParent.PARENT_CLASS_FIELD

    def __init__(self):
        SomeParent.__init__(self)
        self.instance_field_bar = 42
        self.depends_on_instance_field_bar = self.instance_field_bar
        self.depends_on_class_field_foo = ChildWithDependencies.CLASS_FIELD_FOO

    # NOTE(review): this first new_property definition is shadowed by the
    # getter/setter pair defined further down; presumably intentional fixture
    # content — do not "fix".
    @property
    def new_property(self):
        return 1

    def _set_prop(self, val):
        pass

    def _get_prop(self):
        return 1

    def _del_prop(self):
        pass

    # Old-style property() calls, each exercising a single accessor slot.
    old_property = property(fset=_set_prop)
    old_property_2 = property(fget=_get_prop)
    old_property_3 = property(fdel=_del_prop)

    @property
    def new_property(self):
        return 1

    @new_property.setter
    def new_property(self, val):
        pass

    @property
    def new_property_2(self):
        return 1

    def normal_method(self):
        pass

    def method_depends_on_parent_method(self):
        self.parent_func()
        pass

    def method_depends_on_parent_field(self):
        i = self.parent_instance_field
        pass

    def method_depends_on_normal_method(self):
        self.normal_method()

    def method_depends_on_instance_field_bar(self):
        eggs = self.instance_field_bar

    def method_depends_on_old_property(self):
        # Touch all three old-style property slots: set, get, delete.
        i = 12
        self.old_property = i
        q = self.old_property_2
        del self.old_property_3

    def method_depends_on_new_property(self):
        self.new_property = 12
        print(self.new_property_2)
|
apache-2.0
|
akiellor/selenium
|
py/test/selenium/webdriver/common/alerts_tests.py
|
4
|
3318
|
#Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from selenium.webdriver.common.by import By
import unittest
class AlertsTest(unittest.TestCase):
    """WebDriver integration tests for JavaScript alert/prompt handling.

    NOTE(review): self.driver and self.webserver are not created in this
    class; presumably the test runner injects them before execution — verify
    against the suite's conftest/runner.
    """

    def testShouldBeAbleToOverrideTheWindowAlertMethod(self):
        # Replacing window.alert in-page must not break clicking the trigger.
        self._loadPage("alerts")
        self.driver.execute_script(
            "window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
        self.driver.find_element(by=By.ID, value="alert").click()

    def testShouldAllowUsersToAcceptAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self.driver.switch_to_alert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowUsersToDismissAnAlertManually(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self.driver.switch_to_alert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToAcceptAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self.driver.switch_to_alert()
        alert.accept()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToDismissAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self.driver.switch_to_alert()
        alert.dismiss()
        # If we can perform any action, we're good to go
        self.assertEqual("Testing Alerts", self.driver.title)

    def testShouldAllowAUserToSetTheValueOfAPrompt(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="prompt").click()
        alert = self.driver.switch_to_alert()
        alert.send_keys("cheese")
        alert.accept()
        result = self.driver.find_element(by=By.ID, value="text").text
        self.assertEqual("cheese", result)

    def testShouldAllowTheUserToGetTheTextOfAnAlert(self):
        self._loadPage("alerts")
        self.driver.find_element(by=By.ID, value="alert").click()
        alert = self.driver.switch_to_alert()
        value = alert.text
        alert.accept()
        self.assertEqual("cheese", value)

    # --- helpers -----------------------------------------------------------

    def _pageURL(self, name):
        """Build the URL of a fixture page served by the local web server."""
        return "http://localhost:%d/%s.html" % (self.webserver.port, name)

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        """Navigate the driver to the named fixture page."""
        self.driver.get(self._pageURL(name))
|
apache-2.0
|
pombredanne/dxr
|
dxr/plugins/clang/tests/test_operator_call.py
|
7
|
2060
|
from dxr.plugins.clang.tests import CSingleFileTestCase
class OperatorCallTests(CSingleFileTestCase):
    """Implicit operator()/operator[] invocations must yield function-ref
    needles on the operator token and var-ref needles on the argument."""

    source = """
struct Foo
{
void operator()(int)
{
}
void operator[](int)
{
}
};
int main()
{
Foo foo;
int alpha = 0;
foo(alpha);
int beta = 0;
foo[beta];
return 0;
}
"""

    def test_operator_call(self):
        self.found_line_eq('+function-ref:Foo::operator()(int)',
                           'foo<b>(</b>alpha);')

    def test_call_argument(self):
        self.found_line_eq('+var-ref:main()::alpha',
                           'foo(<b>alpha</b>);')

    def test_operator_subscript(self):
        self.found_line_eq('+function-ref:Foo::operator[](int)',
                           'foo<b>[</b>beta];')

    def test_subscript_argument(self):
        self.found_line_eq('+var-ref:main()::beta',
                           'foo[<b>beta</b>];')
class ExplicitOperatorCallTests(CSingleFileTestCase):
    """Same operators as OperatorCallTests, but invoked via the explicit
    ``foo.operator()(...)`` / ``foo.operator[](...)`` spelling."""

    source = """
struct Foo
{
void operator()(int)
{
}
void operator[](int)
{
}
};
int main()
{
Foo foo;
int alpha = 0;
foo.operator()(alpha);
int beta = 0;
foo.operator[](beta);
return 0;
}
"""

    def test_operator_call(self):
        self.found_line_eq('+function-ref:Foo::operator()(int)',
                           'foo.<b>operator()</b>(alpha);')

    def test_call_argument(self):
        self.found_line_eq('+var-ref:main()::alpha',
                           'foo.operator()(<b>alpha</b>);')

    def test_operator_subscript(self):
        self.found_line_eq('+function-ref:Foo::operator[](int)',
                           'foo.<b>operator[]</b>(beta);')

    def test_subscript_argument(self):
        self.found_line_eq('+var-ref:main()::beta',
                           'foo.operator[](<b>beta</b>);')
|
mit
|
harisibrahimkv/django
|
django/utils/crypto.py
|
44
|
3092
|
"""
Django's standard crypto functions and utilities.
"""
import hashlib
import hmac
import random
import time
from django.conf import settings
from django.utils.encoding import force_bytes
# Use the system PRNG if possible
try:
    # NOTE: rebinds the module-level name ``random`` from the random module
    # to a SystemRandom instance, so every later random.* call in this module
    # draws from the OS entropy source.
    random = random.SystemRandom()
    using_sysrandom = True
except NotImplementedError:
    import warnings
    warnings.warn('A secure pseudo-random number generator is not available '
                  'on your system. Falling back to Mersenne Twister.')
    using_sysrandom = False
def salted_hmac(key_salt, value, secret=None):
    """
    Return the HMAC-SHA1 of 'value', using a key generated from key_salt and
    a secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.
    """
    if secret is None:
        secret = settings.SECRET_KEY
    # Derive a fixed-size key by hashing the salt together with the secret.
    # If len(key_salt + secret) exceeded SHA1's block size the hmac module
    # would hash it anyway; deriving explicitly keeps the scheme uniform.
    derived_key = hashlib.sha1(force_bytes(key_salt) + force_bytes(secret)).digest()
    return hmac.new(derived_key, msg=force_bytes(value), digestmod=hashlib.sha1)
def get_random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Return a securely generated random string.

    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    if not using_sysrandom:
        # Mersenne Twister fallback: re-seed from state that is hard for an
        # attacker to predict on every call.  Ugly, but better than absolute
        # predictability; may slightly alter the sequence's properties.
        seed_material = '%s%s%s' % (random.getstate(), time.time(), settings.SECRET_KEY)
        random.seed(hashlib.sha256(seed_material.encode()).digest())
    chosen = [random.choice(allowed_chars) for _ in range(length)]
    return ''.join(chosen)
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise.

    Uses hmac.compare_digest so the comparison time does not leak the
    position of the first mismatch.
    """
    left = force_bytes(val1)
    right = force_bytes(val2)
    return hmac.compare_digest(left, right)
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """Return the hash of password using pbkdf2.

    digest defaults to SHA-256; dklen=0 means "use the digest's native
    output length" (hashlib expects None for that, not 0).
    """
    if digest is None:
        digest = hashlib.sha256
    return hashlib.pbkdf2_hmac(
        digest().name,
        force_bytes(password),
        force_bytes(salt),
        iterations,
        dklen or None,
    )
|
bsd-3-clause
|
rhinstaller/anaconda
|
tests/unit_tests/pyanaconda_tests/modules/payloads/payload/test_flatpak_manager.py
|
2
|
14244
|
#
# Copyright (C) 2021 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
import unittest
import os
import gi
from tempfile import TemporaryDirectory
from unittest.mock import patch, Mock, call
from pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager import FlatpakManager
gi.require_version("Flatpak", "1.0")
from gi.repository.Flatpak import RefKind
class FlatpakTest(unittest.TestCase):
    """Unit tests for FlatpakManager.

    The Flatpak GI classes (Remote, Installation, Transaction) are patched
    with mocks, so no real flatpak installation is touched.
    """

    def setUp(self):
        self._remote = Mock()
        self._installation = Mock()
        self._transaction = Mock()

    def _setup_flatpak_objects(self, remote_cls, installation_cls, transaction_cls):
        # Wire the patched Flatpak classes so their factory constructors hand
        # back the mocks created in setUp().
        remote_cls.new.return_value = self._remote
        installation_cls.new_for_path.return_value = self._installation
        transaction_cls.new_for_installation.return_value = self._transaction
        self._transaction.get_installation.return_value = self._installation

    def test_is_available(self):
        """Test check for flatpak availability of the system sources."""
        self.assertFalse(FlatpakManager.is_source_available())
        with TemporaryDirectory() as temp:
            # NOTE(review): mutates the FlatpakManager *class* attribute and
            # never restores it; this can leak into later tests.
            FlatpakManager.LOCAL_REMOTE_PATH = "file://" + temp
            self.assertTrue(FlatpakManager.is_source_available())

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_initialize_with_path(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak initialize with path."""
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        flatpak = FlatpakManager("/mock/system/root/path")
        flatpak.initialize_with_path("/test/path/installation")
        remote_cls.new.assert_called_once()
        installation_cls.new_for_path.assert_called_once()
        transaction_cls.new_for_installation.assert_called_once_with(self._installation)
        expected_remote_calls = [call.set_gpg_verify(False),
                                 call.set_url(flatpak.LOCAL_REMOTE_PATH)]
        self.assertEqual(self._remote.method_calls, expected_remote_calls)
        expected_remote_calls = [call.add_remote(self._remote, False, None)]
        self.assertEqual(self._installation.method_calls, expected_remote_calls)

    def test_cleanup_call_without_initialize(self):
        """Test the cleanup call without initialize."""
        # Must be a no-op rather than an error.
        flatpak = FlatpakManager("/tmp/flatpak-test")
        flatpak.cleanup()

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.shutil.rmtree")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_cleanup_call_no_repo(self, remote_cls, installation_cls, transaction_cls, rmtree):
        """Test the cleanup call with no repository created."""
        flatpak = FlatpakManager("any path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        file_mock_path = Mock()
        file_mock_path.get_path.return_value = "/install/test/path"
        self._installation.get_path.return_value = file_mock_path
        flatpak.initialize_with_path("/install/test/path")
        flatpak.cleanup()
        # The directory does not exist on disk, so nothing may be removed.
        rmtree.assert_not_called()

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.shutil.rmtree")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_cleanup_call_mock_repo(self, remote_cls, installation_cls, transaction_cls, rmtree):
        """Test the cleanup call with mocked repository."""
        flatpak = FlatpakManager("any path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        with TemporaryDirectory() as temp:
            install_path = os.path.join(temp, "install/test/path")
            file_mock_path = Mock()
            file_mock_path.get_path.return_value = install_path
            self._installation.get_path.return_value = file_mock_path
            # Directory really exists this time, so cleanup must remove it.
            os.makedirs(install_path)
            flatpak.initialize_with_path(install_path)
            flatpak.cleanup()
            rmtree.assert_called_once_with(install_path)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_get_required_space(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak required space method."""
        flatpak = FlatpakManager("any path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        flatpak.initialize_with_system_path()
        self._installation.list_remote_refs_sync.return_value = [
            RefMock(installed_size=2000),
            RefMock(installed_size=3000),
            RefMock(installed_size=100)
        ]
        installation_size = flatpak.get_required_size()
        # Sum of the three mocked installed sizes.
        self.assertEqual(installation_size, 5100)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_get_empty_refs_required_space(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak required space method with no refs."""
        flatpak = FlatpakManager("any path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        flatpak.initialize_with_system_path()
        self._installation.list_remote_refs_sync.return_value = []
        installation_size = flatpak.get_required_size()
        self.assertEqual(installation_size, 0)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_install(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak installation is working."""
        flatpak = FlatpakManager("remote/path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        flatpak.initialize_with_system_path()
        mock_ref_list = [
            RefMock(name="org.space.coolapp", kind=RefKind.APP, arch="x86_64", branch="stable"),
            RefMock(name="com.prop.notcoolapp", kind=RefKind.APP, arch="i386", branch="f36"),
            RefMock(name="org.space.coolruntime", kind=RefKind.RUNTIME, arch="x86_64",
                    branch="stable"),
            RefMock(name="com.prop.notcoolruntime", kind=RefKind.RUNTIME, arch="i386",
                    branch="f36")
        ]
        self._installation.list_remote_refs_sync.return_value = mock_ref_list
        flatpak.install_all()
        # install_all must hook the progress callbacks, queue every remote ref
        # for installation, and finally run the transaction - in this order.
        expected_calls = [call.connect("new_operation", flatpak._operation_started_callback),
                          call.connect("operation_done", flatpak._operation_stopped_callback),
                          call.connect("operation_error", flatpak._operation_error_callback),
                          call.add_install(FlatpakManager.LOCAL_REMOTE_NAME,
                                           mock_ref_list[0].format_ref(),
                                           None),
                          call.add_install(FlatpakManager.LOCAL_REMOTE_NAME,
                                           mock_ref_list[1].format_ref(),
                                           None),
                          call.add_install(FlatpakManager.LOCAL_REMOTE_NAME,
                                           mock_ref_list[2].format_ref(),
                                           None),
                          call.add_install(FlatpakManager.LOCAL_REMOTE_NAME,
                                           mock_ref_list[3].format_ref(),
                                           None),
                          call.run()]
        self.assertEqual(self._transaction.mock_calls, expected_calls)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_add_remote(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak add new remote."""
        flatpak = FlatpakManager("remote/path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        flatpak.initialize_with_system_path()
        flatpak.add_remote("hive", "url://zerglings/home")
        remote_cls.new.assert_called_with("hive")
        self._remote.set_gpg_verify.assert_called_with(True)
        # NOTE(review): this line *calls* the mock instead of asserting on it;
        # it was probably meant to be
        # self._remote.set_url.assert_called_with("url://zerglings/home").
        self._remote.set_url("url://zerglings/home")
        self.assertEqual(remote_cls.new.call_count, 2)
        self.assertEqual(self._installation.add_remote.call_count, 2)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_remove_remote(self, remote_cls, installation_cls, transaction_cls):
        """Test flatpak remove a remote."""
        flatpak = FlatpakManager("remote/path")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        mock_remote1 = Mock()
        mock_remote2 = Mock()
        mock_remote1.get_name.return_value = "nest"
        mock_remote2.get_name.return_value = "hive"
        self._installation.list_remotes.return_value = [mock_remote1, mock_remote2]
        flatpak.initialize_with_system_path()
        flatpak.remove_remote("hive")
        self._installation.remove_remote.assert_called_once_with("hive", None)

    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Variant")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.VariantType")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.open")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Transaction")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Installation")
    @patch("pyanaconda.modules.payloads.payload.rpm_ostree.flatpak_manager.Remote")
    def test_replace_remote(self, remote_cls, installation_cls, transaction_cls,
                            open_mock, variant_type, variant):
        """Test flatpak replace remote for installed refs call."""
        flatpak = FlatpakManager("/system/test-root")
        self._setup_flatpak_objects(remote_cls, installation_cls, transaction_cls)
        install_path = "/installation/path"
        install_path_mock = Mock()
        install_path_mock.get_path.return_value = install_path
        self._installation.get_path.return_value = install_path_mock
        ref_mock_list = [
            RefMock(name="org.space.coolapp", kind=RefKind.APP, arch="x86_64", branch="stable"),
            RefMock(name="org.space.coolruntime", kind=RefKind.RUNTIME, arch="x86_64",
                    branch="stable")
        ]
        self._installation.list_installed_refs.return_value = ref_mock_list
        flatpak.initialize_with_system_path()
        flatpak.replace_installed_refs_remote("cylon_officer")
        expected_refs = list(map(lambda x: x.format_ref(), ref_mock_list))
        open_calls = []
        for ref in expected_refs:
            ref_file_path = os.path.join(install_path, ref, "active/deploy")
            open_calls.append(call(ref_file_path, "rb"))
            open_calls.append(call(ref_file_path, "wb"))
        # test that every file is read and written
        self.assertEqual(open_mock.call_count, 2 * len(expected_refs))
        # NOTE(review): Mock has no has_calls() method — this line is a no-op
        # child-mock access; it should be open_mock.assert_has_calls(open_calls).
        open_mock.has_calls(open_calls)
class RefMock(object):
    """Minimal stand-in for a Flatpak ref: exposes the getters and
    format_ref() that FlatpakManager consumes in the tests above."""

    def __init__(self, name="org.app", kind=RefKind.APP, arch="x86_64", branch="stable",
                 installed_size=0):
        self._ref_name = name
        self._ref_kind = kind
        self._ref_arch = arch
        self._ref_branch = branch
        self._size = installed_size

    def get_name(self):
        return self._ref_name

    def get_kind(self):
        return self._ref_kind

    def get_arch(self):
        return self._ref_arch

    def get_branch(self):
        return self._ref_branch

    def get_installed_size(self):
        return self._size

    def format_ref(self):
        # Same "<kind>/<name>/<arch>/<branch>" form flatpak itself uses.
        kind_label = "app" if self._ref_kind is RefKind.APP else "runtime"
        return "/".join((kind_label, self._ref_name, self._ref_arch, self._ref_branch))
|
gpl-2.0
|
vbelakov/h2o
|
py/testdir_multi_jvm/notest_exec2_multi_node.py
|
9
|
4457
|
import unittest, sys, random, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_jobs, h2o_exec as h2e
import h2o_util
import multiprocessing, os, signal, time
from multiprocessing import Process, Queue
print "single writer, single reader flows (after sequential init)"
print "restrict outstanding to # of nodes"

# Cluster size, number of concurrently outstanding workers, and number of
# trial groups to run.  (Python 2 module.)
# overrides the calc below if not None
NODES = 3
OUTSTANDING = NODES
TRIALMAX = 10

# problem with keyboard interrupt described
# http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/
def function_no_keyboard_intr(result_queue, function, *args):
    """Run *function(*args)* with SIGINT ignored and push its result onto
    *result_queue*; always returns True.

    Used as a multiprocessing worker target so Ctrl-C is handled only by the
    parent process (see the blog link above).
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    outcome = function(*args)
    result_queue.put(outcome)
    return True
def execit(n, bucket, path, src_key, hex_key, timeoutSecs=60, retryDelaySecs=1, pollTimeoutSecs=30):
    """Worker body: node 0 seeds r1=1 on the next node; every other node
    appears to poll until r(n)==1 and then propagates the flag to r(n+1).
    Returns the index of the node written to.  (Python 2 module; several
    parameters are unused here and exist to match the worker call signature.)
    """
    np1 = (n+1) % len(h2o.nodes)
    np = (n) % len(h2o.nodes)
    # doesn't work cause we can't have racing writers
    # execExpr = "r2 = (r2==%s) ? %s+1 : %s" % (np1, np1)
    if np == 0:
        execExpr = "r%s = 1" % np1
        print "Sending request to node: %s" % h2o.nodes[np1],
        h2e.exec_expr(node=h2o.nodes[np1], execExpr=execExpr, timeoutSecs=30)
    else:
        # flip to one if the prior value is 1 (unless you're the zero case
        execExpr = "r%s = (r%s==1) ? c(1) : c(0);" % (np1, np)
        print "Sending request to node: %s" % h2o.nodes[np1],
        (resultExec, fpResult) = h2e.exec_expr(node=h2o.nodes[np1], execExpr=execExpr, timeoutSecs=30)
        # Busy-poll until the predecessor's flag arrives.
        while fpResult != 1:
            print "to node: %s" % h2o.nodes[np1]
            (resultExec, fpResult) = h2e.exec_expr(node=h2o.nodes[np1], execExpr=execExpr, timeoutSecs=30)
    hex_key = np1
    return hex_key
class Basic(unittest.TestCase):
    """Multi-node exec test: fans out execit() workers in groups of
    OUTSTANDING processes and synchronizes on each group.  (Python 2.)"""

    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        SEED = h2o.setup_random_seed()
        h2o.init(node_count=NODES, java_heap_GB=4)
        # use_hdfs=True, hdfs_name_node='172.16.2.176', hdfs_version='cdh4'

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_exec2_multi_node(self):
        # Sequential init: make r0/r1/r2 known to every node first.
        for node in h2o.nodes:
            # get this key known to this node
            execExpr = "r0 = c(0); r1 = c(0); r2 = c(0);"
            print "Sending request to node: %s" % node
            h2e.exec_expr(node=node, execExpr=execExpr, timeoutSecs=30)
            # test the store expression
            execExpr = "(r1==0) ? c(0) : c(1)"
            print "Sending request to node: %s" % node
            h2e.exec_expr(node=node, execExpr=execExpr, timeoutSecs=30)
        global OUTSTANDING
        if not OUTSTANDING:
            OUTSTANDING = min(10, len(h2o.nodes))
        execTrial = 0
        worker_resultq = multiprocessing.Queue()
        while execTrial <= TRIALMAX:
            start = time.time()
            workers = []
            for o in range(OUTSTANDING):
                np = execTrial % len(h2o.nodes)
                retryDelaySecs = 5
                timeoutSecs = 60
                bucket = None
                csvPathname = None
                src_key = None
                hex_key = 'a'
                tmp = multiprocessing.Process(target=function_no_keyboard_intr,
                    args=(worker_resultq, execit, np, bucket, csvPathname, src_key, hex_key, timeoutSecs, retryDelaySecs))
                tmp.start()
                workers.append(tmp)
                execTrial += 1
            # Exec doesn't get tracked as a job. So can still have outstanding
            # now sync on them
            for worker in workers:
                try:
                    # this should synchronize
                    worker.join()
                    print "worker joined:", worker
                    # don't need him any more
                    worker.terminate()
                    hex_key = worker_resultq.get(timeout=2)
                except KeyboardInterrupt:
                    print 'parent received ctrl-c'
                    for worker in workers:
                        worker.terminate()
                        worker.join()
            elapsed = time.time() - start
            print "Group end at #", execTrial, "completed in", "%6.2f" % elapsed, "seconds.", \
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
if __name__ == '__main__':
    # Run through h2o's unittest wrapper when invoked directly.
    h2o.unit_main()
|
apache-2.0
|
AndreasHeger/alignlib
|
python/bench/bench_Alignment.py
|
1
|
2148
|
import timeit
import alignlib
# Benchmark parameters: timeit sample count and alignment length. (Python 2.)
NUM_SAMPLES=1000
ALISIZE=2000

# Shared fixtures for the Map/Combine benchmarks below, built once so the
# per-call timings exclude construction cost.
alignlib_vector = alignlib.makeAlignmentVector()
alignlib_vector.addDiagonal( 0, ALISIZE, 0)
python_vector = []
for x in xrange(ALISIZE):
    python_vector.append(x)
def pythonBuildVector():
    """build vector alignment in python."""
    # Pure-python baseline for alignlibBuildVector.
    vector = []
    for x in xrange(ALISIZE):
        vector.append(x)
def alignlibBuildVector():
    "Stupid test function"
    # alignlib counterpart of pythonBuildVector: one diagonal of ALISIZE pairs.
    vector = alignlib.makeAlignmentVector()
    vector.addDiagonal( 0, ALISIZE, 0)
def pythonMapVector():
    "test speed of mapRowToCol"
    # NOTE(review): redefined (identically, modulo docstring) further down;
    # the later definition is the one the benchmark actually runs.
    for x in xrange(ALISIZE):
        a = python_vector[x]
def alignlibMapVector():
    "test speed of mapRowToCol"
    for x in xrange(ALISIZE):
        a = alignlib_vector.mapRowToCol(x)
def pythonMapVector():
    """build vector alignment in python."""
    # NOTE(review): shadows the earlier pythonMapVector definition above.
    for x in xrange(ALISIZE):
        a = python_vector[x]
def alignlibCombineVector():
    "test combination of vectors"
    vector = alignlib.makeAlignmentVector()
    alignlib.combineAlignment( vector, alignlib_vector, alignlib_vector, alignlib.RR)
def pythonCombineVector():
    "test combination of vectors"
    # Pure-python analogue of combineAlignment: walk two maps in lockstep and
    # compose them.  Both inputs are the same python_vector here, so the
    # x<y / y<x catch-up branches never fire; they mirror the general
    # algorithm's shape for a fair timing comparison.
    x, y = 0, 0
    new = [0] * max(len(python_vector), len(python_vector))
    while x < len(python_vector) and y < len(python_vector):
        if x < y:
            x += 1
            continue
        elif y < x:
            y += 1
            continue
        mapped_x, mapped_y = python_vector[x], python_vector[y]
        new[mapped_x] = mapped_y
        x += 1
        y += 1
if __name__=='__main__':
    # Pair each pure-python implementation with its alignlib counterpart and
    # report the alignlib runtime as a percentage of the python runtime.
    tests = ( ("pythonBuildVector", "alignlibBuildVector"),
              ("pythonMapVector", "alignlibMapVector"),
              ("pythonCombineVector", "alignlibCombineVector" ) )
    for test1, test2 in tests:
        t1 = timeit.Timer("%s()" % test1, "from __main__ import %s" % test1)
        t2 = timeit.Timer("%s()" % test2, "from __main__ import %s" % test2)
        tt1 = t1.timeit( NUM_SAMPLES )
        tt2 = t2.timeit( NUM_SAMPLES )
        print "%s\t%s\t%5.2f\t%f\t%f" % (test2, test1, 100.0 * tt2 / tt1, tt2, tt1)
|
gpl-2.0
|
kenrachynski/powerline
|
tests/test_shells/postproc.py
|
19
|
3660
|
#!/usr/bin/env python
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import socket
import sys
import codecs
import platform
import re
# CLI: postproc.py <test_type> <test_client> <shell>
test_type = sys.argv[1]
test_client = sys.argv[2]
shell = sys.argv[3]

# <shell>.<type>.<client>.full.log is the raw capture; the scrubbed copy is
# written next to it without the ".full" part.
fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'full.log')))
new_fname = os.path.join('tests', 'shell', '.'.join((shell, test_type, test_client, 'log')))
pid_fname = os.path.join('tests', 'shell', '3rd', 'pid')

is_pypy = platform.python_implementation() == 'PyPy'

try:
    with open(pid_fname, 'r') as P:
        pid = P.read().strip()
except IOError:
    pid = None

# Environment-specific strings to be replaced with stable placeholders.
hostname = socket.gethostname()
user = os.environ['USER']

REFS_RE = re.compile(r'^\[\d+ refs\]\n')
IPYPY_DEANSI_RE = re.compile(r'\033(?:\[(?:\?\d+[lh]|[^a-zA-Z]+[a-ln-zA-Z])|[=>])')
ZSH_HL_RE = re.compile(r'\033\[\?\d+[hl]')

# Everything before this marker line is warm-up output and is dropped.
start_str = 'cd tests/shell/3rd'
if shell == 'pdb':
    start_str = 'class Foo(object):'

with codecs.open(fname, 'r', encoding='utf-8') as R:
    with codecs.open(new_fname, 'w', encoding='utf-8') as W:
        found_cd = False
        i = -1
        for line in (R if shell != 'fish' else R.read().split('\n')):
            i += 1
            if not found_cd:
                found_cd = (start_str in line)
                continue
            if 'true is the last line' in line:
                break
            line = line.translate({
                ord('\r'): None
            })
            if REFS_RE.match(line):
                continue
            # Normalize environment-specific values.
            line = line.replace(hostname, 'HOSTNAME')
            line = line.replace(user, 'USER')
            if pid is not None:
                line = line.replace(pid, 'PID')
            # Per-shell scrubbing of escape sequences and unstable output.
            if shell == 'zsh':
                line = line.replace('\033[0m\033[23m\033[24m\033[J', '')
                line = ZSH_HL_RE.subn('', line)[0]
            elif shell == 'fish':
                # Keep only the "\033[0;...\033[0m" colored segments, one per
                # output line.
                res = ''
                try:
                    while line.index('\033[0;'):
                        start = line.index('\033[0;')
                        end = line.index('\033[0m', start)
                        res += line[start:end + 4] + '\n'
                        line = line[end + 4:]
                except ValueError:
                    pass
                line = res
            elif shell == 'tcsh':
                try:
                    start = line.index('\033[0;')
                    end = line.index(' ', start)
                    line = line[start:end] + '\n'
                except ValueError:
                    line = ''
            elif shell == 'mksh':
                # Output is different in travis: on my machine I see full
                # command, in travis it is truncated just after `true`.
                if line.startswith('[1] + Terminated'):
                    line = '[1] + Terminated bash -c ...\n'
            elif shell == 'dash':
                # Position of this line is not stable: it may go both before and
                # after the next line
                if line.startswith('[1] + Terminated'):
                    continue
            elif shell == 'ipython' and is_pypy:
                try:
                    end_idx = line.rindex('\033[0m')
                    try:
                        idx = line[:end_idx].rindex('\033[1;1H')
                    except ValueError:
                        idx = line[:end_idx].rindex('\033[?25h')
                    line = line[idx + len('\033[1;1H'):]
                except ValueError:
                    pass
                try:
                    data_end_idx = line.rindex('\033[1;1H')
                    line = line[:data_end_idx] + '\n'
                except ValueError:
                    pass
                if line == '\033[1;1H\n':
                    continue
                was_empty = line == '\n'
                line = IPYPY_DEANSI_RE.subn('', line)[0]
                if line == '\n' and not was_empty:
                    line = ''
            elif shell == 'rc':
                if line == 'read() failed: Connection reset by peer\n':
                    line = ''
            elif shell == 'pdb':
                if is_pypy:
                    if line == '\033[?1h\033=\033[?25l\033[1A\n':
                        line = ''
                    line = IPYPY_DEANSI_RE.subn('', line)[0]
                    if line == '\n':
                        line = ''
                if line.startswith(('>',)):
                    line = ''
                elif line == '-> self.quitting = 1\n':
                    line = '-> self.quitting = True\n'
                elif line == '\n':
                    line = ''
                if line == '-> self.quitting = True\n':
                    break
            W.write(line)
|
mit
|
mayblue9/bokeh
|
examples/glyphs/daylight.py
|
43
|
2889
|
from __future__ import print_function
import numpy as np
import datetime as dt
from bokeh.browserlib import view
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.glyphs import Patch, Line, Text
from bokeh.models import (
ColumnDataSource, DataRange1d, DatetimeAxis,
DatetimeTickFormatter, Grid, Legend, Plot
)
from bokeh.resources import INLINE
from bokeh.sampledata import daylight
# Load the Warsaw 2013 sunrise/sunset sample data (a pandas DataFrame with
# Date, Sunrise, Sunset and Summer columns, per the accesses below).
df = daylight.daylight_warsaw_2013

# Raw sunrise/sunset series driving the two Line glyphs.
source = ColumnDataSource(dict(
    dates = df.Date,
    sunrises = df.Sunrise,
    sunsets = df.Sunset,
))

# Closed polygon sweeping sunrise forward and sunset backward over the
# whole year: the filled daylight band.
patch1_source = ColumnDataSource(dict(
    dates = np.concatenate((df.Date, df.Date[::-1])),
    times = np.concatenate((df.Sunrise, df.Sunset[::-1]))
))

# Same polygon restricted to rows where Summer == 1 (the DST period).
summer = df[df.Summer == 1]
patch2_source = ColumnDataSource(dict(
    dates = np.concatenate((summer.Date, summer.Date[::-1])),
    times = np.concatenate((summer.Sunrise, summer.Sunset[::-1]))
))

# Positional indices of the first DST day and the first post-DST day.
summer_start = df.Summer.tolist().index(1)
summer_end = df.Summer.tolist().index(0, summer_start)
# NOTE(review): Series.irow() was deprecated and later removed from pandas
# (.iloc[] is the replacement) -- confirm the pandas version this example
# targets before running it on a modern stack.
calendar_start = df.Date.irow(0)
summer_start = df.Date.irow(summer_start)
summer_end = df.Date.irow(summer_end)
calendar_end = df.Date.irow(-1)

# Midpoints of the three timezone regimes; used to place the text labels.
d1 = calendar_start + (summer_start - calendar_start)/2
d2 = summer_start + (summer_end - summer_start)/2
d3 = summer_end + (calendar_end - summer_end)/2

text_source = ColumnDataSource(dict(
    dates = [d1, d2, d3],
    times = [dt.time(11, 30)]*3,
    texts = ["CST (UTC+1)", "CEST (UTC+2)", "CST (UTC+1)"],
))

# Low-level bokeh assembly: ranges, plot, glyphs, axes, grids, legend.
xdr = DataRange1d()
ydr = DataRange1d()

title = "Daylight Hours - Warsaw, Poland"
plot = Plot(
    title=title,
    x_range=xdr, y_range=ydr,
    plot_width=800, plot_height=400
)

patch1 = Patch(x="dates", y="times", fill_color="skyblue", fill_alpha=0.8)
plot.add_glyph(patch1_source, patch1)

patch2 = Patch(x="dates", y="times", fill_color="orange", fill_alpha=0.8)
plot.add_glyph(patch2_source, patch2)

line1 = Line(x="dates", y="sunrises", line_color="yellow", line_width=2)
line1_glyph = plot.add_glyph(source, line1)

line2 = Line(x="dates", y="sunsets", line_color="red", line_width=2)
line2_glyph = plot.add_glyph(source, line2)

text = Text(x="dates", y="times", text="texts", text_align="center")
plot.add_glyph(text_source, text)

# Month-granularity tick labels on the date axis.
xformatter = DatetimeTickFormatter(formats=dict(months=["%b %Y"]))
xaxis = DatetimeAxis(formatter=xformatter)
plot.add_layout(xaxis, 'below')
yaxis = DatetimeAxis()
plot.add_layout(yaxis, 'left')

plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

legend = Legend(legends=[("sunrise", [line1_glyph]), ("sunset", [line2_glyph])])
plot.add_layout(legend)

doc = Document()
doc.add(plot)

if __name__ == "__main__":
    filename = "daylight.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Daylight Plot"))
    print("Wrote %s" % filename)
    view(filename)  # open the generated HTML in the default browser
|
bsd-3-clause
|
davenovak/mtasa-blue
|
vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_shuffle_test.py
|
3023
|
12549
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that test shuffling works."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Command to run the gtest_shuffle_test_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'

# Filter used when computing the FILTERED_* lists below.
TEST_FILTER = 'A*.A:A*.B:C*'

# Lazily-populated caches of test orderings; CalculateTestLists() fills
# each one the first time any test runs so the binary is invoked only once
# per configuration.
ALL_TESTS = []
ACTIVE_TESTS = []
FILTERED_TESTS = []
SHARDED_TESTS = []

SHUFFLED_ALL_TESTS = []
SHUFFLED_ACTIVE_TESTS = []
SHUFFLED_FILTERED_TESTS = []
SHUFFLED_SHARDED_TESTS = []
def AlsoRunDisabledTestsFlag():
  """Flag that makes Google Test run disabled tests as well."""
  return '--gtest_also_run_disabled_tests'


def FilterFlag(test_filter):
  """Builds the --gtest_filter flag for the given filter expression."""
  return '--gtest_filter=' + str(test_filter)


def RepeatFlag(n):
  """Builds the --gtest_repeat flag for n iterations."""
  return '--gtest_repeat=' + str(n)


def ShuffleFlag():
  """Flag that turns on test shuffling."""
  return '--gtest_shuffle'


def RandomSeedFlag(n):
  """Builds the --gtest_random_seed flag with seed n."""
  return '--gtest_random_seed=' + str(n)
def RunAndReturnOutput(extra_env, args):
  """Runs the shuffle test binary and returns its captured output."""
  env = dict(os.environ)
  env.update(extra_env)
  proc = gtest_test_utils.Subprocess([COMMAND] + args, env=env)
  return proc.output
def GetTestsForAllIterations(extra_env, args):
  """Runs the test program and returns a list of test lists.

  Args:
    extra_env: a map from environment variables to their values
    args: command line flags to pass to gtest_shuffle_test_

  Returns:
    A list where the i-th element is the list of tests run in the i-th
    test iteration.
  """
  test_runs = []
  for text in RunAndReturnOutput(extra_env, args).split('\n'):
    stripped = text.strip()
    if text.startswith('----'):
      # A dashed separator marks the start of a new iteration.
      current = []
      test_runs.append(current)
    elif stripped:
      current.append(stripped)  # 'TestCaseName.TestName'
  return test_runs
def GetTestCases(tests):
  """Returns a list of test cases in the given full test names.

  Args:
    tests: a list of full test names

  Returns:
    The distinct test-case names from 'tests', in order of first
    appearance.  Every duplicate is dropped (not just consecutive ones),
    matching the membership test the original loop performed.
  """
  test_cases = []
  seen = set()
  for test in tests:
    test_case = test.split('.')[0]
    # Set membership is O(1) per test vs. the previous O(n) list scan,
    # making the whole pass linear instead of quadratic.
    if test_case not in seen:
      seen.add(test_case)
      test_cases.append(test_case)
  return test_cases
def CalculateTestLists():
  """Calculates the list of tests run under different flags."""
  # Each module-level list is populated at most once, so repeated calls
  # (one per test via setUp) do not re-run the test binary.
  if not ALL_TESTS:
    ALL_TESTS.extend(
        GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0])

  if not ACTIVE_TESTS:
    ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0])

  if not FILTERED_TESTS:
    FILTERED_TESTS.extend(
        GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0])

  if not SHARDED_TESTS:
    SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [])[0])

  # Shuffled variants all use random seed 1 so the orderings are
  # reproducible across the assertions in GTestShuffleUnitTest.
  if not SHUFFLED_ALL_TESTS:
    SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations(
        {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_ACTIVE_TESTS:
    SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])[0])

  if not SHUFFLED_FILTERED_TESTS:
    SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0])

  if not SHUFFLED_SHARDED_TESTS:
    SHUFFLED_SHARDED_TESTS.extend(
        GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                  SHARD_INDEX_ENV_VAR: '1'},
                                 [ShuffleFlag(), RandomSeedFlag(1)])[0])
class GTestShuffleUnitTest(gtest_test_utils.TestCase):
  """Tests test shuffling."""

  def setUp(self):
    # Populates the module-level caches; no-op after the first test runs.
    CalculateTestLists()

  def testShufflePreservesNumberOfTests(self):
    self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS))
    self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS))
    self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS))
    self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS))

  def testShuffleChangesTestOrder(self):
    self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS)
    self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS)
    self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS,
                 SHUFFLED_FILTERED_TESTS)
    self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS,
                 SHUFFLED_SHARDED_TESTS)

  def testShuffleChangesTestCaseOrder(self):
    self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS),
                 GetTestCases(SHUFFLED_ALL_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS),
        GetTestCases(SHUFFLED_ACTIVE_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS),
        GetTestCases(SHUFFLED_FILTERED_TESTS))
    self.assert_(
        GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS),
        GetTestCases(SHUFFLED_SHARDED_TESTS))

  def testShuffleDoesNotRepeatTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test),
                       '%s appears more than once' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test),
                       '%s appears more than once' % (test,))

  def testShuffleDoesNotCreateNewTest(self):
    for test in SHUFFLED_ALL_TESTS:
      self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_ACTIVE_TESTS:
      self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_FILTERED_TESTS:
      self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,))
    for test in SHUFFLED_SHARDED_TESTS:
      self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,))

  def testShuffleIncludesAllTests(self):
    for test in ALL_TESTS:
      self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,))
    for test in ACTIVE_TESTS:
      self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,))
    for test in FILTERED_TESTS:
      self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,))
    for test in SHARDED_TESTS:
      self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,))

  def testShuffleLeavesDeathTestsAtFront(self):
    # Every death test must precede the first non-death test in the
    # shuffled order.
    non_death_test_found = False
    for test in SHUFFLED_ACTIVE_TESTS:
      if 'DeathTest.' in test:
        self.assert_(not non_death_test_found,
                     '%s appears after a non-death test' % (test,))
      else:
        non_death_test_found = True

  def _VerifyTestCasesDoNotInterleave(self, tests):
    # Each test case's tests must form one contiguous run in `tests`;
    # seeing a case name a second time (after a different case) fails.
    test_cases = []
    for test in tests:
      [test_case, _] = test.split('.')
      if test_cases and test_cases[-1] != test_case:
        test_cases.append(test_case)
        self.assertEqual(1, test_cases.count(test_case),
                         'Test case %s is not grouped together in %s' %
                         (test_case, tests))

  def testShuffleDoesNotInterleaveTestCases(self):
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS)
    self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS)

  def testShuffleRestoresOrderAfterEachIteration(self):
    # Get the test lists in all 3 iterations, using random seed 1, 2,
    # and 3 respectively.  Google Test picks a different seed in each
    # iteration, and this test depends on the current implementation
    # picking successive numbers.  This dependency is not ideal, but
    # makes the test much easier to write.
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    # Make sure running the tests with random seed 1 gets the same
    # order as in iteration 1 above.
    [tests_with_seed1] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(1)])
    self.assertEqual(tests_in_iteration1, tests_with_seed1)

    # Make sure running the tests with random seed 2 gets the same
    # order as in iteration 2 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 2.
    [tests_with_seed2] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(2)])
    self.assertEqual(tests_in_iteration2, tests_with_seed2)

    # Make sure running the tests with random seed 3 gets the same
    # order as in iteration 3 above.  Success means that Google Test
    # correctly restores the test order before re-shuffling at the
    # beginning of iteration 3.
    [tests_with_seed3] = GetTestsForAllIterations(
        {}, [ShuffleFlag(), RandomSeedFlag(3)])
    self.assertEqual(tests_in_iteration3, tests_with_seed3)

  def testShuffleGeneratesNewOrderInEachIteration(self):
    [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = (
        GetTestsForAllIterations(
            {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)]))

    self.assert_(tests_in_iteration1 != tests_in_iteration2,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration1 != tests_in_iteration3,
                 tests_in_iteration1)
    self.assert_(tests_in_iteration2 != tests_in_iteration3,
                 tests_in_iteration2)

  def testShuffleShardedTestsPreservesPartition(self):
    # If we run M tests on N shards, the same M tests should be run in
    # total, regardless of the random seeds used by the shards.
    [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '0'},
                                        [ShuffleFlag(), RandomSeedFlag(1)])
    [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '1'},
                                        [ShuffleFlag(), RandomSeedFlag(20)])
    [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3',
                                         SHARD_INDEX_ENV_VAR: '2'},
                                        [ShuffleFlag(), RandomSeedFlag(25)])
    sorted_sharded_tests = tests1 + tests2 + tests3
    sorted_sharded_tests.sort()
    sorted_active_tests = []
    sorted_active_tests.extend(ACTIVE_TESTS)
    sorted_active_tests.sort()
    self.assertEqual(sorted_active_tests, sorted_sharded_tests)
if __name__ == '__main__':
  gtest_test_utils.Main()  # delegate to googletest's Python test runner
|
gpl-3.0
|
jnerin/ansible
|
test/units/modules/network/onyx/test_onyx_config.py
|
50
|
4600
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
    """Unit tests for the onyx_config Ansible module."""

    # Module under test, consumed by the TestOnyxModule harness.
    module = onyx_config

    def setUp(self):
        # Patch the module's device-facing helpers so no real device
        # connection is attempted; every mock is stopped in tearDown().
        super(TestOnyxConfigModule, self).setUp()
        self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestOnyxConfigModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Serve a canned running-config to every test.
        config_file = 'onyx_config_config.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def test_onyx_config_unchanged(self):
        # Re-applying the device's own config should produce no change
        # (execute_module is called without changed=True).
        src = load_fixture('onyx_config_config.cfg')
        set_module_args(dict(src=src))
        self.execute_module()

    def test_onyx_config_src(self):
        src = load_fixture('onyx_config_src.cfg')
        set_module_args(dict(src=src))
        commands = [
            'interface mlag-port-channel 2']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_backup(self):
        set_module_args(dict(backup=True))
        result = self.execute_module()
        self.assertIn('__backup__', result)

    def test_onyx_config_save(self):
        set_module_args(dict(save='yes'))
        self.execute_module(changed=True)
        # Saving must run exactly one command and never push new config.
        self.assertEqual(self.run_commands.call_count, 1)
        self.assertEqual(self.get_config.call_count, 1)
        self.assertEqual(self.load_config.call_count, 0)
        args = self.run_commands.call_args[0][1]
        self.assertIn('configuration write', args)

    def test_onyx_config_lines_wo_parents(self):
        set_module_args(dict(lines=['hostname foo']))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_before(self):
        set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
        commands = ['test1', 'test2', 'hostname foo']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_after(self):
        set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
        commands = ['hostname foo', 'test1', 'test2']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_before_after(self):
        set_module_args(dict(lines=['hostname foo'],
                             before=['test1', 'test2'],
                             after=['test3', 'test4']))
        commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
        self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)

    def test_onyx_config_config(self):
        config = 'hostname localhost'
        set_module_args(dict(lines=['hostname router'], config=config))
        commands = ['hostname router']
        self.execute_module(changed=True, commands=commands, is_updates=True)

    def test_onyx_config_match_none(self):
        lines = ['hostname router']
        set_module_args(dict(lines=lines, match='none'))
        self.execute_module(changed=True, commands=lines, is_updates=True)
|
gpl-3.0
|
BAGAsss/mrmc
|
tools/EventClients/lib/python/xbmcclient.py
|
164
|
22913
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Implementation of XBMC's UDP based input system.
A set of classes that abstract the various packets that the event server
currently supports. In addition, there's also a class, XBMCClient, that
provides functions that sends the various packets. Use XBMCClient if you
don't need complete control over packet structure.
The basic workflow involves:
1. Send a HELO packet
2. Send x number of valid packets
3. Send a BYE packet
IMPORTANT NOTE ABOUT TIMEOUTS:
A client is considered to be timed out if XBMC doesn't received a packet
at least once every 60 seconds. To "ping" XBMC with an empty packet use
PacketPING or XBMCClient.ping(). See the documentation for details.
"""
__author__ = "[email protected]"
__version__ = "0.0.3"
from struct import pack
from socket import *
import time
# Packet sizing: a 32-byte header, remainder of the 1024-byte datagram is payload.
MAX_PACKET_SIZE = 1024
HEADER_SIZE = 32
MAX_PAYLOAD_SIZE = MAX_PACKET_SIZE - HEADER_SIZE

# Per-process client token (seconds since epoch at import time).
UNIQUE_IDENTIFICATION = (int)(time.time())

# Packet types (header field H3).
PT_HELO = 0x01
PT_BYE = 0x02
PT_BUTTON = 0x03
PT_MOUSE = 0x04
PT_PING = 0x05
PT_BROADCAST = 0x06
PT_NOTIFICATION = 0x07
PT_BLOB = 0x08
PT_LOG = 0x09
PT_ACTION = 0x0A
PT_DEBUG = 0xFF

# Icon encodings for HELO / NOTIFICATION packets.
ICON_NONE = 0x00
ICON_JPEG = 0x01
ICON_PNG = 0x02
ICON_GIF = 0x03

# Button-packet flag bits (OR-ed together into the flags field).
BT_USE_NAME = 0x01
BT_DOWN = 0x02
BT_UP = 0x04
BT_USE_AMOUNT = 0x08
BT_QUEUE = 0x10
BT_NO_REPEAT = 0x20
BT_VKEY = 0x40
BT_AXIS = 0x80
BT_AXISSINGLE = 0x100

# Mouse-packet flags.
MS_ABSOLUTE = 0x01

# Log levels, mirroring XBMC's own.
LOGDEBUG = 0x00
LOGINFO = 0x01
LOGNOTICE = 0x02
LOGWARNING = 0x03
LOGERROR = 0x04
LOGSEVERE = 0x05
LOGFATAL = 0x06
LOGNONE = 0x07

# Action types for PacketACTION.
ACTION_EXECBUILTIN = 0x01
ACTION_BUTTON = 0x02
######################################################################
# Helper Functions
######################################################################
def format_string(msg):
    """Return *msg* with the NUL terminator the event protocol expects."""
    return "".join([msg, "\0"])
def format_uint32(num):
    """Pack *num* as a 4-byte big-endian unsigned integer."""
    return pack("!I", num)
def format_uint16(num):
    """Pack *num* as a 2-byte big-endian unsigned int, clamped to [0, 65535]."""
    clamped = min(max(num, 0), 65535)
    return pack("!H", clamped)
######################################################################
# Packet Classes
######################################################################
class Packet:
    """Base class that implements a single event packet.

    - Generic packet structure (maximum 1024 bytes per packet)
    - Header is 32 bytes long, so 992 bytes available for payload
    - large payloads can be split into multiple packets using H4 and H5
      H5 should contain total no. of packets in such a case
    - H6 contains length of P1, which is limited to 992 bytes
    - if H5 is 0 or 1, then H4 will be ignored (single packet msg)
    - H7 must be set to zeros for now

        -----------------------------
        | -H1 Signature ("XBMC")    | - 4 x CHAR              4B
        | -H2 Version (eg. 2.0)     | - 2 x UNSIGNED CHAR     2B
        | -H3 PacketType            | - 1 x UNSIGNED SHORT    2B
        | -H4 Sequence number       | - 1 x UNSIGNED LONG     4B
        | -H5 No. of packets in msg | - 1 x UNSIGNED LONG     4B
        | -H7 Client's unique token | - 1 x UNSIGNED LONG     4B
        | -H8 Reserved              | - 10 x UNSIGNED CHAR   10B
        |---------------------------|
        | -P1 payload               | -
        -----------------------------
    """
    def __init__(self):
        self.sig = "XBMC"
        self.minver = 0
        self.majver = 2
        self.seq = 1
        self.maxseq = 1
        self.payloadsize = 0
        self.uid = UNIQUE_IDENTIFICATION
        self.reserved = "\0" * 10
        self.payload = ""
        return

    def append_payload(self, blob):
        """Append to existing payload

        Arguments:
        blob -- binary data to append to the current payload
        """
        self.set_payload(self.payload + blob)

    def set_payload(self, payload):
        """Set the payload for this packet

        Arguments:
        payload -- binary data that contains the payload
        """
        self.payload = payload
        self.payloadsize = len(self.payload)
        # ceil(payloadsize / MAX_PAYLOAD_SIZE); floor division keeps the
        # result exact on both Python 2 and 3 (true division could lose
        # precision for very large payloads).
        self.maxseq = (self.payloadsize + (MAX_PAYLOAD_SIZE - 1)) // MAX_PAYLOAD_SIZE

    def num_packets(self):
        """ Return the number of packets required for payload """
        return self.maxseq

    def get_header(self, packettype=-1, seq=1, maxseq=1, payload_size=0):
        """Construct a header and return as string

        Keyword arguments:
        packettype -- valid packet types are PT_HELO, PT_BYE, PT_BUTTON,
                      PT_MOUSE, PT_PING, PT_BORADCAST, PT_NOTIFICATION,
                      PT_BLOB, PT_DEBUG
        seq -- the sequence of this packet for a multi packet message
               (default 1)
        maxseq -- the total number of packets for a multi packet message
                  (default 1)
        payload_size -- the size of the payload of this packet (default 0)
        """
        if packettype < 0:
            # Negative sentinel means "use this packet's own type".
            packettype = self.packettype
        header = self.sig
        header += chr(self.majver)
        header += chr(self.minver)
        header += format_uint16(packettype)
        header += format_uint32(seq)
        header += format_uint32(maxseq)
        header += format_uint16(payload_size)
        header += format_uint32(self.uid)
        header += self.reserved
        return header

    def get_payload_size(self, seq):
        """Returns the calculated payload size for the particular packet

        Arguments:
        seq -- the sequence number
        """
        if self.maxseq == 1:
            return self.payloadsize

        if seq < self.maxseq:
            return MAX_PAYLOAD_SIZE

        # BUGFIX: this used to be `payloadsize % MAX_PAYLOAD_SIZE`, which is
        # 0 whenever the payload is an exact multiple of MAX_PAYLOAD_SIZE --
        # silently dropping the final chunk of the message.  Computing the
        # remainder explicitly yields MAX_PAYLOAD_SIZE in that case.
        return self.payloadsize - (self.maxseq - 1) * MAX_PAYLOAD_SIZE

    def get_udp_message(self, packetnum=1):
        """Construct the UDP message for the specified packetnum and return
        as string

        Keyword arguments:
        packetnum -- the packet no. for which to construct the message
                     (default 1)
        """
        if packetnum > self.num_packets() or packetnum < 1:
            return ""
        header = ""
        if packetnum==1:
            header = self.get_header(self.packettype, packetnum, self.maxseq,
                                     self.get_payload_size(packetnum))
        else:
            # Continuation packets are always tagged PT_BLOB.
            header = self.get_header(PT_BLOB, packetnum, self.maxseq,
                                     self.get_payload_size(packetnum))

        payload = self.payload[ (packetnum-1) * MAX_PAYLOAD_SIZE :
                                (packetnum-1) * MAX_PAYLOAD_SIZE+
                                self.get_payload_size(packetnum) ]
        return header + payload

    def send(self, sock, addr, uid=UNIQUE_IDENTIFICATION):
        """Send the entire message to the specified socket and address.

        Arguments:
        sock -- datagram socket object (socket.socket)
        addr -- address, port pair (eg: ("127.0.0.1", 9777) )
        uid  -- unique identification

        Returns True when every datagram was handed to the socket,
        False on the first send failure (best-effort semantics).
        """
        self.uid = uid
        for a in range ( 0, self.num_packets() ):
            try:
                sock.sendto(self.get_udp_message(a+1), addr)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit still propagate; send failures still just
                # report False as before.
                return False
        return True
class PacketHELO (Packet):
    """A HELO packet

    A HELO packet establishes a valid connection to XBMC. It is the
    first packet that should be sent.
    """
    def __init__(self, devicename=None, icon_type=ICON_NONE, icon_file=None):
        """
        Keyword arguments:
        devicename -- the string that identifies the client
        icon_type  -- one of ICON_NONE, ICON_JPEG, ICON_PNG, ICON_GIF
        icon_file  -- location of icon file with respect to current working
                      directory if icon_type is not ICON_NONE
        """
        Packet.__init__(self)
        self.packettype = PT_HELO
        self.icontype = icon_type
        # Device name is capped at 128 bytes (including the NUL terminator).
        self.set_payload ( format_string(devicename)[0:128] )
        self.append_payload( chr (icon_type) )
        self.append_payload( format_uint16 (0) ) # port no
        self.append_payload( format_uint32 (0) ) # reserved1
        self.append_payload( format_uint32 (0) ) # reserved2
        if icon_type != ICON_NONE and icon_file:
            # Use open() instead of the file() builtin (removed in Python 3)
            # and close the handle deterministically; 'rb' keeps the icon
            # bytes intact on platforms where text mode translates newlines.
            with open(icon_file, 'rb') as icon:
                self.append_payload( icon.read() )
class PacketNOTIFICATION (Packet):
    """A NOTIFICATION packet

    This packet displays a notification window in XBMC. It can contain
    a caption, a message and an icon.
    """
    def __init__(self, title, message, icon_type=ICON_NONE, icon_file=None):
        """
        Keyword arguments:
        title     -- the notification caption / title
        message   -- the main text of the notification
        icon_type -- one of ICON_NONE, ICON_JPEG, ICON_PNG, ICON_GIF
        icon_file -- location of icon file with respect to current working
                     directory if icon_type is not ICON_NONE
        """
        Packet.__init__(self)
        self.packettype = PT_NOTIFICATION
        self.title = title
        self.message = message
        self.set_payload ( format_string(title) )
        self.append_payload( format_string(message) )
        self.append_payload( chr (icon_type) )
        self.append_payload( format_uint32 (0) ) # reserved
        if icon_type != ICON_NONE and icon_file:
            # open() instead of the Py3-removed file() builtin, with a
            # context manager so the handle is closed; 'rb' preserves the
            # icon bytes on text-mode platforms.
            with open(icon_file, 'rb') as icon:
                self.append_payload( icon.read() )
class PacketBUTTON (Packet):
    """A BUTTON packet

    A button packet send a key press or release event to XBMC
    """
    def __init__(self, code=0, repeat=1, down=1, queue=0,
                 map_name="", button_name="", amount=0, axis=0):
        """
        Keyword arguments:
        code   -- raw button code (default: 0)
        repeat -- this key press should repeat until released (default: 1)
                  Note that queued pressed cannot repeat.
        down   -- if this is 1, it implies a press event, 0 implies a release
                  event. (default: 1)
        queue  -- a queued key press means that the button event is
                  executed just once after which the next key press is
                  processed. It can be used for macros. Currently there
                  is no support for time delays between queued presses.
                  (default: 0)
        map_name -- a combination of map_name and button_name refers to a
                    mapping in the user's Keymap.xml or Lircmap.xml.
                    map_name can be one of the following:
                    "KB" => standard keyboard map ( <keyboard> section )
                    "XG" => xbox gamepad map ( <gamepad> section )
                    "R1" => xbox remote map ( <remote> section )
                    "R2" => xbox universal remote map ( <universalremote>
                            section )
                    "LI:devicename" => LIRC remote map where 'devicename' is
                            the actual device's name
        button_name -- a button name defined in the map specified in map_name.
                       For example, if map_name is "KB" refering to the
                       <keyboard> section in Keymap.xml then, valid
                       button_names include "printscreen", "minus", "x", etc.
        amount -- unimplemented for now; in the future it will be used for
                  specifying magnitude of analog key press events
        axis   -- 1 for a single-axis event, 2 for a full axis event
        """
        Packet.__init__(self)
        self.flags = 0
        self.packettype = PT_BUTTON
        # Accept a single character as the raw code.
        if isinstance(code, str):
            code = ord(code)

        # assign code only if we don't have a map and button name
        if not (map_name and button_name):
            self.code = code
        else:
            self.flags |= BT_USE_NAME
            self.code = 0

        # `is not None` instead of `!= None`: identity is the correct test
        # and keeps amount=0 flagged as "use amount", as before.
        if amount is not None:
            self.flags |= BT_USE_AMOUNT
            self.amount = int(amount)
        else:
            self.amount = 0

        if down:
            self.flags |= BT_DOWN
        else:
            self.flags |= BT_UP

        if not repeat:
            self.flags |= BT_NO_REPEAT

        if queue:
            self.flags |= BT_QUEUE

        if axis == 1:
            self.flags |= BT_AXISSINGLE
        elif axis == 2:
            self.flags |= BT_AXIS

        self.set_payload ( format_uint16(self.code) )
        self.append_payload( format_uint16(self.flags) )
        self.append_payload( format_uint16(self.amount) )
        self.append_payload( format_string (map_name) )
        self.append_payload( format_string (button_name) )
class PacketMOUSE (Packet):
    """A MOUSE packet

    Sets the absolute mouse position in XBMC.
    """
    def __init__(self, x, y):
        """
        Arguments:
        x -- horizontal position ranging from 0 to 65535
        y -- vertical position ranging from 0 to 65535

        The range will be mapped to the screen width and height in XBMC.
        """
        Packet.__init__(self)
        self.packettype = PT_MOUSE
        self.flags = MS_ABSOLUTE
        for chunk in (chr(self.flags), format_uint16(x), format_uint16(y)):
            self.append_payload(chunk)
class PacketBYE (Packet):
    """A BYE packet

    Terminates the connection to XBMC; send it as the last packet.
    """
    def __init__(self):
        Packet.__init__(self)
        self.packettype = PT_BYE
class PacketPING (Packet):
    """A PING packet

    Tells XBMC the client is still alive.  Any valid packet acts as a
    ping, but a client must send something at least once every 60
    seconds or its session times out.
    """
    def __init__(self):
        Packet.__init__(self)
        self.packettype = PT_PING
class PacketLOG (Packet):
    """A LOG packet

    A LOG packet tells XBMC to log the message to xbmc.log with the
    loglevel as specified.
    """
    def __init__(self, loglevel=0, logmessage="", autoprint=True):
        """
        Keyword arguments:
        loglevel   -- the loglevel, follows XBMC standard.
        logmessage -- the message to log
        autoprint  -- if the logmessage should automaticly be printed to stdout
        """
        Packet.__init__(self)
        self.packettype = PT_LOG
        self.append_payload( chr (loglevel) )
        self.append_payload( format_string(logmessage) )
        if autoprint:
            # Call form of print: identical output on Python 2 for a single
            # argument, and valid syntax on Python 3 (the old statement form
            # was a SyntaxError there).
            print(logmessage)
class PacketACTION (Packet):
    """An ACTION packet

    An ACTION packet tells XBMC to do the action specified, based on the
    type it knows were it needs to be sent.  The idea is that this will
    be as in scripting/skining and keymapping, just triggered from afar.
    """
    def __init__(self, actionmessage="", actiontype=ACTION_EXECBUILTIN):
        """
        Keyword arguments:
        actionmessage -- the action string to execute (e.g. a builtin call)
        actiontype    -- ACTION_EXECBUILTIN or ACTION_BUTTON
        """
        # (The previous docstring documented loglevel/logmessage -- it was
        # copy-pasted from PacketLOG and described the wrong parameters.)
        Packet.__init__(self)
        self.packettype = PT_ACTION
        self.append_payload( chr (actiontype) )
        self.append_payload( format_string(actionmessage) )
######################################################################
# XBMC Client Class
######################################################################
class XBMCClient:
"""An XBMC event client"""
def __init__(self, name ="", icon_file=None, broadcast=False, uid=UNIQUE_IDENTIFICATION,
             ip="127.0.0.1"):
    """
    Keyword arguments:
    name      -- Name of the client
    icon_file -- location of an icon file, if any (png, jpg or gif)
    broadcast -- if True, enable UDP broadcast on the socket
    uid       -- unique identification
    ip        -- address of the event server (default: localhost)
    """
    self.name = str(name)
    self.icon_file = icon_file
    # _get_icon_type is defined later in this class (not shown here);
    # presumably it maps the file extension to an ICON_* constant -- verify.
    self.icon_type = self._get_icon_type(icon_file)
    self.ip = ip
    self.port = 9777                        # default XBMC event-server port
    self.sock = socket(AF_INET,SOCK_DGRAM)  # UDP socket, created eagerly
    if broadcast:
        self.sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
    self.uid = uid
def connect(self, ip=None, port=None):
    """Open the session by sending a HELO packet to XBMC.

    ip   -- IP address of XBMC (falls back to the one set at construction)
    port -- port the event server listens on (falls back to the current one)
    """
    if ip:
        self.ip = ip
    if port:
        self.port = int(port)
    self.addr = (self.ip, self.port)
    hello = PacketHELO(self.name, self.icon_type, self.icon_file)
    return hello.send(self.sock, self.addr, self.uid)
def close(self):
    """Terminate the session by sending a BYE packet."""
    return PacketBYE().send(self.sock, self.addr, self.uid)
def ping(self):
    """Send a PING packet to keep the session alive."""
    return PacketPING().send(self.sock, self.addr, self.uid)
def send_notification(self, title="", message="", icon_file=None):
"""Send a notification to the connected XBMC
Keyword Arguments:
title -- The title/heading for the notifcation
message -- The message to be displayed
icon_file -- location of an icon file, if any (png, jpg, gif)
"""
self.connect()
packet = PacketNOTIFICATION(title, message,
self._get_icon_type(icon_file),
icon_file)
return packet.send(self.sock, self.addr, self.uid)
def send_keyboard_button(self, button=None):
"""Send a keyboard event to XBMC
Keyword Arguments:
button -- name of the keyboard button to send (same as in Keymap.xml)
"""
if not button:
return
return self.send_button(map="KB", button=button)
def send_remote_button(self, button=None):
"""Send a remote control event to XBMC
Keyword Arguments:
button -- name of the remote control button to send (same as in Keymap.xml)
"""
if not button:
return
return self.send_button(map="R1", button=button)
def release_button(self):
"""Release all buttons"""
packet = PacketBUTTON(code=0x01, down=0)
return packet.send(self.sock, self.addr, self.uid)
def send_button(self, map="", button="", amount=0):
"""Send a button event to XBMC
Keyword arguments:
map -- a combination of map_name and button_name refers to a
mapping in the user's Keymap.xml or Lircmap.xml.
map_name can be one of the following:
"KB" => standard keyboard map ( <keyboard> section )
"XG" => xbox gamepad map ( <gamepad> section )
"R1" => xbox remote map ( <remote> section )
"R2" => xbox universal remote map ( <universalremote>
section )
"LI:devicename" => LIRC remote map where 'devicename' is the
actual device's name
button -- a button name defined in the map specified in map, above.
For example, if map is "KB" refering to the <keyboard>
section in Keymap.xml then, valid buttons include
"printscreen", "minus", "x", etc.
"""
packet = PacketBUTTON(map_name=str(map), button_name=str(button), amount=amount)
return packet.send(self.sock, self.addr, self.uid)
def send_button_state(self, map="", button="", amount=0, down=0, axis=0):
"""Send a button event to XBMC
Keyword arguments:
map -- a combination of map_name and button_name refers to a
mapping in the user's Keymap.xml or Lircmap.xml.
map_name can be one of the following:
"KB" => standard keyboard map ( <keyboard> section )
"XG" => xbox gamepad map ( <gamepad> section )
"R1" => xbox remote map ( <remote> section )
"R2" => xbox universal remote map ( <universalremote>
section )
"LI:devicename" => LIRC remote map where 'devicename' is the
actual device's name
button -- a button name defined in the map specified in map, above.
For example, if map is "KB" refering to the <keyboard>
section in Keymap.xml then, valid buttons include
"printscreen", "minus", "x", etc.
"""
if axis:
if amount == 0:
down = 0
else:
down = 1
packet = PacketBUTTON(map_name=str(map), button_name=str(button), amount=amount, down=down, queue=1, axis=axis)
return packet.send(self.sock, self.addr, self.uid)
def send_mouse_position(self, x=0, y=0):
"""Send a mouse event to XBMC
Keywords Arguments:
x -- absolute x position of mouse ranging from 0 to 65535
which maps to the entire screen width
y -- same a 'x' but relates to the screen height
"""
packet = PacketMOUSE(int(x), int(y))
return packet.send(self.sock, self.addr, self.uid)
def send_log(self, loglevel=0, logmessage="", autoprint=True):
"""
Keyword arguments:
loglevel -- the loglevel, follows XBMC standard.
logmessage -- the message to log
autoprint -- if the logmessage should automaticly be printed to stdout
"""
packet = PacketLOG(loglevel, logmessage)
return packet.send(self.sock, self.addr, self.uid)
def send_action(self, actionmessage="", actiontype=ACTION_EXECBUILTIN):
"""
Keyword arguments:
actionmessage -- the ActionString
actiontype -- The ActionType the ActionString should be sent to.
"""
packet = PacketACTION(actionmessage, actiontype)
return packet.send(self.sock, self.addr, self.uid)
def _get_icon_type(self, icon_file):
if icon_file:
if icon_file.lower()[-3:] == "png":
return ICON_PNG
elif icon_file.lower()[-3:] == "gif":
return ICON_GIF
elif icon_file.lower()[-3:] == "jpg":
return ICON_JPEG
return ICON_NONE
|
gpl-2.0
|
vishnugonela/boto
|
tests/integration/dynamodb/test_table.py
|
136
|
3553
|
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.dynamodb.layer2 import Layer2
from boto.dynamodb.table import Table
from boto.dynamodb.schema import Schema
class TestDynamoDBTable(unittest.TestCase):
dynamodb = True
def setUp(self):
self.dynamodb = Layer2()
self.schema = Schema.create(('foo', 'N'), ('bar', 'S'))
self.table_name = 'testtable%s' % int(time.time())
def create_table(self, table_name, schema, read_units, write_units):
result = self.dynamodb.create_table(table_name, schema, read_units, write_units)
self.addCleanup(self.dynamodb.delete_table, result)
return result
def assertAllEqual(self, *items):
first = items[0]
for item in items[1:]:
self.assertEqual(first, item)
def test_table_retrieval_parity(self):
created_table = self.dynamodb.create_table(
self.table_name, self.schema, 1, 1)
created_table.refresh(wait_for_active=True)
retrieved_table = self.dynamodb.get_table(self.table_name)
constructed_table = self.dynamodb.table_from_schema(self.table_name,
self.schema)
# All three tables should have the same name
# and schema attributes.
self.assertAllEqual(created_table.name,
retrieved_table.name,
constructed_table.name)
self.assertAllEqual(created_table.schema,
retrieved_table.schema,
constructed_table.schema)
# However for create_time, status, read/write units,
# only the created/retrieved table will have equal
# values.
self.assertEqual(created_table.create_time,
retrieved_table.create_time)
self.assertEqual(created_table.status,
retrieved_table.status)
self.assertEqual(created_table.read_units,
retrieved_table.read_units)
self.assertEqual(created_table.write_units,
retrieved_table.write_units)
# The constructed table will have values of None.
self.assertIsNone(constructed_table.create_time)
self.assertIsNone(constructed_table.status)
self.assertIsNone(constructed_table.read_units)
self.assertIsNone(constructed_table.write_units)
|
mit
|
exowanderer/Charge-Carrier-Trapping-Comparison
|
Charge Carrier Trapping Experiment.py
|
1
|
37127
|
# coding: utf-8
# # Charge Carrier Trapping Experiment
#
# CCT = Charge Carrier Trapping - This is a test of comparing the Zhou et al 2017 results with a data driven analysis using multinest
#
# NOTE: this file is a Jupyter-notebook export; the `# In[ ]:` markers are
# the original cell boundaries, and `get_ipython()` calls only work inside
# IPython/Jupyter.
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pandas import read_csv, DataFrame, concat
from time import time
from exoparams import PlanetParams
from astropy import units as u
# In[ ]:
# Load the light-curve samples: columns used below are DeltaPhase, Flux,
# Sigma and OrbitNumber.
test_data = read_csv('test_data.dat')
test_data
# **Check if the data makes sense as is**
# In[ ]:
# Exploratory plots: flux vs orbital phase, then flux vs sample index.
fig = figure(figsize=(10,10))
errorbar(test_data['DeltaPhase'], test_data['Flux'] , test_data['Sigma'], fmt='o')
fig = figure(figsize=(10,10))
errorbar(np.arange(test_data['DeltaPhase'].size), test_data['Flux'] , test_data['Sigma'], fmt='o')
# errorbar(h38_v2_orbitPhased['DeltaPhase'], h38_v2_orbitPhased['Flux'] , h38_v2_orbitPhased['Sigma'], fmt='o')
# errorbar(w67_v1_orbitPhased['DeltaPhase'], w67_v1_orbitPhased['Flux'] , w67_v1_orbitPhased['Sigma'], fmt='o')
# In[ ]:
# One errorbar series per HST orbit, to see the per-orbit ramps.
for k in test_data['OrbitNumber'].unique():
    orbitNNow = test_data['OrbitNumber'] == k
    errorbar(test_data['DeltaPhase'][orbitNNow] , test_data['Flux'][orbitNNow] , test_data['Sigma'][orbitNNow], fmt='o')
def zhou_model(params):
    """Charge-trap ramp model after Zhou et al. 2017.

    params -- [E0fast, E0slow, dE0fast_1, dE0slow_1, dE0fast_2, ...]:
              initial fast/slow trap populations, then per-orbit offsets
              (orbit 0 has implicit offsets of zero).
    Returns a callable giving the multiplicative ramp (relies on the
    module-level globals ydata, nOrbits, orbit_phase, orbitNumber, tphase).

    The exponential ramp timescale is detector dependent, and therefore
    uniform across all observations; the orbit-to-orbit difference is
    predicted to be related ONLY to the initial number of populated charge
    traps at the start of each ramp (BIG ASSUMPTION).
    """
    # I assume that what Zhou means by `flux` is either the WLC or avg WLC value
    flux = ydata.copy()
    # flux = ydata.copy() / 128 # I assume that what Zhou means by `flux` is either the WLC or avg WLC value
    E0fast = params[0] # Orbit 0; Start with per frame; may want per pixel
    E0slow = params[1] # Orbit 0; Start with per frame; may want per pixel
    # Separate out the delta-E0 components per orbit.
    # dE0fast[0] and dE0slow[0] stay 0.0 because they correspond to the
    # initial E0fast and E0slow.
    dE0fast = np.zeros(nOrbits)
    dE0slow = np.zeros(nOrbits)
    for k in range(1, nOrbits):
        # (debug print of the parameter indexing removed)
        dE0fast[k] = params[2*k]
        dE0slow[k] = params[2*k+1]
    # Detector constants from Table 3 of Zhou et al. 2017.
    ETotFast = 270.6
    etaFast = 0.006863
    tauFast = 224.8
    ETotSlow = 1320.0
    etaSlow = 0.01311
    tauSlow = 2.45e4
    coeffFast0= (etaFast * flux / ETotFast + tauFast**-1)
    coeffSlow0= (etaSlow * flux / ETotSlow + tauSlow**-1)
    coeffFast1= etaFast*flux / coeffFast0
    coeffSlow1= etaSlow*flux / coeffSlow0
    # Trap populations relax exponentially within each orbit, restarting
    # from E0 + dE0[k] at the top of orbit k.
    Efast = zeros(orbit_phase.shape)
    Eslow = zeros(orbit_phase.shape)
    for k in range(nOrbits):
        orbitNow = where(orbitNumber == k)[0]
        Efast[orbitNow] = coeffFast1 + (E0fast + dE0fast[k] - coeffFast1)*exp(-coeffFast0 * tphase[orbitNow])
        Eslow[orbitNow] = coeffSlow1 + (E0slow + dE0slow[k] - coeffSlow1)*exp(-coeffSlow0 * tphase[orbitNow])
    dEFastDtP = etaFast * flux * (ETotFast - Efast) / ETotFast
    dEFastDtN = -Efast / tauFast
    dESlowDtP = etaSlow * flux * (ETotSlow - Eslow) / ETotSlow
    dESlowDtN = -Eslow / tauSlow
    # Bug fix: the original version evaluated this lambda as a bare
    # expression statement and returned None; return it instead.
    # NOTE(review): the expression subtracts the two positive-rate terms
    # TWICE and never uses dEFastDtN/dESlowDtN, and it ignores its `phase`
    # argument -- this looks like a typo in the source notebook; confirm
    # the intended form against Zhou et al. 2017 before relying on it.
    return lambda phase: 1 - dEFastDtP - dESlowDtP - dEFastDtP - dESlowDtP
# # PyMultiNest Demo
# In[ ]:
# NOTE(review): a mid-file `from __future__ import` is only legal because
# each notebook cell was executed independently; in a plain module this line
# must be first.
from __future__ import absolute_import, unicode_literals, print_function
import pymultinest
import math
import os
import threading, subprocess
from sys import platform
# MultiNest writes its chain files into ./chains; make sure it exists.
if not os.path.exists("chains"): os.mkdir("chains")
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
# from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def single_exponential_model(cube):
    """Build f(x) = a - b*exp(-c*x) from the first three cube entries
    (a = asymptote, b = ramp amplitude, c = inverse timescale)."""
    a, b, c = cube[0], cube[1], cube[2]
    def model(xdata):
        return a - b * exp(-c * xdata)
    return model
def double_exponential_model(cube):
    """Build f(x) = a - b1*exp(-c1*x) - b2*exp(-c2*x) from five cube
    entries: asymptote, then (amplitude, inverse timescale) twice."""
    a = cube[0]
    b1, c1 = cube[1], cube[2]
    b2, c2 = cube[3], cube[4]
    def model(xdata):
        return a - b1 * exp(-c1 * xdata) - b2 * exp(-c2 * xdata)
    return model
def straight_line(cube):
    """Build f(x) = intercept + gradient*x from cube = [offset, slope]."""
    intercept, gradient = cube[0], cube[1]
    def line(abscissa):
        return intercept + gradient * abscissa
    return line
def sine_wave(cube):
    """Build f(x) = amp * sin(2*pi*x / period) from cube = [amp, period]."""
    amplitude, period = cube[0], cube[1]
    def wave(abscissa):
        return amplitude * sin(2 * pi / period * abscissa)
    return wave
# ** Generate Fake Data for Algorithm Testing **
# In[ ]:
# Synthetic single-exponential data set with small Gaussian noise, used to
# sanity-check the fitting machinery before touching the real light curve.
np.random.seed(0)
param0_test= 1#0.05
param1_test= .1#5*pi
param2_test= 10.0
yunc_test = 0.01
nPts_test = int(50)
nThPts_test= int(1e3)
xmin_test = -0.0#*pi
xmax_test = 1.0#*pi
dx_test = 0.01*(xmax_test - xmin_test)
model_test = single_exponential_model
# model_test = sine_wave
# model_test = straight_line
yuncs_test = np.random.normal(yunc_test, 1e-2 * yunc_test, nPts_test)
thdata_test= np.linspace(xmin_test-dx_test, xmax_test+dx_test, nThPts_test)
xdata_test = np.random.uniform(xmin_test, xmax_test, nPts_test)
xdata_test = sort(xdata_test)
ydata_test = model_test([param0_test,param1_test,param2_test])(xdata_test)
yerr_test = np.random.normal(0, yuncs_test, nPts_test)
zdata_test = ydata_test + yerr_test
figure(figsize=(10,10))
plot(thdata_test, model_test([param0_test,param1_test,param2_test])(thdata_test))
errorbar(xdata_test, zdata_test, yunc_test*ones(zdata_test.size), fmt='o')
# # Single Exponential Model
# In[ ]:
# SEM fit setup on the real data: eyeball initial guesses and plot them
# against the observations.
nThPts = int(1e3)
model_SEM = single_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_SEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_SEM_init= 1.0 # by defintion
param1_SEM_init= (ydata.max() - ydata.min())#/100
param2_SEM_init= round(5/(xdata.max() - xdata.min()))
print(param0_SEM_init, param1_SEM_init, param2_SEM_init)
figure(figsize=(10,10))
plot(thdata_SEM, model_SEM([param0_SEM_init,param1_SEM_init,param2_SEM_init])(thdata_SEM))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
model_SEM = single_exponential_model
parameters_SEM = ['max', 'amp1', 'scale1']
def myprior_SEM(cube, ndim, nparams):
    """Map MultiNest unit-cube samples to the SEM parameter priors, in place.

    cube[0] 'max'    ~ U(1 - width/2, 1 + width/2), tightly centred on 1
    cube[1] 'amp1'   ~ U(0, 1) (the MultiNest default, left untouched)
    cube[2] 'scale1' ~ U(-5e3, 5e3)
    """
    width = 1e-3
    cube[0] = (1 - 0.5*width) + cube[0] * width
    cube[1] = cube[1]
    cube[2] = 1e4 * cube[2] - 5e3
def myloglike_SEM(cube, ndim, nparams):
    """Gaussian log-likelihood of the single-exponential model against the
    module-level data (xdata, ydata, yuncs)."""
    residuals = model_SEM(cube)(xdata) - ydata
    return -0.5*(residuals**2. / yuncs**2.).sum()
# In[ ]:
# --- Run MultiNest on the single-exponential model and analyze the output ---
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_SEM = len(parameters_SEM)
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'single_exponential_model'
outputfiles_basename = 'chains/' + planetName + '-' + visitName + '-' + modelName + '-'
start = time()
plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_SEM, myprior_SEM, n_params_SEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename);
# ok, done. Stop our progress watcher
progress.stop();
print('SEM took', time() - start, 'seconds')
# lets analyse the results
a_SEM = pymultinest.Analyzer(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename);
s_SEM = a_SEM.get_stats();
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_SEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_SEM.outputfiles_basename, mode='w') as f:
    json.dump(s_SEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
print("Global Evidence:\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_SEM = pymultinest.PlotMarginalModes(a_SEM)
plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM))
#plt.subplots_adjust(wspace=0, hspace=0)
# Diagonal: 1D marginals; lower triangle: 2D conditionals, corner-plot style.
for i in range(n_params_SEM):
    plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * i + i + 1)
    p_SEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters_SEM[i])
    for j in range(i):
        plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p_SEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
        plt.xlabel(parameters_SEM[i])
        plt.ylabel(parameters_SEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# Per-parameter mode marginals: probability density and cumulative form.
for i in range(n_params_SEM):
    # print(5*n_params, 1, i+1)
    plt.subplot(5*n_params_SEM, 1, i+1)
    p_SEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters_SEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p_SEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters_SEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
# In[ ]:
# Compare the MultiNest best fit to the hand-picked initial guesses.
p_SEM.analyser.get_best_fit()['parameters'], [param0_SEM_init, param1_SEM_init, param2_SEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_SEM, model_SEM([param0_SEM_init,param1_SEM_init, param2_SEM_init])(thdata_SEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_SEM.analyser.get_stats()
# # Unrestricted Double Exponential Model
# In[ ]:
# UDEM fit setup: same data as the SEM fit, with the single ramp amplitude
# split evenly across the two exponential components as the initial guess.
nThPts= int(1e3)
model_UDEM = double_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_UDEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_UDEM_init = 1.0 # by defintion
param1_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param2_UDEM_init = round(5/(xdata.max() - xdata.min()))
param3_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param4_UDEM_init = round(5/(xdata.max() - xdata.min()))
print(param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init)
figure(figsize=(10,10))
plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init,param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
model_UDEM = double_exponential_model
parameters_UDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2']
# def myprior_RDEM(cube, ndim, nparams):
# cube[0] = cube[0] * 1e-3 + (1 - 1e-3/2)# - 10# U(0,2)
# cube[1] = -cube[1] * 5e-3 + 5e-4 # - 10# U(0,1) -- default
# cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000)
# cube[3] = cube[3] * 5e-3 + 5e-4# - 10# U(0,1) -- default
# cube[4] = cube[4] * 1e4 - 5e3# - 1000 # U(0,2000)
def myprior_UDEM(cube, ndim, nparams):
    """Map MultiNest unit-cube samples to the unrestricted DEM priors,
    in place.

    cube[0] 'max'             ~ U(0.995, 1.005), tightly centred on 1
    cube[1]/cube[3] 'amp*'    ~ U(-1, 1): both amplitudes may take either sign
    cube[2]/cube[4] 'scale*'  ~ U(-5e3, 5e3)
    """
    half = 1e-2/2
    cube[0] = cube[0] * 1e-2 + (1 - half)
    for k in (1, 3):
        cube[k] = cube[k] * 2 - 1
    for k in (2, 4):
        cube[k] = cube[k] * 1e4 - 5e3
def myloglike_UDEM(cube, ndim, nparams):
    """Gaussian log-likelihood of the unrestricted double-exponential model
    against the module-level data (xdata, ydata, yuncs)."""
    predicted = model_UDEM(cube)(xdata)
    return -0.5*((predicted - ydata)**2. / yuncs**2.).sum()
# In[ ]:
# --- Run MultiNest on the unrestricted double-exponential model ---
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_UDEM = len(parameters_UDEM)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'unrestricted_double_exponential_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename)
progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_UDEM, myprior_UDEM, n_params_UDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_UDEM = pymultinest.Analyzer(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename)
s_UDEM = a_UDEM.get_stats()
print('UDEM took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_UDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f:
    json.dump(s_UDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_UDEM = pymultinest.PlotMarginalModes(a_UDEM)
plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
# Diagonal: 1D marginals; lower triangle: 2D conditionals, corner-plot style.
for i in range(n_params_UDEM):
    plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * i + i + 1)
    p_UDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters_UDEM[i])
    for j in range(i):
        plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        # p_UDEM.plot_conditional(i, j, with_ellipses=False, with_points=False, grid_points=30)
        p_UDEM.plot_conditional(i, j, with_ellipses=False , with_points=False , grid_points=30, only_interpolate=False, use_log_values=False, marginalization_type='sum')
        plt.xlabel(parameters_UDEM[i])
        plt.ylabel(parameters_UDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# Color cycle reused below to bin posterior samples by log-evidence.
axes_colors = rcParams['axes.prop_cycle'].by_key()['color']
nColors = len(axes_colors)
# In[ ]:
# Range of the posterior log-likelihood column (last column of the
# equally-weighted posterior samples).
minLogE, maxLogE = min(a_UDEM.get_equal_weighted_posterior().T[-1]), max(a_UDEM.get_equal_weighted_posterior().T[-1])
rangeLogE = maxLogE - minLogE
minLogE, maxLogE, rangeLogE, nColors
# In[ ]:
# Histogram of the posterior log-likelihood values (Bayesian-blocks binning).
from astroML.plotting import hist
from statsmodels.robust import scale
hist(a_UDEM.get_equal_weighted_posterior().T[-1], bins='blocks')
# In[ ]:
# Bin edges at whole-MAD offsets below the median log-likelihood.
nSig = 10
mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1])
med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1])
madBins = [med_logE - nSig*mad_logE for nSig in range(nColors)]
# In[ ]:
# Scatter each parameter column against log-likelihood, colored by which
# MAD bin the sample falls in.
fig = figure(figsize=(15,15));
logEchain = a_UDEM.get_equal_weighted_posterior().T[-1]
mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1])
med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1])
madBins = [med_logE - nSig*mad_logE for nSig in range(nColors+1)]
for k in range(5):
    ax = fig.add_subplot(5,1,k+1);
    for nSig in range(nColors):
        for klogE in range(logEchain.size):
            if logEchain[klogE] > madBins[nSig] or logEchain[klogE] < madBins[nSig+1]:
                ax.plot(a_UDEM.get_equal_weighted_posterior().T[k], logEchain,'o', color = axes_colors[nSig], alpha=0.1);
fig.canvas.draw();
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# Per-parameter mode marginals: probability density and cumulative form.
for i in range(n_params_UDEM):
    # print(5*n_params, 1, i+1)
    plt.subplot(5*n_params_UDEM, 1, i+1)
    p_UDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters_UDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p_UDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters_UDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
# In[ ]:
# Compare the MultiNest best fit to the hand-picked initial guesses.
p_UDEM.analyser.get_best_fit()['parameters'], [param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model')
legend(loc=0)
# In[ ]:
p_UDEM.analyser.get_stats()
# # Restricted Double Exponential Model
# In[ ]:
# RDEM fit setup: identical data and initial guesses to the UDEM fit; the
# "restriction" lives entirely in the prior (myprior_RDEM below).
nThPts= int(1e3)
model_RDEM = double_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_RDEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_RDEM_init = 1.0 # by defintion
param1_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param2_RDEM_init = round(5/(xdata.max() - xdata.min()))
param3_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param4_RDEM_init = round(5/(xdata.max() - xdata.min()))
print(param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init)
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init,param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM))
errorbar(xdata, ydata, yuncs, fmt='o')
def show(filepath):
    """Open *filepath* (e.g. a generated pdf) with the platform's default
    viewer; silently does nothing on unrecognized platforms."""
    if os.name == 'mac' or platform == 'darwin':
        subprocess.call(('open', filepath))
        return
    if os.name == 'nt' or platform == 'win32':
        os.startfile(filepath)
        return
    if platform.startswith('linux'):
        subprocess.call(('xdg-open', filepath))
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
# Same functional form as the UDEM fit; only the prior differs.
model_RDEM = double_exponential_model
parameters_RDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2']
def myprior_RDEM(cube, ndim, nparams):
    """Map MultiNest unit-cube samples to the restricted DEM priors, in
    place. The "restriction" relative to myprior_UDEM: amp1 is confined to
    a mostly-negative range and amp2 to a mostly-positive one.

    cube[0] 'max'    ~ U(1 - 5e-4, 1 + 5e-4)
    cube[1] 'amp1'   ~ U(5e-4 - 5e-3, 5e-4)
    cube[2] 'scale1' ~ U(-5e3, 5e3)
    cube[3] 'amp2'   ~ U(5e-4, 5e-3 + 5e-4)
    cube[4] 'scale2' ~ U(-5e3, 5e3)
    """
    half_width = 1e-3/2
    cube[0] = cube[0] * 1e-3 + (1 - half_width)
    cube[1] = 5e-4 + -cube[1] * 5e-3
    cube[2] = cube[2] * 1e4 - 5e3
    cube[3] = 5e-4 + cube[3] * 5e-3
    cube[4] = cube[4] * 1e4 - 5e3
def myloglike_RDEM(cube, ndim, nparams):
    """Gaussian log-likelihood of the restricted double-exponential model
    against the module-level data (xdata, ydata, yuncs)."""
    predicted = model_RDEM(cube)(xdata)
    return -0.5*((predicted - ydata)**2. / yuncs**2.).sum()
# In[ ]:
# --- Run MultiNest on the restricted double-exponential model ---
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_RDEM = len(parameters_RDEM)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'restricted_double_exponential_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_RDEM, myprior_RDEM, n_params_RDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_RDEM = pymultinest.Analyzer(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename)
s_RDEM = a_RDEM.get_stats()
print('RDEM took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
    json.dump(s_RDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_RDEM = pymultinest.PlotMarginalModes(a_RDEM)
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
# Diagonal: 1D marginals; lower triangle: 2D conditionals, corner-plot style.
for i in range(n_params_RDEM):
    plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1)
    p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters_RDEM[i])
    for j in range(i):
        plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
        plt.xlabel(parameters_RDEM[i])
        plt.ylabel(parameters_RDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# Per-parameter mode marginals: probability density and cumulative form.
for i in range(n_params_RDEM):
    # print(5*n_params, 1, i+1)
    plt.subplot(5*n_params_RDEM, 1, i+1)
    p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters_RDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters_RDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
# In[ ]:
# Compare the MultiNest best fit to the hand-picked initial guesses.
p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_RDEM.analyser.get_stats()
# # Compare Unrestricted Double, Restricted Double, and Single Exponential
# In[ ]:
import json
# Persist parameter names and derived statistics for each of the three
# fitted models, then print their global log-evidences for comparison.
# store name of parameters, always useful
with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_SEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_SEM.outputfiles_basename, mode='w') as f:
    json.dump(s_SEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("SEM Global Evidence:\t\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
# store name of parameters, always useful
with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_UDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f:
    json.dump(s_UDEM, f, indent=2)
# print()
# print("-" * 30, 'ANALYSIS', "-" * 30)
print("UDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] ))
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
    json.dump(s_RDEM, f, indent=2)
# print()
# print("-" * 30, 'ANALYSIS', "-" * 30)
print("RDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
# Overlay all three best-fit models on the data.
figure(figsize=(10,10))
plot(thdata_UDEM, model_SEM([param0_SEM_init,param1_SEM_init, param2_SEM_init])(thdata_SEM), '.', label='Initial SEM Model')
plot(thdata_UDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), '--', label='Initial DEM Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN SEM Model')
plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN RDEM Model')
legend(loc=0)
# # Polynomials
# In[ ]:
figure(figsize=(20,20))
from numpy.polynomial import polynomial
def time_polynomial(params):
    """Build the time-axis polynomial term of the systematics model.

    Parameters
    ----------
    params : sequence of float
        Coefficients in increasing-degree order (constant term first).

    Returns
    -------
    callable
        Maps an array ``tdata`` to the polynomial evaluated at ``tdata``;
        with no coefficients the contribution is identically zero.
    """
    if not len(params):
        # No coefficients supplied: contribute a zero vector of matching length.
        return lambda tdata: zeros(tdata.size)
    return lambda tdata: polynomial.polyval(tdata, params)
def orbital_polynomial(params):
    """Build the orbital-phase polynomial term of the systematics model.

    Parameters
    ----------
    params : sequence of float
        Coefficients in increasing-degree order (constant term first).

    Returns
    -------
    callable
        Maps an array ``odata`` to the polynomial evaluated at ``odata``;
        with no coefficients the contribution is identically zero.
    """
    if not len(params):
        # No coefficients supplied: contribute a zero vector of matching length.
        return lambda odata: zeros(odata.size)
    return lambda odata: polynomial.polyval(odata, params)
def wavelength_polynomial(params):
    """Build the wavelength-axis polynomial term of the systematics model.

    Parameters
    ----------
    params : sequence of float
        Coefficients in increasing-degree order (constant term first).

    Returns
    -------
    callable
        Maps an array ``ldata`` to the polynomial evaluated at ``ldata``;
        with no coefficients the contribution is identically zero.
    """
    if not len(params):
        # No coefficients supplied: contribute a zero vector of matching length.
        return lambda ldata: zeros(ldata.size)
    return lambda ldata: polynomial.polyval(ldata, params)
def polynomial_model(params):
    """Combine the time, orbital and wavelength polynomial terms.

    ``params`` is consumed as three consecutive coefficient groups whose
    sizes are taken from the module-level ``nTimeCoeffs``, ``nOrbitCoeffs``
    and ``nWaveCoeffs`` counters (in that order).

    Returns a callable ``f(tdata, odata, ldata)`` summing the three terms.
    """
    coeffs = list(params.copy())
    splitTO = nTimeCoeffs
    splitOW = nTimeCoeffs + nOrbitCoeffs
    timeParams = array(coeffs[:splitTO])
    orbitParams = array(coeffs[splitTO:splitOW])
    waveParams = array(coeffs[splitOW:splitOW + nWaveCoeffs])
    def combined(tdata, odata, ldata):
        return (time_polynomial(timeParams)(tdata)
                + orbital_polynomial(orbitParams)(odata)
                + wavelength_polynomial(waveParams)(ldata))
    return combined
# Smoke-test the polynomial model on random sorted abscissae for every
# combination of 0-3 coefficients per axis.
tdata, xdata, ldata = np.random.uniform(-10,10,(3,100))
tdata.sort()
xdata.sort()
ldata.sort()
# tdata, xdata, ldata = [np.linspace(-10,10,100) for _ in range(3)]
for nTimeCoeffs in range(4):
    for nOrbitCoeffs in range(4):
        for nWaveCoeffs in range(4):
            params = np.random.uniform(-20,20,nTimeCoeffs+nOrbitCoeffs+nWaveCoeffs)
            plot(tdata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
            plot(xdata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
            plot(ldata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
# In[ ]:
# Configure the polynomial fit: data columns, coefficient counts, and a
# dense theory grid spanning slightly beyond the observed x range.
nThPts= int(1e3)
model_Poly = polynomial_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0
# NOTE(review): ``test_data_input_input`` (double "_input") is not defined
# in this chunk -- confirm it is not a typo for ``test_data_input``.
h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_Poly = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_Poly_init = 1.0 # by definition
param1_Poly_init = 1.0
param2_Poly_init = 1.0
param3_Poly_init = 1.0
param4_Poly_init = 1.0
print(param0_Poly_init, param1_Poly_init, param2_Poly_init, param3_Poly_init, param4_Poly_init)
figure(figsize=(10,10))
# NOTE(review): polynomial_model returns a 3-argument callable
# (tdata, odata, ldata) but is called here with a single argument --
# this line looks like it would raise TypeError; confirm intent.
plot(thdata_Poly, model_Poly([param0_Poly_init,param1_Poly_init,param2_Poly_init, param3_Poly_init, param4_Poly_init])(thdata_Poly))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0
h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']
model_Poly = polynomial_model
parameters_Poly = ['timeIntercept', 'timeSlope', 'orbitIntercept', 'orbitSlope', 'orbitQuadratic']
# Width of the uniform prior box (name presumably means "cube K width").
cubeKWith = 1e3
def myprior_Poly(cube, ndim, nparams):
    """MultiNest prior transform for the polynomial model.

    Maps each unit-cube coordinate onto a uniform prior spanning
    [-cubeKWith/2, +cubeKWith/2].  Mutates ``cube`` in place, as required
    by ``pymultinest.run``.
    """
    # BUG FIX: the original read ``for k in len(cube)``, which raises
    # TypeError ('int' object is not iterable).  Iterate over the declared
    # number of dimensions instead -- the MultiNest cube is indexable but
    # is not guaranteed to support len().
    for k in range(ndim):
        cube[k] = cube[k] * cubeKWith - 0.5 * cubeKWith
def myloglike_Poly(cube, ndim, nparams):
    """Gaussian log-likelihood of the polynomial model given the data.

    Evaluates the model at the sampled parameters in ``cube`` and returns
    -0.5 * chi-squared against the module-level ``ydata``/``yuncs``.

    NOTE(review): ``times`` and ``HSTPhase`` are not defined in this chunk
    -- confirm they exist at module level before MultiNest runs.
    NOTE(review): the literal ``0`` passed as ``ldata`` is a plain int; with
    an empty wavelength coefficient set the model calls ``zeros(ldata.size)``
    on it, which would raise AttributeError -- verify.
    """
    chi = 1.  # leftover from the eggbox example; unused below
#     print "cube", [cube[i] for i in range(ndim)], cube
#     for i in range(ndim):
#         chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2#math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.)
#     print "returning", math.pow(2. + chi, 5)
    modelNow = model_Poly(cube)(times, HSTPhase, 0)
    return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_Poly = len(parameters_Poly)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'polynomial_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_Poly, 5*n_params_Poly))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename)
progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_Poly, myprior_Poly, n_params_Poly, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_Poly = pymultinest.Analyzer(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename)
s_Poly = a_Poly.get_stats()
print('Polynomial took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# NOTE(review): this cell follows the *polynomial* run but dumps and plots
# the RDEM analyser (a_RDEM / s_RDEM / p_RDEM) -- likely a copy-paste that
# should reference a_Poly / s_Poly; confirm before relying on these files.
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
    json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
    json.dump(s_RDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_RDEM = pymultinest.PlotMarginalModes(a_RDEM)
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
# Corner-style grid: 1-D marginals on the diagonal, 2-D conditionals above.
for i in range(n_params_RDEM):
    plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1)
    p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
    plt.ylabel("Probability")
    plt.xlabel(parameters_RDEM[i])
    for j in range(i):
        plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1)
        #plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
        p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
        plt.xlabel(parameters_RDEM[i])
        plt.ylabel(parameters_RDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
# Per-mode marginal PDFs and their cumulative versions, one row each.
for i in range(n_params_RDEM):
    # print(5*n_params, 1, i+1)
    plt.subplot(5*n_params_RDEM, 1, i+1)
    p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
    plt.ylabel("Probability")
    plt.xlabel(parameters_RDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
    # outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
    p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
    plt.ylabel("Cumulative probability")
    plt.xlabel(parameters_RDEM[i])
    # plt.savefig(outfile, format='pdf', bbox_inches='tight')
    # plt.close()
# In[ ]:
# Best-fit parameters vs. initial guesses (notebook cell output).
p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init]
# In[ ]:
# Overlay data, initial model, and best-fit model.
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_RDEM.analyser.get_stats()
|
gpl-3.0
|
juangj/selenium
|
py/test/selenium/webdriver/common/frame_switching_tests.py
|
2
|
17536
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
from http.client import BadStatusLine
except ImportError:
from httplib import BadStatusLine
import pytest
from selenium.common.exceptions import (
NoSuchElementException,
NoSuchFrameException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver doesn't do anything fishy when it navigates to a page with frames.
#
# ----------------------------------------------------------------------------------------------
@pytest.fixture(autouse=True)
def restore_default_context(driver):
    # Ensure every test ends back in the top-level browsing context, so a
    # test left inside a frame cannot poison the next one.
    yield
    driver.switch_to.default_content()

def testShouldAlwaysFocusOnTheTopMostFrameAfterANavigationEvent(driver, pages):
    pages.load("frameset.html")
    driver.find_element(By.TAG_NAME, "frameset") # Test passes if this does not throw.

def testShouldNotAutomaticallySwitchFocusToAnIFrameWhenAPageContainingThemIsLoaded(driver, pages):
    pages.load("iframes.html")
    # The heading lives in the top-level document, not inside the iframe.
    driver.find_element(By.ID, "iframe_page_heading")

def testShouldOpenPageWithBrokenFrameset(driver, pages):
    pages.load("framesetPage3.html")
    frame1 = driver.find_element(By.ID, "first")
    driver.switch_to.frame(frame1)
    driver.switch_to.default_content()
    frame2 = driver.find_element(By.ID, "second")
    driver.switch_to.frame(frame2) # IE9 can not switch to this broken frame - it has no window.

# ----------------------------------------------------------------------------------------------
#
# Tests that WebDriver can switch to frames as expected.
#
# ----------------------------------------------------------------------------------------------
# --- Switching by index / name / id / previously-located WebElement ---

def testShouldBeAbleToSwitchToAFrameByItsIndex(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(1)
    assert driver.find_element(By.ID, "pageNumber").text == "2"

def testShouldBeAbleToSwitchToAnIframeByItsIndex(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"

def testShouldBeAbleToSwitchToAFrameByItsName(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("fourth")
    assert driver.find_element(By.TAG_NAME, "frame").get_attribute("name") == "child1"

def testShouldBeAbleToSwitchToAnIframeByItsName(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame("iframe1-name")
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"

def testShouldBeAbleToSwitchToAFrameByItsID(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("fifth")
    assert driver.find_element(By.NAME, "windowOne").text == "Open new window"

def testShouldBeAbleToSwitchToAnIframeByItsID(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame("iframe1")
    assert driver.find_element(By.NAME, "id-name1").get_attribute("value") == "name"

def testShouldBeAbleToSwitchToFrameWithNameContainingDot(driver, pages):
    # A dotted name must be treated as a literal name, not a path.
    pages.load("frameset.html")
    driver.switch_to.frame("sixth.iframe1")
    assert "Page number 3" in driver.find_element(By.TAG_NAME, "body").text

def testShouldBeAbleToSwitchToAFrameUsingAPreviouslyLocatedWebElement(driver, pages):
    pages.load("frameset.html")
    frame = driver.find_element(By.TAG_NAME, "frame")
    driver.switch_to.frame(frame)
    assert driver.find_element(By.ID, "pageNumber").text == "1"

def testShouldBeAbleToSwitchToAnIFrameUsingAPreviouslyLocatedWebElement(driver, pages):
    pages.load("iframes.html")
    frame = driver.find_element(By.TAG_NAME, "iframe")
    driver.switch_to.frame(frame)
    element = driver.find_element(By.NAME, "id-name1")
    assert element.get_attribute("value") == "name"

def testShouldEnsureElementIsAFrameBeforeSwitching(driver, pages):
    # Switching to a non-frame element must raise NoSuchFrameException.
    pages.load("frameset.html")
    frame = driver.find_element(By.TAG_NAME, "frameset")
    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame(frame)

def testFrameSearchesShouldBeRelativeToTheCurrentlySelectedFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("second")
    assert driver.find_element(By.ID, "pageNumber").text == "2"
    # "third" is a sibling frame, invisible from inside "second".
    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("third"))
    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element_by_name("third"))
    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame("second")
    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element_by_name("second"))
    assert driver.find_element(By.ID, "pageNumber").text == "2"

def testShouldSelectChildFramesByChainedCalls(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.frame(driver.find_element_by_name("child2"))
    assert driver.find_element(By.ID, "pageNumber").text == "11"

def testShouldThrowFrameNotFoundExceptionLookingUpSubFramesWithSuperFrameNames(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("second"))

def testShouldThrowAnExceptionWhenAFrameCannotBeFound(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchElementException):
        driver.switch_to.frame(driver.find_element_by_name("Nothing here"))

def testShouldThrowAnExceptionWhenAFrameCannotBeFoundByIndex(driver, pages):
    pages.load("xhtmlTest.html")
    with pytest.raises(NoSuchFrameException):
        driver.switch_to.frame(27)
# --- switch_to.parent_frame() behavior ---

@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.parent_frame()
    driver.switch_to.frame(driver.find_element_by_name("first"))
    assert driver.find_element(By.ID, "pageNumber").text == "1"

@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFrameFromASecondLevelFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("fourth"))
    driver.switch_to.frame(driver.find_element_by_name("child1"))
    driver.switch_to.parent_frame()
    driver.switch_to.frame(driver.find_element_by_name("child2"))
    assert driver.find_element(By.ID, "pageNumber").text == "11"

@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testSwitchingToParentFrameFromDefaultContextIsNoOp(driver, pages):
    pages.load("xhtmlTest.html")
    driver.switch_to.parent_frame()
    assert driver.title == "XHTML Test Page"

@pytest.mark.xfail_phantomjs(raises=WebDriverException)
def testShouldBeAbleToSwitchToParentFromAnIframe(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    driver.switch_to.parent_frame()
    driver.find_element(By.ID, "iframe_page_heading")

# ----------------------------------------------------------------------------------------------
#
# General frame handling behavior tests
#
# ----------------------------------------------------------------------------------------------
def testShouldContinueToReferToTheSameFrameOnceItHasBeenSelected(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(2)
    checkbox = driver.find_element(By.XPATH, "//input[@name='checky']")
    checkbox.click()
    checkbox.submit()
    # TODO(simon): this should not be needed, and is only here because IE's submit returns too
    # soon.
    WebDriverWait(driver, 3).until(EC.text_to_be_present_in_element((By.XPATH, '//p'), 'Success!'))

@pytest.mark.xfail_marionette(raises=WebDriverException,
                              reason='https://github.com/mozilla/geckodriver/issues/610')
def testShouldFocusOnTheReplacementWhenAFrameFollowsALinkToA_TopTargetedPage(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(0)
    driver.find_element(By.LINK_TEXT, "top").click()
    expectedTitle = "XHTML Test Page"
    WebDriverWait(driver, 3).until(EC.title_is(expectedTitle))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "only-exists-on-xhtmltest")))

def testShouldAllowAUserToSwitchFromAnIframeBackToTheMainContentOfThePage(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    driver.switch_to.default_content()
    driver.find_element(By.ID, "iframe_page_heading")

def testShouldAllowTheUserToSwitchToAnIFrameAndRemainFocusedOnIt(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(0)
    driver.find_element(By.ID, "submitButton").click()
    assert getTextOfGreetingElement(driver) == "Success!"

def getTextOfGreetingElement(driver):
    # Helper: wait for the post-submit "greeting" element and return its text.
    return WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "greeting"))).text
def testShouldBeAbleToClickInAFrame(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame("third")
    # This should replace frame "third" ...
    driver.find_element(By.ID, "submitButton").click()
    # driver should still be focused on frame "third" ...
    assert getTextOfGreetingElement(driver) == "Success!"
    # Make sure it was really frame "third" which was replaced ...
    driver.switch_to.default_content()
    driver.switch_to.frame("third")
    assert getTextOfGreetingElement(driver) == "Success!"

def testShouldBeAbleToClickInAFrameThatRewritesTopWindowLocation(driver, pages):
    # Clicking submit inside the frame navigates the *top* window.
    pages.load("click_tests/issue5237.html")
    driver.switch_to.frame(driver.find_element_by_id("search"))
    driver.find_element(By.ID, "submit").click()
    driver.switch_to.default_content()
    WebDriverWait(driver, 3).until(EC.title_is("Target page for issue 5237"))
def testShouldBeAbleToClickInASubFrame(driver, pages):
    """Clicking inside a nested iframe keeps focus on that frame, and the
    replaced document is reachable again after re-switching from the top."""
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_id("sixth"))
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    # This should replace frame "iframe1" inside frame "sixth" ...
    driver.find_element(By.ID, "submitButton").click()
    # driver should still be focused on frame "iframe1" inside frame "sixth" ...
    # BUG FIX: this previously read ``assert getTextOfGreetingElement(driver), "Success!"``
    # -- an assert with a message, which only checks truthiness and could never
    # fail on wrong text.  Compare for equality, matching the sibling tests.
    assert getTextOfGreetingElement(driver) == "Success!"
    # Make sure it was really frame "iframe1" inside frame "sixth" which was replaced ...
    driver.switch_to.default_content()
    driver.switch_to.frame(driver.find_element_by_id("sixth"))
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    assert driver.find_element(By.ID, "greeting").text == "Success!"
def testShouldBeAbleToFindElementsInIframesByXPath(driver, pages):
    pages.load("iframes.html")
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    element = driver.find_element(By.XPATH, "//*[@id = 'changeme']")
    assert element is not None

@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrl(driver, pages):
    # current_url must report the top-level URL even from inside a frame.
    pages.load("frameset.html")
    assert "frameset.html" in driver.current_url
    driver.switch_to.frame(driver.find_element_by_name("second"))
    assert "frameset.html" in driver.current_url

@pytest.mark.xfail_phantomjs
def testGetCurrentUrlReturnsTopLevelBrowsingContextUrlForIframes(driver, pages):
    pages.load("iframes.html")
    assert "iframes.html" in driver.current_url
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    assert "iframes.html" in driver.current_url

@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUs(driver, pages):
    # The page's "killIframe" button removes the iframe we are focused on;
    # "addBackFrame" re-creates it so we can switch in again.
    pages.load("frame_switching_tests/deletingFrame.html")
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    WebDriverWait(driver, 3).until_not(
        EC.presence_of_element_located((By.ID, "iframe1")))
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "iframe1")))
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))

@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithFrameIndex(driver, pages):
    pages.load("frame_switching_tests/deletingFrame.html")
    iframe = 0
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    # we should be in the frame now
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))

@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
def testShouldBeAbleToSwitchToTheTopIfTheFrameIsDeletedFromUnderUsWithWebelement(driver, pages):
    pages.load("frame_switching_tests/deletingFrame.html")
    iframe = driver.find_element(By.ID, "iframe1")
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    # we should be in the frame now
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    driver.switch_to.default_content()
    addIFrame = driver.find_element(By.ID, "addBackFrame")
    addIFrame.click()
    iframe = driver.find_element(By.ID, "iframe1")
    WebDriverWait(driver, 3).until(EC.frame_to_be_available_and_switch_to_it(iframe))
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "success")))
@pytest.mark.xfail_chrome(raises=NoSuchElementException)
@pytest.mark.xfail_phantomjs(raises=BadStatusLine)
@pytest.mark.xfail_marionette(raises=WebDriverException,
                              reason='https://github.com/mozilla/geckodriver/issues/614')
@pytest.mark.xfail_webkitgtk(raises=NoSuchElementException)
def testShouldNotBeAbleToDoAnythingTheFrameIsDeletedFromUnderUs(driver, pages):
    if driver.name == 'firefox' and driver.w3c:
        pytest.skip('Stalls tests, https://bugzilla.mozilla.org/show_bug.cgi?id=1410799')
    pages.load("frame_switching_tests/deletingFrame.html")
    driver.switch_to.frame(driver.find_element_by_id("iframe1"))
    killIframe = driver.find_element(By.ID, "killIframe")
    killIframe.click()
    # Once the focused frame is gone, any further command must fail.
    with pytest.raises(NoSuchFrameException):
        driver.find_element(By.ID, "killIframe").click()

def testShouldReturnWindowTitleInAFrameset(driver, pages):
    pages.load("frameset.html")
    driver.switch_to.frame(driver.find_element_by_name("third"))
    assert "Unique title" == driver.title

def testJavaScriptShouldExecuteInTheContextOfTheCurrentFrame(driver, pages):
    pages.load("frameset.html")
    assert driver.execute_script("return window == window.top")
    driver.switch_to.frame(driver.find_element(By.NAME, "third"))
    assert driver.execute_script("return window != window.top")
def testShouldNotSwitchMagicallyToTheTopWindow(driver, pages):
    """Repeatedly submitting a form inside a frame must keep the driver
    focused on that frame (regression test for issue 4876)."""
    import random  # hoisted: the original re-imported inside the loop
    pages.load("frame_switching_tests/bug4876.html")
    driver.switch_to.frame(0)
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))

    for i in range(20):
        try:
            # renamed from ``input`` to avoid shadowing the builtin
            text_field = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "inputText")))
            submit = WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.ID, "submitButton")))
            text_field.clear()
            # BUG FIX: ``int(random.random())`` is always 0, so every iteration
            # sent the identical "rand0" string; use a genuinely varying value.
            text_field.send_keys("rand%s" % random.randint(0, 999999))
            submit.click()
        finally:
            url = driver.execute_script("return window.location.href")
            # IE6 and Chrome add "?"-symbol to the end of the URL
            if url.endswith("?"):
                url = url[:-1]
            assert pages.url("frame_switching_tests/bug4876_iframe.html") == url
def testGetShouldSwitchToDefaultContext(driver, pages):
    # A fresh page load must implicitly reset focus to the default content.
    pages.load("iframes.html")
    driver.find_element(By.ID, "iframe1")
    driver.switch_to.frame(driver.find_element(By.ID, "iframe1"))
    driver.find_element(By.ID, "cheese") # Found on formPage.html but not on iframes.html.
    pages.load("iframes.html") # This must effectively switch_to.default_content(), too.
    driver.find_element(By.ID, "iframe1")
|
apache-2.0
|
arista-eosplus/ansible
|
lib/ansible/modules/messaging/rabbitmq_binding.py
|
69
|
7328
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_binding
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ bindings
description:
- This module uses rabbitMQ Rest API to create/delete bindings
requirements: [ "requests >= 1.0.0" ]
options:
state:
description:
- Whether the exchange should be present or absent
- Only present implemented atm
choices: [ "present", "absent" ]
required: false
default: present
name:
description:
- source exchange to create binding on
required: true
aliases: [ "src", "source" ]
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
default: false
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
- default vhost is /
required: false
default: "/"
destination:
description:
- destination exchange or queue for the binding
required: true
aliases: [ "dst", "dest" ]
destination_type:
description:
- Either queue or exchange
required: true
choices: [ "queue", "exchange" ]
aliases: [ "type", "dest_type" ]
routing_key:
description:
- routing key for the binding
- default is #
required: false
default: "#"
arguments:
description:
- extra arguments for exchange. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Bind myQueue to directExchange with routing key info
- rabbitmq_binding:
name: directExchange
destination: myQueue
type: queue
routing_key: info
# Bind directExchange to topicExchange with routing key *.info
- rabbitmq_binding:
name: topicExchange
destination: topicExchange
type: exchange
routing_key: '*.info'
'''
import requests
import urllib
import json
def main():
    """Ansible module entry point: ensure a RabbitMQ binding is present or
    absent via the management HTTP API.

    Flow: GET the fully-qualified binding URL to detect existence, honor
    check mode, then POST (create) or DELETE (remove) and report via
    exit_json/fail_json.

    NOTE(review): ``urllib.quote`` is Python 2 only; on Python 3 this is
    ``urllib.parse.quote`` -- confirm the target interpreter.
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            name = dict(required=True, aliases=[ "src", "source" ], type='str'),
            login_user = dict(default='guest', type='str'),
            login_password = dict(default='guest', type='str', no_log=True),
            login_host = dict(default='localhost', type='str'),
            login_port = dict(default='15672', type='str'),
            vhost = dict(default='/', type='str'),
            destination = dict(required=True, aliases=[ "dst", "dest"], type='str'),
            destination_type = dict(required=True, aliases=[ "type", "dest_type"], choices=[ "queue", "exchange" ],type='str'),
            routing_key = dict(default='#', type='str'),
            arguments = dict(default=dict(), type='dict')
        ),
        supports_check_mode = True
    )

    # Destination kind selects the API path segment: q = queue, e = exchange.
    if module.params['destination_type'] == "queue":
        dest_type="q"
    else:
        dest_type="e"

    # "~" is the management API's encoding of an empty routing key ("props").
    if module.params['routing_key'] == "":
        props = "~"
    else:
        props = urllib.quote(module.params['routing_key'],'')

    url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s/%s" % (
            module.params['login_host'],
            module.params['login_port'],
            urllib.quote(module.params['vhost'],''),
            urllib.quote(module.params['name'],''),
            dest_type,
            urllib.quote(module.params['destination'],''),
            props
        )

    # Check if exchange already exists
    r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))

    if r.status_code==200:
        binding_exists = True
        response = r.json()
    elif r.status_code==404:
        binding_exists = False
        response = r.text
    else:
        # Any other status is unexpected; fail_json exits the module.
        module.fail_json(
            msg = "Invalid response from RESTAPI when trying to check if exchange exists",
            details = r.text
        )

    if module.params['state']=='present':
        change_required = not binding_exists
    else:
        change_required = binding_exists

    # Exit if check_mode
    if module.check_mode:
        module.exit_json(
            changed= change_required,
            name = module.params['name'],
            details = response,
            arguments = module.params['arguments']
        )

    # Do changes
    if change_required:
        if module.params['state'] == 'present':
            # POST without the props segment creates the binding; routing key
            # and arguments travel in the JSON body instead.
            url = "http://%s:%s/api/bindings/%s/e/%s/%s/%s" % (
                    module.params['login_host'],
                    module.params['login_port'],
                    urllib.quote(module.params['vhost'],''),
                    urllib.quote(module.params['name'],''),
                    dest_type,
                    urllib.quote(module.params['destination'],'')
                )
            r = requests.post(
                    url,
                    auth = (module.params['login_user'],module.params['login_password']),
                    headers = { "content-type": "application/json"},
                    data = json.dumps({
                        "routing_key": module.params['routing_key'],
                        "arguments": module.params['arguments']
                    })
                )
        elif module.params['state'] == 'absent':
            # DELETE uses the fully-qualified URL (including props) from above.
            r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))

        # 201 = created (POST), 204 = no content (DELETE).
        if r.status_code == 204 or r.status_code == 201:
            module.exit_json(
                changed = True,
                name = module.params['name'],
                destination = module.params['destination']
            )
        else:
            module.fail_json(
                msg = "Error creating exchange",
                status = r.status_code,
                details = r.text
            )
    else:
        module.exit_json(
            changed = False,
            name = module.params['name']
        )
# import module snippets
# Ansible convention: star-import provides AnsibleModule and helpers.
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
|
gpl-3.0
|
jmighion/ansible
|
lib/ansible/utils/module_docs_fragments/nxos.py
|
87
|
4041
|
#
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for NX-OS connection options.

    Modules that speak to Cisco NX-OS devices extend their DOCUMENTATION
    with this fragment so the common transport options are documented once.
    """

    # Standard files documentation fragment
    # NOTE: the ``timeout`` option previously used the invalid key
    # ``require:`` -- corrected to ``required:`` below.
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport. The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device. This value applies to either I(cli) or I(nxapi). The port
        value will default to the appropriate transport common port if
        none is provided in the task. (cli=22, http=80, https=443).
    required: false
    default: 0 (use common port)
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device. This value is used to authenticate
        either the CLI login or the nxapi authentication depending on which
        transport is used. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device. This is a common argument used for either I(cli)
        or I(nxapi) transports. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  timeout:
    description:
      - Specifies the timeout in seconds for communicating with the network device
        for either connecting or sending commands. If the timeout is
        exceeded before the operation is completed, the module will error.
        NX-API can be slow to return on long-running commands (sh mac, sh bgp, etc).
    required: false
    default: 10
    version_added: 2.3
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device. This argument is only used for the I(cli)
        transport. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
    required: false
  transport:
    description:
      - Configures the transport connection to use when connecting to the
        remote device. The transport argument supports connectivity to the
        device over cli (ssh) or nxapi.
    required: true
    default: cli
  use_ssl:
    description:
      - Configures the I(transport) to use SSL if set to true only when the
        C(transport=nxapi), otherwise this value is ignored.
    required: false
    default: no
    choices: ['yes', 'no']
  validate_certs:
    description:
      - If C(no), SSL certificates will not be validated. This should only be used
        on personally controlled sites using self-signed certificates. If the transport
        argument is not nxapi, this value is ignored.
    choices: ['yes', 'no']
  provider:
    description:
      - Convenience method that allows all I(nxos) arguments to be passed as
        a dict object. All constraints (required, choices, etc) must be
        met either by individual arguments or values in this dict.
    required: false
    default: null
"""
|
gpl-3.0
|
jinnykoo/wuyisj.com
|
src/oscar/apps/customer/notifications/views.py
|
8
|
3485
|
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _, ungettext
from django.utils.timezone import now
from django.contrib import messages
from django.views import generic
from oscar.core.loading import get_model
from oscar.core.utils import redirect_to_referrer
from oscar.apps.customer.mixins import PageTitleMixin
from oscar.views.generic import BulkEditMixin

# Resolve the model through Oscar's dynamic loader so that projects which
# fork/override the 'customer' app get their own Notification class.
Notification = get_model('customer', 'Notification')
class NotificationListView(PageTitleMixin, generic.ListView):
    """Base list view for a user's notifications.

    Subclasses set ``list_type`` ('inbox' or 'archive') and restrict the
    queryset to the matching location.
    """
    model = Notification
    template_name = 'customer/notifications/list.html'
    context_object_name = 'notifications'
    paginate_by = 20
    page_title = _("Notifications")
    active_tab = 'notifications'

    def get_context_data(self, **kwargs):
        # Expose the list type so the shared template can adapt its markup.
        context = super(NotificationListView, self).get_context_data(**kwargs)
        context['list_type'] = self.list_type
        return context
class InboxView(NotificationListView):
    """List the notifications sitting in the user's inbox.

    Viewing the inbox marks every unread notification as read, but the
    ones that were unread at render time are flagged so the template can
    highlight them.
    """
    list_type = 'inbox'

    def get_queryset(self):
        queryset = self.model._default_manager.filter(
            recipient=self.request.user,
            location=self.model.INBOX)
        # Flag the currently-unread notifications for the template...
        for notification in queryset:
            if not notification.is_read:
                notification.is_new = True
        # ...and only then mark the whole inbox as read.
        self.mark_as_read(queryset)
        return queryset

    def mark_as_read(self, queryset):
        # Only touch rows that have never been read.
        queryset.filter(date_read=None).update(date_read=now())
class ArchiveView(NotificationListView):
    """List the notifications the user has archived."""
    list_type = 'archive'

    def get_queryset(self):
        manager = self.model._default_manager
        return manager.filter(
            recipient=self.request.user,
            location=self.model.ARCHIVE)
class DetailView(PageTitleMixin, generic.DetailView):
    """Display a single notification belonging to the current user."""
    model = Notification
    template_name = 'customer/notifications/detail.html'
    context_object_name = 'notification'
    active_tab = 'notifications'

    def get_page_title(self):
        """Append the (tag-stripped) subject to the page title."""
        subject = strip_tags(self.object.subject)
        return u'%s: %s' % (_('Notification'), subject)

    def get_queryset(self):
        # Users may only view notifications addressed to them.
        return self.model._default_manager.filter(
            recipient=self.request.user)
class UpdateView(BulkEditMixin, generic.RedirectView):
    """Bulk archive or delete notifications selected via checkboxes."""
    model = Notification
    actions = ('archive', 'delete')
    checkbox_object_name = 'notification'

    def get_object_dict(self, ids):
        # Restrict lookups to the requesting user's own notifications.
        queryset = self.model.objects.filter(recipient=self.request.user)
        return queryset.in_bulk(ids)

    def get_success_response(self):
        return redirect_to_referrer(
            self.request, 'customer:notifications-inbox')

    def archive(self, request, notifications):
        for notification in notifications:
            notification.archive()
        count = len(notifications)
        msg = ungettext(
            '%(count)d notification archived',
            '%(count)d notifications archived', count) % {'count': count}
        messages.success(request, msg)
        return self.get_success_response()

    def delete(self, request, notifications):
        for notification in notifications:
            notification.delete()
        count = len(notifications)
        msg = ungettext(
            '%(count)d notification deleted',
            '%(count)d notifications deleted', count) % {'count': count}
        messages.success(request, msg)
        return self.get_success_response()
|
bsd-3-clause
|
maartenq/ansible
|
lib/ansible/modules/database/misc/elasticsearch_plugin.py
|
24
|
9640
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Mathew Davies <[email protected]>
# (c) 2017, Sam Doran <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
- Manages Elasticsearch plugins.
version_added: "2.0"
author:
- Mathew Davies (@ThePixelDeveloper)
- Sam Doran (@samdoran)
options:
name:
description:
- Name of the plugin to install.
required: True
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
src:
description:
- Optionally set the source location to retrieve the plugin from. This can be a file://
URL to install from a local file, or a remote URL. If this is not set, the plugin
location is just based on the name.
- The name parameter must match the descriptor in the plugin ZIP specified.
- Is only used if the state would change, which is solely checked based on the name
parameter. If, for example, the plugin is already installed, changing this has no
effect.
- For ES 1.x use url.
required: False
version_added: "2.7"
url:
description:
- Set exact URL to download the plugin from (Only works for ES 1.x).
- For ES 2.x and higher, use src.
required: False
timeout:
description:
- "Timeout setting: 30s, 1m, 1h..."
- Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0.
default: 1m
force:
description:
- "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails."
default: False
version_added: "2.7"
plugin_bin:
description:
- Location of the plugin binary. If this file is not found, the default plugin binaries will be used.
- The default changed in Ansible 2.4 to None.
plugin_dir:
description:
- Your configured plugin directory specified in Elasticsearch
default: /usr/share/elasticsearch/plugins/
proxy_host:
description:
- Proxy host to use during plugin installation
version_added: "2.1"
proxy_port:
description:
- Proxy port to use during plugin installation
version_added: "2.1"
version:
description:
- Version of the plugin to be installed.
If plugin exists with previous version, it will NOT be updated
'''
EXAMPLES = '''
# Install Elasticsearch Head plugin in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
state: present
# Install a specific version of Elasticsearch Head in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
version: 2.0.0
# Uninstall Elasticsearch head plugin in Elasticsearch 2.x
- elasticsearch_plugin:
name: mobz/elasticsearch-head
state: absent
# Install a specific plugin in Elasticsearch >= 5.0
- elasticsearch_plugin:
name: analysis-icu
state: present
# Install the ingest-geoip plugin with a forced installation
- elasticsearch_plugin:
name: ingest-geoip
state: present
force: yes
'''
import os
from ansible.module_utils.basic import AnsibleModule
# Map of desired module state -> plugin tool subcommand.
PACKAGE_STATE_MAP = dict(
    present="install",
    absent="remove"
)

# Default locations of the plugin binary: the newer `elasticsearch-plugin`
# first, then the legacy `plugin` binary (the code treats a binary whose
# basename is 'plugin' as the old command).
PLUGIN_BIN_PATHS = tuple([
    '/usr/share/elasticsearch/bin/elasticsearch-plugin',
    '/usr/share/elasticsearch/bin/plugin'
])
def parse_plugin_repo(string):
    """Derive the plugin's repo/directory name from its install name.

    Accepts either ``pluginname`` or ``username/pluginname`` and strips
    the conventional ``elasticsearch-`` / ``es-`` prefixes so the result
    matches the on-disk plugin directory name.
    """
    parts = string.split("/")
    # "username/pluginname" -> keep the plugin part only.
    repo = parts[1] if len(parts) > 1 else parts[0]

    # Strip well-known prefixes (checked in this order).
    for prefix in ("elasticsearch-", "es-"):
        if repo.startswith(prefix):
            return repo[len(prefix):]

    return repo
def is_plugin_present(plugin_name, plugin_dir):
    """Return True when the plugin already has a directory under plugin_dir."""
    plugin_path = os.path.join(plugin_dir, plugin_name)
    return os.path.isdir(plugin_path)
def parse_error(string):
    """Extract the message following the first "ERROR: " marker.

    Falls back to returning the input unchanged when no marker is found.
    """
    marker = "ERROR: "
    _, found, tail = string.partition(marker)
    return tail.strip() if found else string
def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force):
    """Install an Elasticsearch plugin.

    Builds the install command for either the legacy ``plugin`` binary
    (ES 1.x/2.x) or the newer ``elasticsearch-plugin`` binary and runs it
    (skipped in check mode).

    Returns a ``(changed, cmd, stdout, stderr)`` tuple; calls
    ``module.fail_json()`` when the command exits non-zero.
    """
    cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]]
    is_old_command = (os.path.basename(plugin_bin) == 'plugin')

    # Timeout and version are only valid for plugin, not elasticsearch-plugin
    if is_old_command:
        if timeout:
            cmd_args.append("--timeout %s" % timeout)
        if version:
            # The legacy binary expects the version appended to the plugin
            # name (name/version). The previous ``cmd_args[2] = plugin_name``
            # assignment clobbered the --timeout argument appended above (and
            # raised IndexError when timeout was falsy); the amended name is
            # appended exactly once, below.
            plugin_name = plugin_name + '/' + version

    if proxy_host and proxy_port:
        cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port))

    # Legacy ES 1.x direct-download URL.
    if url:
        cmd_args.append("--url %s" % url)

    # Batch mode auto-answers permission prompts (needed when console
    # detection fails or the plugin requires extra permissions).
    if force:
        cmd_args.append("--batch")

    # Install from an explicit source location when given, otherwise by name.
    if src:
        cmd_args.append(src)
    else:
        cmd_args.append(plugin_name)

    cmd = " ".join(cmd_args)

    if module.check_mode:
        rc, out, err = 0, "check mode", ""
    else:
        rc, out, err = module.run_command(cmd)

    if rc != 0:
        reason = parse_error(out)
        module.fail_json(msg="Installing plugin '%s' failed: %s" % (plugin_name, reason), err=err)

    return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name):
    """Uninstall a plugin, failing the module if the command errors."""
    repo = parse_plugin_repo(plugin_name)
    cmd = " ".join([plugin_bin, PACKAGE_STATE_MAP["absent"], repo])

    if module.check_mode:
        rc, out, err = 0, "check mode", ""
    else:
        rc, out, err = module.run_command(cmd)

    if rc != 0:
        reason = parse_error(out)
        module.fail_json(msg="Removing plugin '%s' failed: %s" % (plugin_name, reason), err=err)

    return True, cmd, out, err
def get_plugin_bin(module, plugin_bin=None):
    """Locate a usable plugin binary, preferring the user-supplied path.

    Falls back to searching the system PATH plus the directories of the
    well-known default locations; fails the module when nothing is found.
    """
    # A user-supplied path that exists wins outright.
    if plugin_bin and os.path.isfile(plugin_bin):
        return plugin_bin

    # Try the supplied name first, then the well-known defaults.
    candidate_paths = list(PLUGIN_BIN_PATHS)
    if plugin_bin and plugin_bin not in candidate_paths:
        candidate_paths.insert(0, plugin_bin)

    # Split the full paths into the directories to search and the binary
    # names to look for.
    search_dirs = list(set(os.path.dirname(path) for path in candidate_paths))
    binary_names = list(set(os.path.basename(path) for path in candidate_paths))

    found = None
    for binary_name in binary_names:
        found = module.get_bin_path(binary_name, opt_dirs=search_dirs)
        if found:
            break

    if not found:
        module.fail_json(msg='%s does not exist and no other valid plugin installers were found. Make sure Elasticsearch is installed.' % plugin_bin)

    return found
def main():
    """Module entry point: converge the Elasticsearch plugin to the
    desired state (present/absent)."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
            src=dict(default=None),
            url=dict(default=None),
            timeout=dict(default="1m"),
            # type="bool" is required so that playbook values like "no" or
            # "false" are coerced to False instead of remaining truthy
            # non-empty strings (which would wrongly enable --batch).
            force=dict(type="bool", default=False),
            plugin_bin=dict(type="path"),
            plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"),
            proxy_host=dict(default=None),
            proxy_port=dict(default=None),
            version=dict(default=None)
        ),
        mutually_exclusive=[("src", "url")],
        supports_check_mode=True
    )

    name = module.params["name"]
    state = module.params["state"]
    url = module.params["url"]
    src = module.params["src"]
    timeout = module.params["timeout"]
    force = module.params["force"]
    plugin_bin = module.params["plugin_bin"]
    plugin_dir = module.params["plugin_dir"]
    proxy_host = module.params["proxy_host"]
    proxy_port = module.params["proxy_port"]
    version = module.params["version"]

    # Search provided path and system paths for a valid plugin binary.
    plugin_bin = get_plugin_bin(module, plugin_bin)

    repo = parse_plugin_repo(name)
    present = is_plugin_present(repo, plugin_dir)

    # Short-circuit when the plugin is already in the desired state.
    if (present and state == "present") or (state == "absent" and not present):
        module.exit_json(changed=False, name=name, state=state)

    if state == "present":
        changed, cmd, out, err = install_plugin(module, plugin_bin, name, version, src, url, proxy_host, proxy_port, timeout, force)
    elif state == "absent":
        changed, cmd, out, err = remove_plugin(module, plugin_bin, name)

    module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
# Invoke the entry point only when executed as a script (how Ansible runs it).
if __name__ == '__main__':
    main()
|
gpl-3.0
|
gg7/sentry
|
src/sentry/migrations/0146_auto__add_field_auditlogentry_ip_address.py
|
36
|
29197
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AuditLogEntry.ip_address'
db.add_column('sentry_auditlogentry', 'ip_address',
self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True),
keep_default=False)
    def backwards(self, orm):
        """Reverse the migration by dropping the ip_address column."""
        # Deleting field 'AuditLogEntry.ip_address'
        db.delete_column('sentry_auditlogentry', 'ip_address')
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
|
c-bit/c-bit
|
qa/rpc-tests/listtransactions.py
|
1
|
10721
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The C-Bit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction
import cStringIO
import binascii
def txFromHex(hexstring):
    """Deserialize *hexstring* (raw transaction hex) into a CTransaction."""
    raw_bytes = binascii.unhexlify(hexstring)
    tx_obj = CTransaction()
    tx_obj.deserialize(cStringIO.StringIO(raw_bytes))
    return tx_obj
def check_array_result(object_array, to_match, expected):
    """
    Scan *object_array* (a list of JSON-like dicts) for entries whose
    values equal every key/value pair in *to_match*.  Each matching entry
    must also carry every key/value pair in *expected*.

    Raises AssertionError if a matched entry violates *expected*, or if
    no entry matched *to_match* at all.
    """
    matched_count = 0
    for item in object_array:
        # Skip entries that do not satisfy all the match criteria.
        if any(item[key] != value for key, value in to_match.items()):
            continue
        # A matched entry must satisfy every expectation exactly.
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
        matched_count = matched_count+1
    if matched_count == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
class ListTransactionsTest(BitcoinTestFramework):
    """Exercise the listtransactions RPC.

    Covers: plain sends, confirmations, send-to-self, sendmany account
    attribution, watch-only addresses, and the BIP125
    'bip125-replaceable' flag ("no"/"yes"/"unknown") for sent and
    received transactions.
    """

    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        # Unconfirmed: sender sees a 'send', receiver a 'receive', 0 confs.
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self: shows up both as a 'send' and as a 'receive' entry
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
        # Watch-only address: listed only when includeWatchonly=True.
        multisig = self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
        txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
        self.nodes[1].generate(1)
        self.sync_all()
        assert(len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0)
        check_array_result(self.nodes[0].listtransactions("watchonly", 100, 0, True),
                           {"category":"receive","amount":Decimal("0.1")},
                           {"txid":txid, "account" : "watchonly"} )
        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        # Check whether a transaction signals opt-in RBF itself
        # (BIP125: any input sequence number below 0xfffffffe signals it).
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
        assert(not is_opt_in(self.nodes[0], txid_1))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable":"no"})
        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        # Create tx2 using createrawtransaction
        inputs = [{"txid":utxo_to_use["txid"], "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransaction(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
        # ...and check the result
        assert(not is_opt_in(self.nodes[1], txid_2))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable":"no"})
        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 0.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = txFromHex(tx3)
        # Force the first input's sequence to 0 so the tx signals RBF.
        tx3_modified.vin[0].nSequence = 0
        tx3 = binascii.hexlify(tx3_modified.serialize()).decode('utf-8')
        tx3_signed = self.nodes[0].signrawtransaction(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
        assert(is_opt_in(self.nodes[0], txid_3))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable":"yes"})
        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout":utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 0.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransaction(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
        assert(not is_opt_in(self.nodes[1], txid_4))
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"yes"})
        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        # bump the fee; wrap in int() so nValue stays an integer --
        # 0.004*100000000 is a float and would otherwise contaminate the
        # amount, breaking the fixed-width integer serialization.
        tx3_b.vout[0].nValue -= int(0.004*100000000)
        tx3_b = binascii.hexlify(tx3_b.serialize()).decode('utf-8')
        tx3_b_signed = self.nodes[0].signrawtransaction(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, True)
        assert(is_opt_in(self.nodes[0], txid_3b))
        check_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        sync_mempools(self.nodes)
        check_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable":"unknown"})
        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert(txid_3b not in self.nodes[0].getrawmempool())
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
# Allow running the test directly from the command line.
if __name__ == '__main__':
    ListTransactionsTest().main()
|
mit
|
mspark93/VTK
|
Rendering/Tk/Testing/Python/cursor3D.py
|
5
|
9545
|
#!/usr/bin/env python
'''
This little example shows how a cursor can be created in
image viewers, and renderers. The standard TkImageViewerWidget and
TkRenderWidget bindings are used. There is a new binding:
middle button in the image viewer sets the position of the cursor.
'''
import sys
from functools import partial
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
if sys.hexversion < 0x03000000:
# for Python2
import Tkinter as tkinter
from Tkinter import Pack
else:
# for Python3
import tkinter
from tkinter import Pack
#from vtk.tk.vtkTkRenderWindowInteractor import vtkTkRenderWindowInteractor
from vtk.tk.vtkTkRenderWidget import vtkTkRenderWidget
from vtk.tk.vtkTkImageViewerWidget import vtkTkImageViewerWidget
# Tkinter constants.
# Module-level aliases so the GUI layout code below can use the bare Tk
# names (LEFT, BOTH, TRUE, ...) without the ``tkinter.`` prefix.
E = tkinter.E
W = tkinter.W
N = tkinter.N
S = tkinter.S
HORIZONTAL = tkinter.HORIZONTAL
VERTICAL = tkinter.VERTICAL
RIGHT = tkinter.RIGHT
LEFT = tkinter.LEFT
TOP = tkinter.TOP
BOTTOM = tkinter.BOTTOM
X = tkinter.X
BOTH = tkinter.BOTH
NO = tkinter.NO
YES = tkinter.YES
NORMAL = tkinter.NORMAL
DISABLED = tkinter.DISABLED
TRUE = tkinter.TRUE
FALSE = tkinter.FALSE
# Global values.
# Initial cursor position (in unmagnified image coordinates).
CURSOR_X = 20
CURSOR_Y = 20
CURSOR_Z = 20
# Per-axis magnification factors fed to vtkImageMagnify below.
IMAGE_MAG_X = 4
IMAGE_MAG_Y = 4
IMAGE_MAG_Z = 1
class Cursor3DViewer(Testing.vtkTest):
    '''
    Provide a testing framework for cursor3D.

    Note:
        root, the top-level widget for Tk,
        tkrw, the vtkTkRenderWidget and
        viewer, the Image viewer
    are accessible from any function in this class
    after SetUp() has run.
    '''
    def SetUp(self):
        '''
        Set up cursor3D: build the VTK pipeline (magnified image cursor,
        axes, ray-cast volume rendering) and the Tk GUI around it.
        '''
        def OnClosing():
            # Leave the Tk event loop when a window is closed.
            self.root.quit()

        def ViewerDown(viewer):
            ViewerSetZSlice(viewer, viewer.GetZSlice() - 1)

        def ViewerUp(viewer):
            ViewerSetZSlice(viewer, viewer.GetZSlice() + 1)

        def ViewerSetZSlice(viewer, z):
            # Change the displayed slice and keep the label in sync.
            viewer.SetZSlice(z)
            txt = 'slice: ' + str(z)
            sliceLabel.configure(text=txt)
            viewer.Render()

        def SetCursorFromViewer(event):
            x = int(event.x)
            y = int(event.y)
            # We have to flip y axis because tk uses upper right origin.
            self.root.update_idletasks()
            # configure() returns option tuples; element 4 appears to be the
            # current value of the 'height' option -- TODO confirm.
            height = int(self.tkvw.configure()['height'][4])
            y = height - y
            z = self.viewer.GetZSlice()
            # NOTE(review): '/' is true division under Python 3, so these
            # arguments may be floats; verify that is intended.
            SetCursor( x / IMAGE_MAG_X, y / IMAGE_MAG_Y, z / IMAGE_MAG_Z )

        def SetCursor(x, y, z):
            # NOTE(review): these assignments create locals that shadow the
            # module-level CURSOR_* globals (there is no 'global' statement);
            # the module globals are never updated.
            CURSOR_X = x
            CURSOR_Y = y
            CURSOR_Z = z
            axes.SetOrigin(CURSOR_X,CURSOR_Y,CURSOR_Z)
            imageCursor.SetCursorPosition(
                CURSOR_X * IMAGE_MAG_X,
                CURSOR_Y * IMAGE_MAG_Y,
                CURSOR_Z * IMAGE_MAG_Z)
            self.viewer.Render()
            self.renWin.Render()

        # Pipeline stuff.
        reader = vtk.vtkSLCReader()
        reader.SetFileName(VTK_DATA_ROOT + "/Data/neghip.slc")
        # Cursor stuff
        magnify = vtk.vtkImageMagnify()
        magnify.SetInputConnection(reader.GetOutputPort())
        magnify.SetMagnificationFactors(IMAGE_MAG_X, IMAGE_MAG_Y ,IMAGE_MAG_Z)
        imageCursor = vtk.vtkImageCursor3D()
        imageCursor.SetInputConnection(magnify.GetOutputPort())
        imageCursor.SetCursorPosition(
            CURSOR_X*IMAGE_MAG_X,
            CURSOR_Y*IMAGE_MAG_Y,
            CURSOR_Z*IMAGE_MAG_Z)
        imageCursor.SetCursorValue(255)
        imageCursor.SetCursorRadius(50*IMAGE_MAG_X)
        axes = vtk.vtkAxes()
        axes.SymmetricOn()
        axes.SetOrigin(CURSOR_X, CURSOR_Y, CURSOR_Z)
        axes.SetScaleFactor(50.0)
        axes_mapper = vtk.vtkPolyDataMapper()
        axes_mapper.SetInputConnection(axes.GetOutputPort())
        axesActor = vtk.vtkActor()
        axesActor.SetMapper(axes_mapper)
        axesActor.GetProperty().SetAmbient(0.5)
        # Image viewer stuff.
        self.viewer = vtk.vtkImageViewer()
        self.viewer.SetInputConnection(imageCursor.GetOutputPort())
        self.viewer.SetZSlice(CURSOR_Z*IMAGE_MAG_Z)
        self.viewer.SetColorWindow(256)
        self.viewer.SetColorLevel(128)
        # Create transfer functions for opacity and color.
        opacity_transfer_function = vtk.vtkPiecewiseFunction()
        opacity_transfer_function.AddPoint(20, 0.0)
        opacity_transfer_function.AddPoint(255, 0.2)
        color_transfer_function = vtk.vtkColorTransferFunction()
        color_transfer_function.AddRGBPoint(0, 0, 0, 0)
        color_transfer_function.AddRGBPoint(64, 1, 0, 0)
        color_transfer_function.AddRGBPoint(128, 0, 0, 1)
        color_transfer_function.AddRGBPoint(192, 0, 1, 0)
        color_transfer_function.AddRGBPoint(255, 0, .2, 0)
        # Create properties, mappers, volume actors, and ray cast function.
        volume_property = vtk.vtkVolumeProperty()
        volume_property.SetColor(color_transfer_function)
        # volume_property.SetColor(color_transfer_function[0],
        #                          color_transfer_function[1],
        #                          color_transfer_function[2])
        volume_property.SetScalarOpacity(opacity_transfer_function)
        composite_function = vtk.vtkVolumeRayCastCompositeFunction()
        volume_mapper = vtk.vtkVolumeRayCastMapper()
        volume_mapper.SetInputConnection(reader.GetOutputPort())
        volume_mapper.SetVolumeRayCastFunction(composite_function)
        volume = vtk.vtkVolume()
        volume.SetMapper(volume_mapper)
        volume.SetProperty(volume_property)
        # Create outline.
        outline = vtk.vtkOutlineFilter()
        outline.SetInputConnection(reader.GetOutputPort())
        outline_mapper = vtk.vtkPolyDataMapper()
        outline_mapper.SetInputConnection(outline.GetOutputPort())
        outlineActor = vtk.vtkActor()
        outlineActor.SetMapper(outline_mapper)
        outlineActor.GetProperty().SetColor(1, 1, 1)
        # Create the renderer.
        ren = vtk.vtkRenderer()
        ren.AddActor(axesActor)
        ren.AddVolume(volume)
        ren.SetBackground(0.1, 0.2, 0.4)
        self.renWin = vtk.vtkRenderWindow()
        self.renWin.AddRenderer(ren)
        self.renWin.SetSize(256, 256)
        # Create the GUI: two renderer widgets and a quit button.
        self.root = tkinter.Tk()
        self.root.title("cursor3D")
        # Define what to do when the user explicitly closes a window.
        self.root.protocol("WM_DELETE_WINDOW", OnClosing)
        # Help label, frame and quit button
        helpLabel = tkinter.Label(self.root,
            text=
            "MiddleMouse (or shift-LeftMouse) in image viewer to place cursor")
        displayFrame = tkinter.Frame(self.root)
        quitButton = tkinter.Button(self.root, text= "Quit", command=OnClosing)
        # Pack the GUI.
        helpLabel.pack()
        displayFrame.pack(fill=BOTH, expand=TRUE)
        quitButton.pack(fill=X)
        # Create the viewer widget.
        viewerFrame = tkinter.Frame(displayFrame)
        viewerFrame.pack(padx=3, pady=3, side=LEFT, anchor=N,
                         fill=BOTH, expand=FALSE)
        self.tkvw = vtkTkImageViewerWidget(viewerFrame, iv=self.viewer,
                                           width=264, height=264)
        viewerControls = tkinter.Frame(viewerFrame)
        viewerControls.pack(side=BOTTOM, anchor=S, fill=BOTH, expand=TRUE)
        self.tkvw.pack(side=TOP, anchor=N, fill=BOTH, expand=FALSE)
        # NOTE(review): a list is not a valid Tk 'command'; these command
        # values are replaced with functools.partial callables at the end of
        # SetUp() below, so the lists are never invoked -- confirm.
        downButton = tkinter.Button(viewerControls, text="Down",
                                    command=[ViewerDown,self.viewer])
        upButton = tkinter.Button(viewerControls, text="Up",
                                  command=[ViewerUp,self.viewer])
        sliceLabel = tkinter.Label(viewerControls,
                                   text="slice: "+str(CURSOR_Z*IMAGE_MAG_Z))
        downButton.pack(side=LEFT, expand=TRUE, fill=BOTH)
        upButton.pack(side=LEFT, expand=TRUE, fill=BOTH)
        sliceLabel.pack(side=LEFT, expand=TRUE, fill=BOTH)
        # Create the render widget
        renderFrame = tkinter.Frame(displayFrame)
        renderFrame.pack(padx=3, pady=3, side=LEFT, anchor=N,
                         fill=BOTH, expand=TRUE)
        self.tkrw = vtkTkRenderWidget(renderFrame, rw=self.renWin,
                                      width=264, height=264)
        self.tkrw.pack(side=TOP, anchor=N, fill=BOTH, expand=TRUE)
        # Bindings
        self.tkvw.BindTkImageViewer()
        self.tkrw.BindTkRenderWidget()
        # Lets add an extra binding of the middle button in the image viewer
        # to set the cursor location.
        self.tkvw.bind('<Button-2>',SetCursorFromViewer)
        self.tkvw.bind('<Shift-Button-1>',SetCursorFromViewer)
        # Associate the functions with the buttons and label.
        #
        downButton.config(command=partial(ViewerDown, self.viewer))
        upButton.config(command=partial(ViewerUp, self.viewer))

    def DoIt(self):
        # Build everything, render once, and compare against the baseline.
        self.SetUp()
        self.viewer.Render()
        self.tkrw.Render()
        self.root.update()
        # If you want to interact and use the sliders etc,
        # uncomment the following line.
        #self.root.mainloop()
        img_file = "cursor3D.png"
        Testing.compareImage(self.viewer.GetRenderWindow(), Testing.getAbsImagePath(img_file))
        # Testing.interact()
if __name__ == '__main__':
    cases = [(Cursor3DViewer, 'DoIt')]
    # NOTE(review): presumably deleted so the test framework's module
    # introspection does not pick the class up a second time -- confirm.
    del Cursor3DViewer
    Testing.main(cases)
|
bsd-3-clause
|
mvidalgarcia/indico
|
indico/modules/users/models/users_test.py
|
2
|
5467
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import itertools
import pytest
from speaklater import is_lazy_string
from sqlalchemy.exc import IntegrityError
from indico.modules.users import User
from indico.modules.users.models.users import UserTitle
def test_can_be_modified():
    """A user is modifiable by himself and by admins, but not by others."""
    user = User()
    admin = User(is_admin=True)
    stranger = User()
    # user can modify himself
    assert user.can_be_modified(user)
    # admin can modify anyone
    assert user.can_be_modified(admin)
    # normal users can't
    assert not user.can_be_modified(stranger)
def test_full_name():
    """``full_name`` joins first and last name and never shows the title."""
    titled_user = User(first_name='Guinea', last_name='Pig', title=UserTitle.prof)
    assert titled_user.full_name == 'Guinea Pig'
@pytest.mark.parametrize(('last_name_first', 'last_name_upper', 'abbrev_first_name', 'expected'), (
    (False, False, False, 'Guinea Pig'),
    (False, False, True, 'G. Pig'),
    (False, True, False, 'Guinea PIG'),
    (False, True, True, 'G. PIG'),
    (True, False, False, 'Pig, Guinea'),
    (True, False, True, 'Pig, G.'),
    (True, True, False, 'PIG, Guinea'),
    (True, True, True, 'PIG, G.'),
))
def test_get_full_name(last_name_first, last_name_upper, abbrev_first_name, expected):
    """``get_full_name`` honours the three formatting flags; ``show_title``
    only changes the output when the user has a non-empty title."""
    user = User(first_name='Guinea', last_name='Pig', title=UserTitle.none)
    name = user.get_full_name(last_name_first=last_name_first, last_name_upper=last_name_upper,
                              abbrev_first_name=abbrev_first_name, show_title=False)
    assert name == expected
    # titled name with no title is the same
    titled_name = user.get_full_name(last_name_first=last_name_first, last_name_upper=last_name_upper,
                                     abbrev_first_name=abbrev_first_name, show_title=True)
    assert titled_name == expected
    # titled name with a non-empty title
    user.title = UserTitle.mr
    titled_name = user.get_full_name(last_name_first=last_name_first, last_name_upper=last_name_upper,
                                     abbrev_first_name=abbrev_first_name, show_title=True)
    assert titled_name == 'Mr {}'.format(expected)
@pytest.mark.parametrize(('first_name', 'last_name'), (
    ('Guinea', ''),
    ('', 'Pig'),
    ('', '')
))
def test_get_full_name_empty_names(first_name, last_name):
    """``get_full_name`` must not fail when one or both names are empty."""
    user = User(first_name=first_name, last_name=last_name, title=UserTitle.none)
    for last_name_first, last_name_upper, abbrev_first_name in itertools.product((True, False), repeat=3):
        # Just make sure it doesn't fail. We don't really care about the output.
        # It's only allowed for pending users so in most cases it only shows up
        # in the ``repr`` of such a user.
        user.get_full_name(last_name_first=last_name_first, last_name_upper=last_name_upper,
                           abbrev_first_name=abbrev_first_name)
def test_emails(db):
    """Primary and secondary emails are both exposed through ``all_emails``."""
    # NOTE(review): the email literals in this test all look redacted to the
    # same placeholder ('[email protected]'); compare with upstream -- the set
    # assertions below may no longer distinguish primary from secondary.
    user = User(first_name='Guinea', last_name='Pig')
    db.session.add(user)
    db.session.flush()
    # A freshly created user has no email at all.
    assert user.email is None
    assert not user.secondary_emails
    user.email = '[email protected]'
    db.session.flush()
    assert user.all_emails == {'[email protected]'}
    user.secondary_emails.add('[email protected]')
    db.session.flush()
    assert user.all_emails == {'[email protected]', '[email protected]'}
def test_make_email_primary(db):
    """``make_email_primary`` promotes a secondary email to primary.

    Promoting an address that is not among the user's secondary emails
    raises ``ValueError``.
    """
    # NOTE(review): the email literals look redacted to one placeholder
    # ('[email protected]'); the ValueError branch and the final assertions may
    # not exercise what the upstream test intended -- confirm.
    user = User(first_name='Guinea', last_name='Pig', email='[email protected]')
    db.session.add(user)
    db.session.flush()
    with pytest.raises(ValueError):
        user.make_email_primary('[email protected]')
    user.secondary_emails = {'[email protected]', '[email protected]'}
    db.session.flush()
    user.make_email_primary('[email protected]')
    # Expire so the next attribute access reloads state from the database.
    db.session.expire(user)
    assert user.email == '[email protected]'
    assert user.secondary_emails == {'[email protected]', '[email protected]'}
def test_deletion(db):
    """Setting ``is_deleted`` propagates to every email row of the user."""
    user = User(first_name='Guinea', last_name='Pig', email='[email protected]', secondary_emails=['[email protected]'])
    db.session.add(user)
    db.session.flush()
    assert not user.is_deleted
    assert all(not ue.is_user_deleted for ue in user._all_emails)
    user.is_deleted = True
    db.session.flush()
    # Every associated email row now carries the deleted flag.
    assert all(ue.is_user_deleted for ue in user._all_emails)
def test_deletion_no_primary_email():
    """Setting ``is_deleted`` must not fail without a primary email.

    Very unlikely case, but we never want to try setting the deleted flag
    on a ``None`` primary email.
    """
    user = User()
    assert user.email is None
    user.is_deleted = True
def test_settings():
    """``user.settings`` must be a settings proxy bound to that user."""
    owner = User(id=123)
    # make sure it's a bound settings proxy
    assert owner.settings._bound_args == (owner,)
def test_title(db):
    """``title`` is '' when unset and a lazy localized string otherwise;
    it is still queryable by its enum value."""
    pig = User(first_name='Guinea', last_name='Pig')
    db.session.add(pig)
    db.session.flush()
    assert pig.title == ''
    pig.title = UserTitle.prof
    assert pig.title == UserTitle.prof.title
    assert is_lazy_string(pig.title)
    assert User.find_one(title=UserTitle.prof) == pig
@pytest.mark.parametrize(('first_name', 'last_name'), (
    ('Guinea', ''),
    ('', 'Pig'),
    ('', '')
))
def test_no_names(db, first_name, last_name):
    """Non-pending users must have both names; the database enforces it."""
    with pytest.raises(IntegrityError):
        db.session.add(User(first_name=first_name, last_name=last_name))
        db.session.flush()
def test_no_names_pending(db):
    """Pending users may be created with both names empty."""
    pending_user = User(first_name='', last_name='', is_pending=True)
    db.session.add(pending_user)
    db.session.flush()
|
mit
|
soellman/copernicus
|
cpc/dataflow/connection.py
|
2
|
20714
|
# This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
log=logging.getLogger(__name__)
import cpc.util
import apperror
import keywords
import vtype
import function_io
"""Set of classes describing instance connections."""
class ConnError(apperror.ApplicationError):
    """Raised when an instance-connection specification is malformed."""
    pass
def splitIOName(name, expectedDirection=None):
    """Split an input/output name into a 3-tuple, as:
       instance-name:'in'/'out'.itemname.subItemname[subItem2name]
       (except for 'self')

       Checks whether the in/out specifier in the name corresponds with
       expectedDirection, if given. If expectedDirection is not given, the
       direction must be specified in name.

       name -- the full item name to split
       expectedDirection -- the direction keyword the caller expects
                            (e.g. keywords.In/Out/SubIn/SubOut), or None

       returns a tuple of (instance-name, direction, item-list), where
       direction is one of function_io.inputs/outputs/subnetInputs/
       subnetOutputs.
    """
    instance=None
    direction=None
    ioitem=None
    #subItem=None
    # split into fullInstance and ioitem
    srcp0=name.split(keywords.SubTypeSep,1)
    if len(srcp0) < 2:
        # NOTE(review): split(sep, 1) yields fewer than 2 parts only when
        # the separator is absent, so this find() can never be >= 0 here;
        # this raise looks like dead code -- confirm.
        if name.find(keywords.SubTypeSep) >= 0:
            raise ConnError("Item '%s': syntax error"%name)
        # no item part: the whole name is the instance specifier
        fullInstance=name
        fullIoItem=None
    else:
        fullInstance=srcp0[0]
        #fullIoItem=".%s"%srcp0[1] # TODO: fix this so it's more flexible
        fullIoItem=srcp0[1]
    # now split fullInstance into the instance name and the in/out specifier
    srcp1=fullInstance.rsplit(keywords.InstSep,1)
    dirnm=srcp1[-1]
    # NOTE(review): if there is no InstSep in the name, instance and dirnm
    # are the same string (rsplit returned a single element).
    instance=srcp1[0]
    if (dirnm==keywords.In or dirnm==keywords.Out):
        if (expectedDirection is not None) and (dirnm != expectedDirection):
            raise ConnError("%s: expected %s item, not %s"%
                            (name, expectedDirection, dirnm))
        # on 'self', plain in/out refer to the subnet side of the I/O
        if instance != keywords.Self:
            if dirnm == keywords.In:
                direction=function_io.inputs
            else:
                direction=function_io.outputs
        else:
            if dirnm == keywords.In:
                direction=function_io.subnetInputs
            else:
                direction=function_io.subnetOutputs
    elif (dirnm==keywords.ExtIn or dirnm==keywords.ExtOut):
        if (expectedDirection is not None) and (dirnm != expectedDirection):
            raise ConnError("%s: expected %s item, not %s"%
                            (name, expectedDirection, dirnm))
        # external I/O (ext_in/ext_out) only makes sense on 'self'
        if instance != keywords.Self:
            raise ConnError(
                    "%s: can't specify external I/O on non-self instance %s"%
                    (name, instance))
        else:
            if dirnm == keywords.ExtIn:
                direction=function_io.inputs
            else:
                direction=function_io.outputs
    elif (dirnm==keywords.SubIn or dirnm==keywords.SubOut):
        if (expectedDirection is not None) and (dirnm != expectedDirection):
            raise ConnError("%s: expected %s item, not %s"%
                            (name, expectedDirection, dirnm))
        # in this case, 'self' doesn't change anything.
        if dirnm == keywords.SubIn:
            direction=function_io.subnetInputs
        else:
            direction=function_io.subnetOutputs
    else:
        # no recognizable direction keyword in the name
        if expectedDirection is None:
            raise ConnError("Item %s ambigiuous on in/out"%name)
        elif srcp1[-1]!="":
            raise ConnError("Syntax error in in/out specifier in %s"%name)
        else:
            # NOTE(review): 'dirstr' is assigned but never read, and
            # 'direction' stays None here, so the caller receives a None
            # direction for names like "inst:". Presumably this was meant
            # to map expectedDirection to a function_io direction --
            # TODO confirm.
            dirstr=expectedDirection
    # now split ioitem
    #if fullIoItem is not None:
    #    # TODO fix this so we check for closing brackets etc.
    #    # for now, we just replace the brackets with dots.
    #    ioitemlist=fullIoItem.replace('[', keywords.SubTypeSep).\
    #                    replace(']', '').split(keywords.SubTypeSep)
    ##    ioitem=ioitemlist[0]
    #    subItems=ioitemlist[1:]
    #    for i in range(len(subItems)):
    #        if subItems[i].isdigit():
    #            subItems[i]=int(subItems[i])
    #else:
    #    subItems=[]
    #log.debug("instance=%s, direction=%s, ioitem=%s, subitems=%s"%
    #          (str(instance), str(direction), str(ioitem), str(subItems)))
    if fullIoItem is not None:
        ioitem=vtype.parseItemList(fullIoItem)
    else:
        ioitem=[]
    return (instance, direction, ioitem)
def makeConnectionFromDesc(network, srcStr, dstStr):
    """Build a Connection from two textual I/O descriptions.

       Both srcStr and dstStr are parsed with splitIOName() and the
       resulting pieces are handed to makeConnection()."""
    srcParts=splitIOName(srcStr)
    dstParts=splitIOName(dstStr)
    return makeConnection(network,
                          srcParts[0], srcParts[1], srcParts[2],
                          dstParts[0], dstParts[1], dstParts[2])
def makeInitialValueFromDesc(network, dstStr, val):
    """Make a connection object carrying an initial value, given a network
       and a splitIO-able name for the destination."""
    (instName, instDir, itemList)=splitIOName(dstStr)
    return makeInitialValue(network, instName, instDir, itemList, val)
def _getInstanceIO(inst, direction):
    """Return inst's I/O collection matching the given function_io
       direction (inputs/outputs or their subnet counterparts).

       Raises ConnError for an unrecognized direction; the previous
       inline dispatch silently fell through and later failed with an
       UnboundLocalError instead."""
    if direction == function_io.inputs:
        return inst.getInputs()
    elif direction == function_io.outputs:
        return inst.getOutputs()
    elif direction == function_io.subnetInputs:
        return inst.getSubnetInputs()
    elif direction == function_io.subnetOutputs:
        return inst.getSubnetOutputs()
    raise ConnError("Unknown I/O direction: %s"%str(direction))

def makeConnection(network,
                   srcInstanceName, srcDir, srcItemList,
                   dstInstanceName, dstDir, dstItemList):
    """Make a connection object based on a network, and names for source and
       destination.

       network = the network the instance(s) belong to
       srcInstanceName = the name of the source instance
       srcDir = direction (function_io.in/out/sub_in/sub_out) of source item
       srcItemList = the source item list (array subscripts, etc.)
       dstInstanceName = the name of the destination instance
       dstDir = direction (function_io.in/out/sub_in/sub_out) of dest. item
       dstItemList = the destination item list

       Raises ConnError if either direction is not a known function_io
       direction. Returns the new Connection object."""
    srcInst=network.getInstance(srcInstanceName)
    srcIO=_getInstanceIO(srcInst, srcDir)
    dstInst=network.getInstance(dstInstanceName)
    dstIO=_getInstanceIO(dstInst, dstDir)
    return Connection(srcInst, srcIO, srcItemList, dstInst, dstIO, dstItemList)
def makeInitialValue(network,
                     dstInstanceName, dstDir, dstItemList,
                     val):
    """Create a source-less Connection that assigns the initial value val
       to the destination item named by dstInstanceName/dstDir/dstItemList
       in the given network."""
    inst=network.getInstance(dstInstanceName)
    # pick the I/O collection matching the destination direction
    if dstDir==function_io.inputs:
        io=inst.getInputs()
    elif dstDir==function_io.outputs:
        io=inst.getOutputs()
    elif dstDir==function_io.subnetInputs:
        io=inst.getSubnetInputs()
    elif dstDir==function_io.subnetOutputs:
        io=inst.getSubnetOutputs()
    return Connection(None, None, None, inst, io, dstItemList, val)
def copyConnection(conn, dstNetwork):
    """Re-create conn between the equivalently named instances of
       dstNetwork, and return the new Connection."""
    dstInst=conn.getDstInstance()
    if conn.getSrcInstance() is None:
        # no source instance: this is an initial-value assignment
        return makeInitialValue(dstNetwork,
                                dstInst.getName(),
                                conn.getDstIO().getDir(),
                                conn.getDstItemList(),
                                conn.getInitialValue())
    srcInst=conn.getSrcInstance()
    return makeConnection(dstNetwork,
                          srcInst.getName(),
                          conn.getSrcIO().getDir(),
                          conn.getSrcItemList(),
                          dstInst.getName(),
                          conn.getDstIO().getDir(),
                          conn.getDstItemList())
class Connection(object):
    """Describes a link between an instance output and an instance input, or
       an input's initial value (if the connection has no source instance)."""
    # __slots__ keeps per-connection memory low; a network can hold many
    # connection objects.
    __slots__=['srcInstance', 'srcIO', 'srcItemList', 'dstInstance', 'dstIO',
               'dstItemList', 'initialValue', 'implicit', 'srcExternal',
               'dstExternal', 'srcAcp', 'dstAcp']
    def __init__(self,
                 srcInstance, srcIO, srcItemList,
                 dstInstance, dstIO, dstItemList,
                 initialValue=None):
        """Initialize a connection.

           srcInstance  = the function instance of the connection's source
                          (an output), or None
           srcIO        = the source output item (the inputs/outputs/..
                          object), or None
           srcItemList  = the source output item list, or None
           dstInstance  = the function instance of the connection's
                          destination (an input)
           dstIO        = the dest. input item (the inputs/outputs/.. object)
           dstItemList  = the connection's destination (input) item list
           initialValue = the connection's initial value (or None); only
                          valid if srcInstance is None

           Raises ConnError when neither or both of srcInstance and
           initialValue are given."""
        self.srcInstance=srcInstance
        self.srcIO=srcIO
        self.srcItemList=srcItemList
        self.dstInstance=dstInstance
        self.dstIO=dstIO
        self.dstItemList=dstItemList
        self.initialValue=initialValue
        # a connection must have exactly one of: a source instance, or an
        # initial value
        if self.srcInstance is None and self.initialValue is None:
            raise ConnError("Both source instance and initial value empty")
        if self.srcInstance is not None and self.initialValue is not None:
            raise ConnError("Both source instance and initial value set")
        # whether the connection is implicit in the network; implicit
        # connections are skipped when the state is written out
        # (active.writeXML)
        self.implicit=False
        self.srcExternal=False # whether the source is an 'ext_in' object
        self.dstExternal=False # whether the destination is an 'ext_out' object
        # source and destination active connection points, used when making
        # changes to active networks
        self.srcAcp = None
        self.dstAcp = None
    def markImplicit(self):
        """Mark this connection as implicit: it shouldn't be written out
           when the state is written."""
        self.implicit=True
    def isImplicit(self):
        """Check whether this connection is implicit: it shouldn't be written
           out when the state is written."""
        return self.implicit
    def getSrcInstance(self):
        """Return the source function instance (None for initial values)."""
        return self.srcInstance
    def getSrcIO(self):
        """Return the source I/O collection object."""
        return self.srcIO
    def getSrcItemList(self):
        """Return the source item list."""
        return self.srcItemList
    def getDstInstance(self):
        """Return the destination function instance."""
        return self.dstInstance
    def getDstIO(self):
        """Return the destination I/O collection object."""
        return self.dstIO
    def getDstItemList(self):
        """Return the destination item list."""
        return self.dstItemList
    def getInitialValue(self):
        """Return the initial value, or None if there is a source instance."""
        return self.initialValue
    def setInitialValue(self, value):
        """Replace the initial value, maintaining reference counts on both
           the old and the new value."""
        if value is not None:
            value.addRef()
        if self.initialValue is not None:
            self.initialValue.rmRef()
        self.initialValue=value
    def isSrcExternal(self):
        """Return whether the source is an 'external' source (i.e., 'self's
           non-subnet I/O)."""
        return self.srcExternal
    def isDstExternal(self):
        """Return whether the destination is an 'external' destination (i.e.,
           'self's non-subnet I/O)."""
        return self.dstExternal
    def connect(self):
        """Connect both ends of the connection.

           Normally a connection runs from a source output to a destination
           input. The exception is the 'self' instance, whose plain
           inputs/outputs may be wired to its subnet inputs/outputs; such
           endpoints are flagged 'external' and exempted from the direction
           checks. After checking, the connection registers itself with the
           instance(s) on both ends.

           Raises ConnError on direction mismatches."""
        # check whether the destination is self's external (non-subnet)
        # output
        if self.dstInstance.getName() == keywords.Self:
            if not self.dstIO.direction.isInput():
                if not self.dstIO.direction.isInSubnet():
                    self.dstExternal=True
                else:
                    raise ConnError("Trying to connect to a self.sub_out: %s"%
                                    (self.dstString()))
        # check whether the source is self's external (non-subnet) input
        if ( (self.srcInstance is not None) and
             self.srcInstance.getName() == keywords.Self):
            if self.srcIO.direction.isInput():
                if not self.srcIO.direction.isInSubnet():
                    self.srcExternal=True
                else:
                    raise ConnError("Trying to connect from a self.sub_in: %s"%
                                    (self.srcString()))
        if not (self.srcExternal or self.dstExternal):
            # the normal case: the destination must be an input...
            if not self.dstIO.direction.isInput():
                raise ConnError("Trying to connect an input as dest: %s->%s, %s, %s"%
                                (self.srcString(), self.dstString(),
                                 str(self.srcIO.direction.isInSubnet()),
                                 str(self.dstIO.direction.isInSubnet())))
            # ...and the source (if any) must be an output
            if self.srcInstance is not None:
                if self.srcIO.direction.isInput():
                    raise ConnError("Trying to connect an input as source")
                # subnet I/O may only be connected on the 'self' instance
                if self.srcIO.direction.isInSubnet():
                    if not self.srcInstance.getName() == keywords.Self:
                        raise ConnError("Trying to connect to non-self subnet")
            if self.dstIO.direction.isInSubnet():
                if not self.dstInstance.getName() == keywords.Self:
                    raise ConnError("Trying to connect to non-self subnet")
        # register the connection with the instances on both ends; the
        # boolean argument tells the instance whether it is the destination
        if self.srcInstance is not None:
            if self.srcIO.direction == function_io.outputs:
                self.srcInstance.addOutputConnection(self, False)
            elif self.srcIO.direction == function_io.subnetOutputs:
                self.srcInstance.addSubnetOutputConnection(self, False)
            elif self.srcIO.direction == function_io.inputs:
                self.srcInstance.addInputConnection(self, False)
            elif self.srcIO.direction == function_io.subnetInputs:
                self.srcInstance.addSubnetInputConnection(self, False)
        if self.dstIO.direction == function_io.inputs:
            self.dstInstance.addInputConnection(self, True)
        elif self.dstIO.direction == function_io.subnetInputs:
            self.dstInstance.addSubnetInputConnection(self, True)
        elif self.dstIO.direction == function_io.outputs:
            self.dstInstance.addOutputConnection(self, True)
        elif self.dstIO.direction == function_io.subnetOutputs:
            self.dstInstance.addSubnetOutputConnection(self, True)
    def disconnect(self):
        """Disconnect both ends of the connection by unregistering it from
           the instance(s) it connects."""
        if self.srcInstance is not None:
            if not self.srcIO.direction.isInSubnet():
                self.srcInstance.removeOutputConnection(self)
            else:
                self.srcInstance.removeSubnetOutputConnection(self)
        if not self.dstIO.direction.isInSubnet():
            self.dstInstance.removeInputConnection(self)
        else:
            self.dstInstance.removeSubnetInputConnection(self)
    def srcString(self):
        """Return the source as a splitIO()-able string, or "" if there is
           no source instance (initial value)."""
        if self.srcInstance is None:
            return ""
        itemStr=vtype.itemListStr(self.srcItemList)
        srcDir=self.srcIO.getDir()
        # 'self' I/O is presented under its external names: plain in/out
        # become ext_in/ext_out, and subnet in/out become plain in/out.
        # (This was previously two separate if-chains that only worked by
        # accident; folded into one elif chain with identical behavior.)
        if self.srcInstance.getName() == keywords.Self:
            if srcDir == function_io.inputs:
                srcDirStr=keywords.ExtIn
            elif srcDir == function_io.outputs:
                srcDirStr=keywords.ExtOut
            elif srcDir == function_io.subnetInputs:
                srcDirStr=keywords.In
            elif srcDir == function_io.subnetOutputs:
                srcDirStr=keywords.Out
        else:
            srcDirStr=str(srcDir)
        retstr="%s:%s%s"%(self.srcInstance.getName(), srcDirStr, itemStr)
        return retstr
    def dstString(self):
        """Return the destination as a splitIO()-able string."""
        itemStr=vtype.itemListStr(self.dstItemList)
        dstDir=self.dstIO.getDir()
        # same 'self' renaming as in srcString()
        if self.dstInstance.getName() == keywords.Self:
            if dstDir == function_io.inputs:
                dstDirStr=keywords.ExtIn
            elif dstDir == function_io.outputs:
                dstDirStr=keywords.ExtOut
            elif dstDir == function_io.subnetInputs:
                dstDirStr=keywords.In
            elif dstDir == function_io.subnetOutputs:
                dstDirStr=keywords.Out
        else:
            dstDirStr=str(dstDir)
        retstr="%s:%s%s"%(self.dstInstance.getName(), dstDirStr, itemStr)
        return retstr
    def writeXML(self, outf, indent=0):
        """Write a connection out as XML.

           outf   = the output stream to write to
           indent = the indentation level

           NOTE(review): attribute values are interpolated without XML
           escaping; item names containing '"', '<' or '&' would produce
           malformed XML -- confirm whether such names are possible."""
        indstr=cpc.util.indStr*indent
        if self.srcInstance is not None:
            outf.write('%s<connection src="%s" dest="%s" />\n'%
                       (indstr, self.srcString(), self.dstString()))
        else:
            # an initial-value assignment: simple values go in the 'value'
            # attribute, compound values are written as nested XML
            val=self.initialValue.type.valueToLiteral(self.initialValue.value)
            tp=self.initialValue.type.getFullName()
            if not self.initialValue.type.isCompound():
                outf.write('%s<assign type="%s" value="%s" dest="%s" />\n'%
                           (indstr, tp, val, self.dstString()))
            else:
                outf.write('%s<assign type="%s" dest="%s" />\n'%
                           (indstr, tp, self.dstString()))
                self.initialValue.writeXML(outf, indent+1)
                outf.write('%s</assign>\n'%indstr)
|
gpl-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.