repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (15 distinct values)
---|---|---|---|---|---|
harri88/harri88.github.io | node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/vim.py | 364 | 1976 | # -*- coding: utf-8 -*-
"""
pygments.styles.vim
~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by vim.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Token
class VimStyle(Style):
"""
Styles somewhat like vim 7.0
"""
background_color = "#000000"
highlight_color = "#222222"
default_style = "#cccccc"
styles = {
Token: "#cccccc",
Whitespace: "",
Comment: "#000080",
Comment.Preproc: "",
Comment.Special: "bold #cd0000",
Keyword: "#cdcd00",
Keyword.Declaration: "#00cd00",
Keyword.Namespace: "#cd00cd",
Keyword.Pseudo: "",
Keyword.Type: "#00cd00",
Operator: "#3399cc",
Operator.Word: "#cdcd00",
Name: "",
Name.Class: "#00cdcd",
Name.Builtin: "#cd00cd",
Name.Exception: "bold #666699",
Name.Variable: "#00cdcd",
String: "#cd0000",
Number: "#cd00cd",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#cd0000",
Generic.Inserted: "#00cd00",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
| mit |
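The file above is the standard `vim` colour scheme bundled with Pygments, registered under the style name `vim`. For context, a minimal sketch of how such a style is consumed through the public Pygments API (illustrative only; the highlighted snippet is made up):

```python
# Illustrative use of the vim style via the standard Pygments entry points.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "def greet(name):\n    return 'hello ' + name\n"
# style='vim' resolves to the VimStyle class defined in pygments/styles/vim.py;
# full=True embeds the colour definitions from VimStyle.styles as CSS.
html = highlight(code, PythonLexer(), HtmlFormatter(style='vim', full=True))
print(html[:200])
```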
jmcorgan/gnuradio | gr-blocks/python/blocks/qa_wavfile.py | 51 | 2216 | #!/usr/bin/env python
#
# Copyright 2008,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import os
from os.path import getsize
g_in_file = os.path.join(os.getenv("srcdir"), "test_16bit_1chunk.wav")
g_extra_header_offset = 36
g_extra_header_len = 18
class test_wavefile(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001_checkwavread(self):
wf = blocks.wavfile_source(g_in_file)
self.assertEqual(wf.sample_rate(), 8000)
def test_002_checkwavcopy(self):
infile = g_in_file
outfile = "test_out.wav"
wf_in = blocks.wavfile_source(infile)
wf_out = blocks.wavfile_sink(outfile,
wf_in.channels(),
wf_in.sample_rate(),
wf_in.bits_per_sample())
self.tb.connect(wf_in, wf_out)
self.tb.run()
wf_out.close()
# we're losing all extra header chunks
self.assertEqual(getsize(infile) - g_extra_header_len, getsize(outfile))
in_f = file(infile, 'rb')
out_f = file(outfile, 'rb')
in_data = in_f.read()
out_data = out_f.read()
out_f.close()
os.remove(outfile)
# cut extra header chunks from the input file
self.assertEqual(in_data[:g_extra_header_offset] + \
in_data[g_extra_header_offset + g_extra_header_len:], out_data)
if __name__ == '__main__':
gr_unittest.run(test_wavefile, "test_wavefile.xml")
| gpl-3.0 |
LegitSavage/namebench | nb_third_party/graphy/line_chart.py | 205 | 4253 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to line charts."""
import copy
import warnings
from graphy import common
class LineStyle(object):
"""Represents the style for a line on a line chart. Also provides some
convenient presets.
Object attributes (Passed directly to the Google Chart API. Check there for
details):
width: Width of the line
on: Length of a line segment (for dashed/dotted lines)
off: Length of a break (for dashed/dotted lines)
color: Color of the line. A hex string, like 'ff0000' for red. Optional,
AutoColor will fill this in for you automatically if empty.
Some common styles, such as LineStyle.dashed, are available:
LineStyle.solid()
LineStyle.dashed()
LineStyle.dotted()
LineStyle.thick_solid()
LineStyle.thick_dashed()
LineStyle.thick_dotted()
"""
# Widths
THIN = 1
THICK = 2
# Patterns
# ((on, off) tuples, as passed to LineChart.AddLine)
SOLID = (1, 0)
DASHED = (8, 4)
DOTTED = (2, 4)
def __init__(self, width, on, off, color=None):
"""Construct a LineStyle. See class docstring for details on args."""
self.width = width
self.on = on
self.off = off
self.color = color
@classmethod
def solid(cls):
return LineStyle(1, 1, 0)
@classmethod
def dashed(cls):
return LineStyle(1, 8, 4)
@classmethod
def dotted(cls):
return LineStyle(1, 2, 4)
@classmethod
def thick_solid(cls):
return LineStyle(2, 1, 0)
@classmethod
def thick_dashed(cls):
return LineStyle(2, 8, 4)
@classmethod
def thick_dotted(cls):
return LineStyle(2, 2, 4)
class LineChart(common.BaseChart):
"""Represents a line chart."""
def __init__(self, points=None):
super(LineChart, self).__init__()
if points is not None:
self.AddLine(points)
def AddLine(self, points, label=None, color=None,
pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):
"""Add a new line to the chart.
This is a convenience method which constructs the DataSeries and appends it
for you. It returns the new series.
points: List of equally-spaced y-values for the line
label: Name of the line (used for the legend)
color: Hex string, like 'ff0000' for red
pattern: Tuple for (length of segment, length of gap). i.e.
LineStyle.DASHED
width: Width of the line (i.e. LineStyle.THIN)
markers: List of Marker objects to attach to this line (see DataSeries
for more info)
"""
if color is not None and isinstance(color[0], common.Marker):
warnings.warn('Your code may be broken! '
'You passed a list of Markers instead of a color. The '
'old argument order (markers before color) is deprecated.',
DeprecationWarning, stacklevel=2)
style = LineStyle(width, pattern[0], pattern[1], color=color)
series = common.DataSeries(points, label=label, style=style,
markers=markers)
self.data.append(series)
return series
def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,
label=None):
"""DEPRECATED"""
warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',
DeprecationWarning, stacklevel=2)
return self.AddLine(points, color=color, width=style.width,
pattern=(style.on, style.off), markers=markers,
label=label)
class Sparkline(LineChart):
"""Represent a sparkline. These behave like LineCharts,
mostly, but come without axes.
"""
| apache-2.0 |
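A minimal usage sketch of the API defined in this file, using only calls that appear above; the `from graphy import line_chart` import path is an assumption based on the file's location alongside `graphy/common.py`:

```python
# Hypothetical usage of LineChart / LineStyle as defined in line_chart.py above.
from graphy import line_chart  # assumed import path

chart = line_chart.LineChart()
# Defaults: solid pattern, thin width; color is filled in later by AutoColor.
chart.AddLine([1, 4, 9, 16, 25], label='squares')
# Explicit style: dashed, thick, red.
chart.AddLine([1, 2, 3, 4, 5], label='linear',
              color='ff0000',
              pattern=line_chart.LineStyle.DASHED,
              width=line_chart.LineStyle.THICK)
```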
tianweizhang/nova | nova/virt/libvirt/vif.py | 2 | 27960 | # Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
import copy
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.BoolOpt('use_virtio_for_bridges',
default=True,
help='Use virtio for bridge interfaces with KVM/QEMU'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
valid_models = {
'qemu': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'kvm': [network_model.VIF_MODEL_VIRTIO,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN],
'xen': [network_model.VIF_MODEL_NETFRONT,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000],
'lxc': [],
'uml': [],
}
if vif_model is None:
return True
if virt_type not in valid_models:
raise exception.UnsupportedVirtType(virt=virt_type)
return vif_model in valid_models[virt_type]
class LibvirtGenericVIFDriver(object):
"""Generic VIF driver for libvirt networking."""
def __init__(self, get_connection):
self.get_connection = get_connection
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
def get_base_config(self, instance, vif, image_meta,
inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
driver = None
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
vif_model = image_meta.get('properties',
{}).get('hw_vif_model')
if vif_model is not None:
model = vif_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
virt_type == "qemu"):
driver = "qemu"
if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
virt=virt_type)
designer.set_vif_guest_frontend_config(
conf, vif['address'], model, driver)
return conf
def get_bridge_name(self, vif):
return vif['network']['bridge']
def get_ovs_interfaceid(self, vif):
return vif.get('ovs_interfaceid') or vif['id']
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_firewall_required(self, vif):
if vif.is_neutron_filtering_enabled():
return False
if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver":
return True
return False
def get_config_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
"""Get VIF configurations for bridge type."""
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_bridge(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(vif),
self.get_ovs_interfaceid(vif),
self.get_vif_devname(vif))
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance, newvif, image_meta,
inst_type, virt_type)
def get_config_ovs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ovs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ovs_bridge(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
inst_type,
virt_type)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance,
vif,
image_meta,
inst_type,
virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_ivs(self, instance, vif, image_meta,
inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
inst_type,
virt_type)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
inst_type,
virt_type)
def get_config_802qbg(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
conf, vif['network'].get_meta('interface'),
params['managerid'],
params['typeid'],
params['typeidversion'],
params['instanceid'])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_802qbh(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_802qbh_config(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_PROFILEID])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_hw_veb(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
profile = vif["profile"]
vif_details = vif["details"]
net_type = 'direct'
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
net_type = 'hostdev'
designer.set_vif_host_backend_hw_veb(
conf, net_type, profile['pci_slot'],
vif_details[network_model.VIF_DETAILS_VLAN])
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_iovisor(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config_midonet(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
def get_config_mlnx_direct(self, instance, vif, image_meta,
inst_type, virt_type):
conf = self.get_base_config(instance, vif, image_meta,
inst_type, virt_type)
devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
designer.set_vif_host_backend_direct_config(conf, devname)
designer.set_vif_bandwidth_config(conf, inst_type)
return conf
def get_config(self, instance, vif, image_meta,
inst_type, virt_type):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'get_config_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
return func(instance, vif, image_meta,
inst_type, virt_type)
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network = vif['network']
if (not network.get_meta('multi_host', False) and
network.get_meta('should_create_bridge', False)):
if network.get_meta('should_create_vlan', False):
iface = CONF.vlan_interface or \
network.get_meta('bridge_interface')
LOG.debug('Ensuring vlan %(vlan)s and bridge %(bridge)s',
{'vlan': network.get_meta('vlan'),
'bridge': self.get_bridge_name(vif)},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network.get_meta('vlan'),
self.get_bridge_name(vif),
iface)
else:
iface = CONF.flat_interface or \
network.get_meta('bridge_interface')
LOG.debug("Ensuring bridge %s",
self.get_bridge_name(vif), instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
self.get_bridge_name(vif),
iface)
def plug_ovs_bridge(self, instance, vif):
"""No manual plugging required."""
pass
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id, vif['address'],
instance['uuid'])
def plug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
else:
self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance['uuid'])
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if not linux_net.device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True)
utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True)
utils.execute('tee',
('/sys/class/net/%s/bridge/multicast_snooping' %
br_name),
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
linux_net.create_ivs_vif_port(v2_name, iface_id, vif['address'],
instance['uuid'])
def plug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ivs_hybrid(instance, vif)
else:
self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
device_id = instance['uuid']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
network_model.VIF_TYPE_MLNX_DIRECT, dev_name,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_802qbg(self, instance, vif):
pass
def plug_802qbh(self, instance, vif):
pass
def plug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
linux_net.set_vf_interface_vlan(
vif['profile']['pci_slot'],
mac_addr=vif['address'],
vlan=vif['details'][network_model.VIF_DETAILS_VLAN])
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
linux_net.create_tap_dev(dev)
utils.execute('mm-ctl', '--bind-port', port_id, dev,
run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_iovisor(self, instance, vif):
"""Plug using PLUMgrid IO Visor Driver
Connect a network device to its respective
Virtual Domain in the PLUMgrid Platform.
"""
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance["project_id"]
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'ifup', dev,
'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], 'pgtag2=%s' % net_id,
'pgtag1=%s' % tenant_id, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.VirtualInterfacePlugException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'plug_%s' % vif_slug, None)
if not func:
raise exception.VirtualInterfacePlugException(
_("Plug vif failed because of unexpected "
"vif_type=%s") % vif_type)
func(instance, vif)
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_bridge(self, instance, vif):
"""No manual unplugging required."""
pass
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
if linux_net.device_exists(br_name):
utils.execute('brctl', 'delif', br_name, v1_name,
run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name,
run_as_root=True)
linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
else:
self.unplug_ovs_bridge(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy (same as OVS)
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
linux_net.delete_ivs_vif_port(v2_name)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_ivs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ivs_hybrid(instance, vif)
else:
self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
network_uuid=vif['network']['id'])
try:
utils.execute('ebrctl', 'del-port', fabric,
vnic_mac, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_802qbg(self, instance, vif):
pass
def unplug_802qbh(self, instance, vif):
pass
def unplug_hw_veb(self, instance, vif):
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
# The ip utility doesn't accept the MAC 00:00:00:00:00:00.
# Therefore, keep the MAC unchanged. Later operations on
# the same VF will not be affected by the existing MAC.
linux_net.set_vf_interface_vlan(vif['profile']['pci_slot'],
mac_addr=vif['address'])
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
utils.execute('mm-ctl', '--unbind-port', port_id,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug_iovisor(self, instance, vif):
"""Unplug using PLUMgrid IO Visor Driver
Delete the network device and its respective
connection to the Virtual Domain in the PLUMgrid Platform.
"""
iface_id = vif['id']
dev = self.get_vif_devname(vif)
try:
utils.execute('ifc_ctl', 'gateway', 'ifdown',
dev, 'access_vm',
vif['network']['label'] + "_" + iface_id,
vif['address'], run_as_root=True)
utils.execute('ifc_ctl', 'gateway', 'del_port', dev,
run_as_root=True)
linux_net.delete_net_dev(dev)
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
def unplug(self, instance, vif):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
'vif=%(vif)s',
{'vif_type': vif_type, 'instance': instance,
'vif': vif})
if vif_type is None:
raise exception.NovaException(
_("vif_type parameter must be present "
"for this vif_driver implementation"))
vif_slug = self._normalize_vif_type(vif_type)
func = getattr(self, 'unplug_%s' % vif_slug, None)
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
| apache-2.0 |
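Every public entry point in the driver above (`get_config`, `plug`, `unplug`) uses the same idiom: normalize the `vif_type` string with `_normalize_vif_type`, then resolve a per-type method via `getattr`. A self-contained, simplified sketch of that dispatch pattern (not the actual nova code):

```python
# Simplified illustration of the getattr-based dispatch used by LibvirtGenericVIFDriver.
class VIFDriver(object):
    def _normalize_vif_type(self, vif_type):
        # '802.1qbg' -> '802qbg', so the type can be used in a method name
        return vif_type.replace('2.1q', '2q')

    def plug_bridge(self, vif):
        print('plugging %s into a linux bridge' % vif['id'])

    def plug(self, vif):
        vif_type = vif.get('type')
        if vif_type is None:
            raise ValueError('vif_type parameter must be present')
        slug = self._normalize_vif_type(vif_type)
        func = getattr(self, 'plug_%s' % slug, None)
        if func is None:
            raise ValueError('Unexpected vif_type=%s' % vif_type)
        return func(vif)


VIFDriver().plug({'id': 'vif-1', 'type': 'bridge'})  # dispatches to plug_bridge
```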
XXMrHyde/android_external_chromium_org | chrome/test/functional/webrtc_write_wsh.py | 66 | 2397 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This module is handler for incoming data to the pywebsocket standalone server
# (source is in http://code.google.com/p/pywebsocket/source/browse/trunk/src/).
# It follows the conventions of the pywebsocket server and in our case receives
# and stores incoming frames to disk.
import Queue
import os
import sys
import threading
_NUMBER_OF_WRITER_THREADS = 10
_HOME_ENV_NAME = 'HOMEPATH' if 'win32' == sys.platform else 'HOME'
_WORKING_DIR = os.path.join(os.environ[_HOME_ENV_NAME], 'webrtc_video_quality')
# I couldn't think of another way to handle this than through a global variable
g_frame_number_counter = 0
g_frames_queue = Queue.Queue()
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
while True:
data = request.ws_stream.receive_message()
if data is None:
return
# We assume we will receive only frames, i.e. binary data
global g_frame_number_counter
frame_number = str(g_frame_number_counter)
g_frame_number_counter += 1
g_frames_queue.put((frame_number, data))
class FrameWriterThread(threading.Thread):
"""Writes received frames to disk.
The frames are named in the format frame_xxxx, where xxxx is the 0-padded
frame number. The frames and their numbers are obtained from a synchronized
queue. The frames are written in the directory specified by _WORKING_DIR.
"""
def __init__(self, queue):
threading.Thread.__init__(self)
self._queue = queue
def run(self):
while True:
frame_number, frame_data = self._queue.get()
file_name = 'frame_' + frame_number.zfill(4)
file_name = os.path.join(_WORKING_DIR, file_name)
frame = open(file_name, "wb")
frame.write(frame_data)
frame.close()
self._queue.task_done()
def start_threads():
for i in range(_NUMBER_OF_WRITER_THREADS):
t = FrameWriterThread(g_frames_queue)
t.setDaemon(True)
t.start()
g_frames_queue.join()
# This handler's entire code is imported as-is and then incorporated in the
# code of the standalone pywebsocket server. If we put this start_threads() call
# inside an if __name__ == '__main__' clause it wouldn't run this code at all
# (tested).
start_threads()
| bsd-3-clause |
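The handler above is a producer/consumer pipeline: `web_socket_transfer_data` numbers each received frame and puts it on a shared queue, while daemon `FrameWriterThread`s drain the queue and write `frame_xxxx` files. A stripped-down sketch of that pattern, independent of pywebsocket (the output directory is arbitrary):

```python
# Minimal producer/consumer sketch mirroring the frame-writer structure above.
import os
import threading
try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2, as in the original handler

frames = queue.Queue()
out_dir = '/tmp/frames'     # stand-in for _WORKING_DIR

def writer():
    while True:
        number, data = frames.get()
        path = os.path.join(out_dir, 'frame_' + str(number).zfill(4))
        with open(path, 'wb') as f:
            f.write(data)
        frames.task_done()

if not os.path.isdir(out_dir):
    os.makedirs(out_dir)
for _ in range(4):                       # _NUMBER_OF_WRITER_THREADS analogue
    t = threading.Thread(target=writer)
    t.setDaemon(True)
    t.start()

for i in range(10):                      # producer: normally the websocket callback
    frames.put((i, b'frame-bytes'))
frames.join()                            # wait until every frame is on disk
```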
loco-odoo/localizacion_co | openerp/addons/crm_helpdesk/report/__init__.py | 442 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
t3dev/odoo | addons/resource/tests/common.py | 15 | 1756 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests.common import TransactionCase
class TestResourceCommon(TransactionCase):
def _define_calendar(self, name, attendances, tz):
return self.env['resource.calendar'].create({
'name': name,
'tz': tz,
'attendance_ids': [
(0, 0, {
'name': '%s_%d' % (name, index),
'hour_from': att[0],
'hour_to': att[1],
'dayofweek': str(att[2]),
})
for index, att in enumerate(attendances)
],
})
def setUp(self):
super(TestResourceCommon, self).setUp()
# UTC+1 winter, UTC+2 summer
self.calendar_jean = self._define_calendar('40 Hours', [(8, 16, i) for i in range(5)], 'Europe/Brussels')
# UTC+6
self.calendar_patel = self._define_calendar('38 Hours', sum([((9, 12, i), (13, 17, i)) for i in range(5)], ()), 'Etc/GMT-6')
# UTC-8 winter, UTC-7 summer
self.calendar_john = self._define_calendar('8+12 Hours', [(8, 16, 1), (8, 13, 4), (16, 23, 4)], 'America/Los_Angeles')
# Employee is linked to a resource.resource via resource.mixin
self.jean = self.env['resource.test'].create({
'name': 'Jean',
'resource_calendar_id': self.calendar_jean.id,
})
self.patel = self.env['resource.test'].create({
'name': 'Patel',
'resource_calendar_id': self.calendar_patel.id,
})
self.john = self.env['resource.test'].create({
'name': 'John',
'resource_calendar_id': self.calendar_john.id,
})
| gpl-3.0 |
bvcms/bvcms | CmsWeb/Lib/multiprocessing/dummy/connection.py | 168 | 2807 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from Queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
| gpl-2.0 |
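These queue-backed classes mirror the socket-based `multiprocessing.connection` API, so code written against `Listener`/`Client`/`Pipe` keeps working inside `multiprocessing.dummy`. A quick sketch of the `Pipe` behaviour defined above (module path as in the standard library):

```python
# Illustrative use of the queue-backed Pipe shown above.
from multiprocessing.dummy.connection import Pipe

a, b = Pipe()          # two Connection objects wired to each other via Queues
a.send('ping')
print(b.recv())        # -> 'ping'
print(b.poll(0.1))     # -> False: nothing else pending in that direction
b.send_bytes('pong')
print(a.recv_bytes())  # send/recv and *_bytes are the same queue operations here
```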
faun/django_test | build/lib/django/contrib/gis/db/backends/oracle/models.py | 310 | 2184 | """
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store
the feature table name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store
the feature geometry column.
"""
return 'column_name'
def __unicode__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
| bsd-3-clause |
lulufei/youtube-dl | test/test_YoutubeDL.py | 24 | 20870 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_str
from youtube_dl.extractor import YoutubeIE
from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import match_filter_func
TEST_URL = 'http://localhost/sample.mp4'
class YDL(FakeYDL):
def __init__(self, *args, **kwargs):
super(YDL, self).__init__(*args, **kwargs)
self.downloaded_info_dicts = []
self.msgs = []
def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict)
def to_screen(self, msg):
self.msgs.append(msg)
def _make_result(formats, **kwargs):
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
def test_prefer_free_formats(self):
# Same resolution => download webm
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 460, 'url': TEST_URL},
{'ext': 'mp4', 'height': 460, 'url': TEST_URL},
]
info_dict = _make_result(formats)
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'webm')
# Different resolution => download best quality (mp4)
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
# No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
{'ext': 'mp4', 'height': 720, 'url': TEST_URL},
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'flv', 'height': 720, 'url': TEST_URL},
{'ext': 'webm', 'height': 720, 'url': TEST_URL},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'flv')
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': '20/47'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '20/71/worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL()
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '2')
ydl = YDL({'format': 'webm/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '3gp/40/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-high')
ydl = YDL({'format': 'worstaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-low')
formats = [
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-high')
def test_format_selection_audio_exts(self):
formats = [
{'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'aac-64')
ydl = YDL({'format': 'mp3'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
ydl = YDL({'prefer_free_formats': True})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'ogg-64')
def test_format_selection_video(self):
formats = [
{'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-high')
ydl = YDL({'format': 'worstvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
def test_youtube_format_selection(self):
order = [
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
'85', '84', '102', '83', '101', '82', '100',
# Dash video
'137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
'141', '172', '140', '171', '139',
]
for f1id, f2id in zip(order, order[1:]):
f1 = YoutubeIE._formats[f1id].copy()
f1['format_id'] = f1id
f1['url'] = 'url:' + f1id
f2 = YoutubeIE._formats[f2id].copy()
f2['format_id'] = f2id
f2['url'] = 'url:' + f2id
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL({'format': 'best/bestvideo'})
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
def test_format_filtering(self):
formats = [
{'format_id': 'A', 'filesize': 500, 'width': 1000},
{'format_id': 'B', 'filesize': 1000, 'width': 500},
{'format_id': 'C', 'filesize': 1000, 'width': 400},
{'format_id': 'D', 'filesize': 2000, 'width': 600},
{'format_id': 'E', 'filesize': 3000},
{'format_id': 'F'},
{'format_id': 'G', 'filesize': 1000000},
]
for f in formats:
f['url'] = 'http://_/'
f['ext'] = 'unknown'
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[filesize<3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'D')
ydl = YDL({'format': 'best[filesize<=3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': 'best[filesize <= ? 3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'F')
ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'B')
ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'C')
ydl = YDL({'format': '[filesize>?1]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': '[filesize<1M]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': '[filesize<1MiB]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
class TestYoutubeDL(unittest.TestCase):
def test_subtitles(self):
def s_formats(lang, autocaption=False):
return [{
'ext': ext,
'url': 'http://localhost/video.%s.%s' % (lang, ext),
'_auto': autocaption,
} for ext in ['vtt', 'srt', 'ass']]
subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
info_dict = {
'id': 'test',
'title': 'Test',
'url': 'http://localhost/video.mp4',
'subtitles': subtitles,
'automatic_captions': auto_captions,
'extractor': 'TEST',
}
def get_info(params={}):
params.setdefault('simulate', True)
ydl = YDL(params)
ydl.report_warning = lambda *args, **kargs: None
return ydl.process_video_result(info_dict, download=False)
result = get_info()
self.assertFalse(result.get('requested_subtitles'))
self.assertEqual(result['subtitles'], subtitles)
self.assertEqual(result['automatic_captions'], auto_captions)
result = get_info({'writesubtitles': True})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['en']))
self.assertTrue(subs['en'].get('data') is None)
self.assertEqual(subs['en']['ext'], 'ass')
result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
subs = result['requested_subtitles']
self.assertEqual(subs['en']['ext'], 'srt')
result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'fr']))
result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertFalse(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
subs = result['requested_subtitles']
self.assertTrue(subs)
self.assertEqual(set(subs.keys()), set(['es', 'pt']))
self.assertTrue(subs['es']['_auto'])
self.assertTrue(subs['pt']['_auto'])
def test_add_extra_info(self):
test_dict = {
'extractor': 'Foo',
}
extra_info = {
'extractor': 'Bar',
'playlist': 'funny videos',
}
YDL.add_extra_info(test_dict, extra_info)
self.assertEqual(test_dict['extractor'], 'Foo')
self.assertEqual(test_dict['playlist'], 'funny videos')
def test_prepare_filename(self):
info = {
'id': '1234',
'ext': 'mp4',
'width': None,
}
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
# Replace missing fields with 'NA'
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
def test_format_note(self):
ydl = YoutubeDL()
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
}), '^\s*10k$')
def test_postprocessors(self):
filename = 'post-processor-testfile.mp4'
audiofile = filename + '.mp3'
class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'wt') as f:
f.write('EXAMPLE')
return [info['filepath']], info
def run_pp(params, PP):
with open(filename, 'wt') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(PP())
ydl.post_process(filename, {'filepath': filename})
run_pp({'keepvideo': True}, SimplePP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(filename)
os.unlink(audiofile)
run_pp({'keepvideo': False}, SimplePP)
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(audiofile)
class ModifierPP(PostProcessor):
def run(self, info):
with open(info['filepath'], 'wt') as f:
f.write('MODIFIED')
return [], info
run_pp({'keepvideo': False}, ModifierPP)
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
os.unlink(filename)
def test_match_filter(self):
class FilterYDL(YDL):
def __init__(self, *args, **kwargs):
super(FilterYDL, self).__init__(*args, **kwargs)
self.params['simulate'] = True
def process_info(self, info_dict):
super(YDL, self).process_info(info_dict)
def _match_entry(self, info_dict, incomplete):
res = super(FilterYDL, self)._match_entry(info_dict, incomplete)
if res is None:
self.downloaded_info_dicts.append(info_dict)
return res
first = {
'id': '1',
'url': TEST_URL,
'title': 'one',
'extractor': 'TEST',
'duration': 30,
'filesize': 10 * 1024,
}
second = {
'id': '2',
'url': TEST_URL,
'title': 'two',
'extractor': 'TEST',
'duration': 10,
'description': 'foo',
'filesize': 5 * 1024,
}
videos = [first, second]
def get_videos(filter_=None):
ydl = FilterYDL({'match_filter': filter_})
for v in videos:
ydl.process_ie_result(v, download=True)
return [v['id'] for v in ydl.downloaded_info_dicts]
res = get_videos()
self.assertEqual(res, ['1', '2'])
def f(v):
if v['id'] == '1':
return None
else:
return 'Video id is not 1'
res = get_videos(f)
self.assertEqual(res, ['1'])
f = match_filter_func('duration < 30')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description = foo')
res = get_videos(f)
self.assertEqual(res, ['2'])
f = match_filter_func('description =? foo')
res = get_videos(f)
self.assertEqual(res, ['1', '2'])
f = match_filter_func('filesize > 5KiB')
res = get_videos(f)
self.assertEqual(res, ['1'])
def test_playlist_items_selection(self):
entries = [{
'id': compat_str(i),
'title': compat_str(i),
'url': TEST_URL,
} for i in range(1, 5)]
playlist = {
'_type': 'playlist',
'id': 'test',
'entries': entries,
'extractor': 'test:playlist',
'extractor_key': 'test:playlist',
'webpage_url': 'http://example.com',
}
def get_ids(params):
ydl = YDL(params)
# make a copy because the dictionary can be modified
ydl.process_ie_result(playlist.copy())
return [int(v['id']) for v in ydl.downloaded_info_dicts]
result = get_ids({})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 10})
self.assertEqual(result, [1, 2, 3, 4])
result = get_ids({'playlistend': 2})
self.assertEqual(result, [1, 2])
result = get_ids({'playliststart': 10})
self.assertEqual(result, [])
result = get_ids({'playliststart': 2})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2-4'})
self.assertEqual(result, [2, 3, 4])
result = get_ids({'playlist_items': '2,4'})
self.assertEqual(result, [2, 4])
result = get_ids({'playlist_items': '10'})
self.assertEqual(result, [])
if __name__ == '__main__':
unittest.main()
| unlicense |
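The `'format'` selector strings exercised by `TestFormatSelection` ('bestaudio', 'best[filesize<3000]', 'webm/mp4', ...) are the same strings passed as options to `YoutubeDL` in normal use. A hedged sketch (assumes youtube_dl is installed and the URL is any extractable video URL):

```python
# Illustrative only: the tested 'format' option applied against a real extractor.
from youtube_dl import YoutubeDL

opts = {
    'format': 'bestaudio/best',  # same selector syntax the tests exercise
}
with YoutubeDL(opts) as ydl:
    # download=False resolves metadata and format selection without fetching media
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc',
                            download=False)
    print(info.get('format_id'), info.get('ext'))
```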
lochiiconnectivity/boto | boto/ec2/reservedinstance.py | 12 | 8480 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.ec2object import EC2Object
class ReservedInstancesOffering(EC2Object):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None, instance_tenancy=None,
currency_code=None, offering_type=None,
recurring_charges=None, pricing_details=None):
EC2Object.__init__(self, connection)
self.id = id
self.instance_type = instance_type
self.availability_zone = availability_zone
self.duration = duration
self.fixed_price = fixed_price
self.usage_price = usage_price
self.description = description
self.instance_tenancy = instance_tenancy
self.currency_code = currency_code
self.offering_type = offering_type
self.recurring_charges = recurring_charges
self.pricing_details = pricing_details
def __repr__(self):
return 'ReservedInstanceOffering:%s' % self.id
def startElement(self, name, attrs, connection):
if name == 'recurringCharges':
self.recurring_charges = ResultSet([('item', RecurringCharge)])
return self.recurring_charges
elif name == 'pricingDetailsSet':
self.pricing_details = ResultSet([('item', PricingDetail)])
return self.pricing_details
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesOfferingId':
self.id = value
elif name == 'instanceType':
self.instance_type = value
elif name == 'availabilityZone':
self.availability_zone = value
elif name == 'duration':
self.duration = int(value)
elif name == 'fixedPrice':
self.fixed_price = value
elif name == 'usagePrice':
self.usage_price = value
elif name == 'productDescription':
self.description = value
elif name == 'instanceTenancy':
self.instance_tenancy = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'offeringType':
self.offering_type = value
elif name == 'marketplace':
self.marketplace = True if value == 'true' else False
def describe(self):
print 'ID=%s' % self.id
print '\tInstance Type=%s' % self.instance_type
print '\tZone=%s' % self.availability_zone
print '\tDuration=%s' % self.duration
print '\tFixed Price=%s' % self.fixed_price
print '\tUsage Price=%s' % self.usage_price
print '\tDescription=%s' % self.description
def purchase(self, instance_count=1):
return self.connection.purchase_reserved_instance_offering(self.id, instance_count)
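# Illustrative usage sketch (not part of this module itself): offering objects are
# normally returned by an EC2 connection rather than constructed by hand. The region,
# filter values and instance count below are example assumptions.
#
#     import boto.ec2
#     conn = boto.ec2.connect_to_region('us-east-1')
#     offerings = conn.get_all_reserved_instances_offerings(
#         instance_type='m1.small', availability_zone='us-east-1a')
#     for offering in offerings:
#         offering.describe()
#     # offering.purchase(instance_count=1) proxies to
#     # conn.purchase_reserved_instance_offering(offering.id, 1)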
class RecurringCharge(object):
def __init__(self, connection=None, frequency=None, amount=None):
self.frequency = frequency
self.amount = amount
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class PricingDetail(object):
def __init__(self, connection=None, price=None, count=None):
self.price = price
self.count = count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class ReservedInstance(ReservedInstancesOffering):
def __init__(self, connection=None, id=None, instance_type=None,
availability_zone=None, duration=None, fixed_price=None,
usage_price=None, description=None,
instance_count=None, state=None):
ReservedInstancesOffering.__init__(self, connection, id, instance_type,
availability_zone, duration, fixed_price,
usage_price, description)
self.instance_count = instance_count
self.state = state
self.start = None
def __repr__(self):
return 'ReservedInstance:%s' % self.id
def endElement(self, name, value, connection):
if name == 'reservedInstancesId':
self.id = value
if name == 'instanceCount':
self.instance_count = int(value)
elif name == 'state':
self.state = value
elif name == 'start':
self.start = value
else:
ReservedInstancesOffering.endElement(self, name, value, connection)
class ReservedInstanceListing(EC2Object):
def __init__(self, connection=None, listing_id=None, id=None,
create_date=None, update_date=None,
status=None, status_message=None, client_token=None):
self.connection = connection
self.listing_id = listing_id
self.id = id
self.create_date = create_date
self.update_date = update_date
self.status = status
self.status_message = status_message
self.client_token = client_token
def startElement(self, name, attrs, connection):
if name == 'instanceCounts':
self.instance_counts = ResultSet([('item', InstanceCount)])
return self.instance_counts
elif name == 'priceSchedules':
self.price_schedules = ResultSet([('item', PriceSchedule)])
return self.price_schedules
return None
def endElement(self, name, value, connection):
if name == 'reservedInstancesListingId':
self.listing_id = value
elif name == 'reservedInstancesId':
self.id = value
elif name == 'createDate':
self.create_date = value
elif name == 'updateDate':
self.update_date = value
elif name == 'status':
self.status = value
elif name == 'statusMessage':
self.status_message = value
else:
setattr(self, name, value)
class InstanceCount(object):
def __init__(self, connection=None, state=None, instance_count=None):
self.state = state
self.instance_count = instance_count
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'state':
self.state = value
elif name == 'instanceCount':
self.instance_count = int(value)
else:
setattr(self, name, value)
class PriceSchedule(object):
def __init__(self, connection=None, term=None, price=None,
currency_code=None, active=None):
self.connection = connection
self.term = term
self.price = price
self.currency_code = currency_code
self.active = active
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'term':
self.term = int(value)
elif name == 'price':
self.price = value
elif name == 'currencyCode':
self.currency_code = value
elif name == 'active':
self.active = True if value == 'true' else False
else:
setattr(self, name, value)
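# How these value objects get populated (simplified sketch; exact plumbing may differ
# between boto versions): the EC2 XML response is fed through a SAX parser, and the
# startElement()/endElement() methods above act as the parsing callbacks.
# 'response_body' and 'connection' below are placeholders.
#
#     import xml.sax
#     from boto.handler import XmlHandler
#     from boto.resultset import ResultSet
#
#     rs = ResultSet([('item', ReservedInstancesOffering)])
#     xml.sax.parseString(response_body, XmlHandler(rs, connection))
#     # rs now behaves like a list of ReservedInstancesOffering instances,
#     # each with attributes filled in via its endElement() handler.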
| mit |
jyejare/robottelo | tests/foreman/cli/test_repository.py | 1 | 102147 | # -*- encoding: utf-8 -*-
"""Test class for Repository CLI
:Requirement: Repository
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Repositories
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from fauxfactory import gen_alphanumeric
from fauxfactory import gen_string
from nailgun import entities
from wait_for import wait_for
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.contentview import ContentView
from robottelo.cli.factory import CLIFactoryError
from robottelo.cli.factory import make_content_view
from robottelo.cli.factory import make_filter
from robottelo.cli.factory import make_gpg_key
from robottelo.cli.factory import make_lifecycle_environment
from robottelo.cli.factory import make_org
from robottelo.cli.factory import make_product
from robottelo.cli.factory import make_product_wait
from robottelo.cli.factory import make_repository
from robottelo.cli.factory import make_role
from robottelo.cli.factory import make_user
from robottelo.cli.file import File
from robottelo.cli.filter import Filter
from robottelo.cli.module_stream import ModuleStream
from robottelo.cli.package import Package
from robottelo.cli.puppetmodule import PuppetModule
from robottelo.cli.repository import Repository
from robottelo.cli.role import Role
from robottelo.cli.settings import Settings
from robottelo.cli.srpm import Srpm
from robottelo.cli.task import Task
from robottelo.cli.user import User
from robottelo.constants import CUSTOM_FILE_REPO
from robottelo.constants import CUSTOM_FILE_REPO_FILES_COUNT
from robottelo.constants import CUSTOM_LOCAL_FOLDER
from robottelo.constants import CUSTOM_MODULE_STREAM_REPO_1
from robottelo.constants import CUSTOM_MODULE_STREAM_REPO_2
from robottelo.constants import DOCKER_REGISTRY_HUB
from robottelo.constants import DOWNLOAD_POLICIES
from robottelo.constants import FAKE_0_YUM_REPO
from robottelo.constants import FAKE_1_PUPPET_REPO
from robottelo.constants import FAKE_1_YUM_REPO
from robottelo.constants import FAKE_2_PUPPET_REPO
from robottelo.constants import FAKE_2_YUM_REPO
from robottelo.constants import FAKE_3_PUPPET_REPO
from robottelo.constants import FAKE_3_YUM_REPO
from robottelo.constants import FAKE_4_PUPPET_REPO
from robottelo.constants import FAKE_4_YUM_REPO
from robottelo.constants import FAKE_5_PUPPET_REPO
from robottelo.constants import FAKE_5_YUM_REPO
from robottelo.constants import FAKE_7_PUPPET_REPO
from robottelo.constants import FAKE_PULP_REMOTE_FILEREPO
from robottelo.constants import FAKE_YUM_DRPM_REPO
from robottelo.constants import FAKE_YUM_MIXED_REPO
from robottelo.constants import FAKE_YUM_SRPM_REPO
from robottelo.constants import FEDORA27_OSTREE_REPO
from robottelo.constants import OS_TEMPLATE_DATA_FILE
from robottelo.constants import REPO_TYPE
from robottelo.constants import RPM_TO_UPLOAD
from robottelo.constants import SRPM_TO_UPLOAD
from robottelo.datafactory import invalid_http_credentials
from robottelo.datafactory import invalid_values_list
from robottelo.datafactory import valid_data_list
from robottelo.datafactory import valid_docker_repository_names
from robottelo.datafactory import valid_http_credentials
from robottelo.decorators import tier1
from robottelo.decorators import tier2
from robottelo.decorators import upgrade
from robottelo.decorators.host import skip_if_os
from robottelo.helpers import get_data_file
from robottelo.host_info import get_host_os_version
from robottelo.test import CLITestCase
from robottelo.utils.issue_handlers import is_open
class RepositoryTestCase(CLITestCase):
"""Repository CLI tests."""
org = None
product = None
def setUp(self):
"""Tests for Repository via Hammer CLI"""
super(RepositoryTestCase, self).setUp()
if RepositoryTestCase.org is None:
RepositoryTestCase.org = make_org(cached=True)
if RepositoryTestCase.product is None:
RepositoryTestCase.product = make_product_wait(
{'organization-id': RepositoryTestCase.org['id']}
)
def _make_repository(self, options=None):
"""Makes a new repository and asserts its success"""
if options is None:
options = {}
if options.get('product-id') is None:
options['product-id'] = self.product['id']
return make_repository(options)
    def _get_image_tags_count(self, repo=None):
        # Despite the name, this returns the full Repository.info() result;
        # callers read 'container-image-tags' from its 'content-counts' section.
        repo_detail = Repository.info({'id': repo['id']})
        return repo_detail
def _validated_image_tags_count(self, repo=None):
"""Wrapper around Repository.info(), that returns once
container-image-tags in repo is greater than 0.
Needed due to BZ#1664631 (container-image-tags is not populated
immediately after synchronization), which was CLOSED WONTFIX
"""
wait_for(
lambda: int(
self._get_image_tags_count(repo=repo)['content-counts']['container-image-tags']
)
> 0,
timeout=30,
delay=2,
logger=self.logger,
)
return self._get_image_tags_count(repo=repo)
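    # Sketch of how the two helpers above are combined by the docker synchronization
    # tests further down in this class (the values are examples taken from those
    # tests, not an additional fixture):
    #
    #     repo = self._make_repository({
    #         'content-type': 'docker',
    #         'docker-upstream-name': 'alpine',
    #         'url': DOCKER_REGISTRY_HUB,
    #     })
    #     Repository.synchronize({'id': repo['id']})
    #     repo = self._validated_image_tags_count(repo=repo)
    #     tag_count = int(repo['content-counts']['container-image-tags'])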
@tier1
@upgrade
def test_positive_info_docker_upstream_name(self):
"""Check if repository docker-upstream-name is shown
in repository info
:id: f197a14c-2cf3-4564-9b18-5fd37d469ea4
:expectedresults: repository info command returns upstream-repository-
name value
:BZ: 1189289
:CaseImportance: Critical
"""
repository = self._make_repository(
{
'content-type': 'docker',
'name': gen_string('alpha'),
'docker-upstream-name': 'fedora/rabbitmq',
}
)
self.assertIn('upstream-repository-name', repository)
self.assertEqual(repository['upstream-repository-name'], 'fedora/rabbitmq')
@tier1
def test_positive_create_with_name(self):
"""Check if repository can be created with random names
:id: 604dea2c-d512-4a27-bfc1-24c9655b6ea9
:expectedresults: Repository is created and has random name
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
self.assertEqual(new_repo['name'], name)
@tier1
def test_positive_create_with_name_label(self):
"""Check if repository can be created with random names and
labels
:id: 79d2a6d0-5032-46cd-880c-46cf392521fa
:expectedresults: Repository is created and has random name and labels
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
# Generate a random, 'safe' label
label = gen_string('alpha', 20)
new_repo = self._make_repository({'label': label, 'name': name})
self.assertEqual(new_repo['name'], name)
self.assertEqual(new_repo['label'], label)
@tier1
def test_positive_create_with_yum_repo(self):
"""Create YUM repository
:id: 4c08824f-ba95-486c-94c2-9abf0a3441ea
:expectedresults: YUM repository is created
:CaseImportance: Critical
"""
for url in (
FAKE_0_YUM_REPO,
FAKE_1_YUM_REPO,
FAKE_2_YUM_REPO,
FAKE_3_YUM_REPO,
FAKE_4_YUM_REPO,
):
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'yum', 'url': url})
self.assertEqual(new_repo['url'], url)
self.assertEqual(new_repo['content-type'], 'yum')
@tier1
@upgrade
def test_positive_create_with_puppet_repo(self):
"""Create Puppet repository
:id: 75c309ba-fbc9-419d-8427-7a61b063ec13
:expectedresults: Puppet repository is created
:CaseImportance: Critical
"""
for url in (
FAKE_1_PUPPET_REPO,
FAKE_2_PUPPET_REPO,
FAKE_3_PUPPET_REPO,
FAKE_4_PUPPET_REPO,
FAKE_5_PUPPET_REPO,
):
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url})
self.assertEqual(new_repo['url'], url)
self.assertEqual(new_repo['content-type'], 'puppet')
@tier1
@upgrade
def test_positive_create_with_file_repo(self):
"""Create file repository
:id: 46f63419-1acc-4ae2-be8c-d97816ba342f
:expectedresults: file repository is created
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
self.assertEqual(new_repo['url'], CUSTOM_FILE_REPO)
self.assertEqual(new_repo['content-type'], 'file')
@tier1
def test_positive_create_with_auth_yum_repo(self):
"""Create YUM repository with basic HTTP authentication
:id: da8309fd-3076-427b-a96f-8d883d6e944f
:expectedresults: YUM repository is created
:CaseImportance: Critical
"""
url = FAKE_5_YUM_REPO
for creds in valid_http_credentials(url_encoded=True):
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
self.assertEqual(new_repo['url'], url_encoded)
self.assertEqual(new_repo['content-type'], 'yum')
@tier1
@upgrade
def test_positive_create_with_download_policy(self):
"""Create YUM repositories with available download policies
:id: ffb386e6-c360-4d4b-a324-ccc21768b4f8
:expectedresults: YUM repository with a download policy is created
:CaseImportance: Critical
"""
for policy in DOWNLOAD_POLICIES:
with self.subTest(policy):
new_repo = self._make_repository(
{'content-type': 'yum', 'download-policy': policy}
)
self.assertEqual(new_repo['download-policy'], policy)
@tier1
@upgrade
def test_positive_create_with_mirror_on_sync(self):
"""Create YUM repositories with available mirror on sync rule
:id: 37a09a91-42fc-4271-b58b-8e00ef0dc5a7
:expectedresults: YUM repository created successfully and its mirror on
sync rule value can be read back
:BZ: 1383258
:CaseImportance: Critical
"""
for value in ['yes', 'no']:
with self.subTest(value):
new_repo = self._make_repository({'content-type': 'yum', 'mirror-on-sync': value})
self.assertEqual(new_repo['mirror-on-sync'], value)
@tier1
def test_positive_create_with_default_download_policy(self):
"""Verify if the default download policy is assigned when creating a
YUM repo without `--download-policy`
:id: 9a3c4d95-d6ca-4377-9873-2c552b7d6ce7
:expectedresults: YUM repository with a default download policy
:CaseImportance: Critical
"""
default_dl_policy = Settings.list({'search': 'name=default_download_policy'})
self.assertTrue(default_dl_policy)
new_repo = self._make_repository({'content-type': 'yum'})
self.assertEqual(new_repo['download-policy'], default_dl_policy[0]['value'])
@tier1
def test_positive_create_immediate_update_to_on_demand(self):
"""Update `immediate` download policy to `on_demand` for a newly
created YUM repository
:id: 1a80d686-3f7b-475e-9d1a-3e1f51d55101
:expectedresults: immediate download policy is updated to on_demand
:CaseImportance: Critical
:BZ: 1732056
"""
new_repo = self._make_repository({'content-type': 'yum'})
self.assertEqual(new_repo['download-policy'], 'immediate')
Repository.update({'id': new_repo['id'], 'download-policy': 'on_demand'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'on_demand')
@tier1
def test_positive_create_immediate_update_to_background(self):
"""Update `immediate` download policy to `background` for a newly
created YUM repository
:id: 7a9243eb-012c-40ad-9105-b078ed0a9eda
:expectedresults: immediate download policy is updated to background
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'immediate'})
Repository.update({'id': new_repo['id'], 'download-policy': 'background'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'background')
@tier1
def test_positive_create_on_demand_update_to_immediate(self):
"""Update `on_demand` download policy to `immediate` for a newly
created YUM repository
:id: 1e8338af-32e5-4f92-9215-bfdc1973c8f7
:expectedresults: on_demand download policy is updated to immediate
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'on_demand'})
Repository.update({'id': new_repo['id'], 'download-policy': 'immediate'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'immediate')
@tier1
def test_positive_create_on_demand_update_to_background(self):
"""Update `on_demand` download policy to `background` for a newly
created YUM repository
:id: da600200-5bd4-4cb8-a891-37cd2233803e
:expectedresults: on_demand download policy is updated to background
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'on_demand'})
Repository.update({'id': new_repo['id'], 'download-policy': 'background'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'background')
@tier1
def test_positive_create_background_update_to_immediate(self):
"""Update `background` download policy to `immediate` for a newly
created YUM repository
:id: cf4dca0c-36bd-4a3c-aa29-f435ac60b3f8
:expectedresults: background download policy is updated to immediate
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'background'})
Repository.update({'id': new_repo['id'], 'download-policy': 'immediate'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'immediate')
@tier1
def test_positive_create_background_update_to_on_demand(self):
"""Update `background` download policy to `on_demand` for a newly
created YUM repository
:id: 0f943e3d-44b7-4b6e-9a7d-d33f7f4864d1
:expectedresults: background download policy is updated to on_demand
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'download-policy': 'background'})
Repository.update({'id': new_repo['id'], 'download-policy': 'on_demand'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['download-policy'], 'on_demand')
@tier1
def test_positive_create_with_auth_puppet_repo(self):
"""Create Puppet repository with basic HTTP authentication
:id: b13f8ae2-60ab-47e6-a096-d3f368e5cab3
:expectedresults: Puppet repository is created
:CaseImportance: Critical
"""
url = FAKE_7_PUPPET_REPO
for creds in valid_http_credentials(url_encoded=True):
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url_encoded})
self.assertEqual(new_repo['url'], url_encoded)
self.assertEqual(new_repo['content-type'], 'puppet')
@tier1
@upgrade
def test_positive_create_with_gpg_key_by_id(self):
"""Check if repository can be created with gpg key ID
:id: 6d22f0ea-2d27-4827-9b7a-3e1550a47285
:expectedresults: Repository is created and has gpg key
:CaseImportance: Critical
"""
# Make a new gpg key
gpg_key = make_gpg_key({'organization-id': self.org['id']})
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'gpg-key-id': gpg_key['id'], 'name': name})
self.assertEqual(new_repo['gpg-key']['id'], gpg_key['id'])
self.assertEqual(new_repo['gpg-key']['name'], gpg_key['name'])
@tier1
def test_positive_create_with_gpg_key_by_name(self):
"""Check if repository can be created with gpg key name
:id: 95cde404-3449-410d-9a08-d7f8619a2ad5
:expectedresults: Repository is created and has gpg key
:BZ: 1103944
:CaseImportance: Critical
"""
gpg_key = make_gpg_key({'organization-id': self.org['id']})
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository(
{'gpg-key': gpg_key['name'], 'name': name, 'organization-id': self.org['id']}
)
self.assertEqual(new_repo['gpg-key']['id'], gpg_key['id'])
self.assertEqual(new_repo['gpg-key']['name'], gpg_key['name'])
@tier1
def test_positive_create_publish_via_http(self):
"""Create repository published via http
:id: faf6058c-9dd3-444c-ace2-c41791669e9e
:expectedresults: Repository is created and is published via http
:CaseImportance: Critical
"""
for use_http in 'true', 'yes', '1':
with self.subTest(use_http):
repo = self._make_repository({'publish-via-http': use_http})
self.assertEqual(repo['publish-via-http'], 'yes')
@tier1
def test_positive_create_publish_via_https(self):
"""Create repository not published via http
:id: 4395a5df-207c-4b34-a42d-7b3273bd68ec
:expectedresults: Repository is created and is not published via http
:CaseImportance: Critical
"""
for use_http in 'false', 'no', '0':
with self.subTest(use_http):
repo = self._make_repository({'publish-via-http': use_http})
self.assertEqual(repo['publish-via-http'], 'no')
@tier1
@upgrade
def test_positive_create_yum_repo_with_checksum_type(self):
"""Create a YUM repository with a checksum type
:id: 934f4a09-2a64-485d-ae6c-8ef73aa8fb2b
:expectedresults: A YUM repository is created and contains the correct
checksum type
:CaseImportance: Critical
"""
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
content_type = 'yum'
repository = self._make_repository(
{
'checksum-type': checksum_type,
'content-type': content_type,
'download-policy': 'immediate',
}
)
self.assertEqual(repository['content-type'], content_type)
self.assertEqual(repository['checksum-type'], checksum_type)
@tier1
def test_positive_create_docker_repo_with_upstream_name(self):
"""Create a Docker repository with upstream name.
:id: 776f92eb-8b40-4efd-8315-4fbbabcb2d4e
:expectedresults: Docker repository is created and contains correct
values.
:CaseImportance: Critical
"""
content_type = 'docker'
new_repo = self._make_repository(
{
'content-type': content_type,
'docker-upstream-name': 'busybox',
'name': 'busybox',
'url': DOCKER_REGISTRY_HUB,
}
)
# Assert that urls and content types matches data passed
self.assertEqual(new_repo['url'], DOCKER_REGISTRY_HUB)
self.assertEqual(new_repo['content-type'], content_type)
self.assertEqual(new_repo['name'], 'busybox')
@tier1
def test_positive_create_docker_repo_with_name(self):
"""Create a Docker repository with a random name.
:id: b6a01434-8672-4196-b61a-dcb86c49f43b
:expectedresults: Docker repository is created and contains correct
values.
:CaseImportance: Critical
"""
for name in valid_docker_repository_names():
with self.subTest(name):
content_type = 'docker'
new_repo = self._make_repository(
{
'content-type': content_type,
'docker-upstream-name': 'busybox',
'name': name,
'url': DOCKER_REGISTRY_HUB,
}
)
# Assert that urls, content types and name matches data passed
self.assertEqual(new_repo['url'], DOCKER_REGISTRY_HUB)
self.assertEqual(new_repo['content-type'], content_type)
self.assertEqual(new_repo['name'], name)
@tier2
def test_positive_create_puppet_repo_same_url_different_orgs(self):
"""Create two repos with the same URL in two different organizations.
:id: b3502064-f400-4e60-a11f-b3772bd23a98
:expectedresults: Repositories are created and puppet modules are
visible from different organizations.
:CaseLevel: Integration
"""
url = 'https://omaciel.fedorapeople.org/b3502064/'
# Create first repo
repo = self._make_repository({'content-type': 'puppet', 'url': url})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '1')
# Create another org and repo
org = make_org()
product = make_product({'organization-id': org['id']})
new_repo = self._make_repository(
{'url': url, 'product': product, 'content-type': 'puppet'}
)
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['content-counts']['puppet-modules'], '1')
@tier1
def test_negative_create_with_name(self):
"""Repository name cannot be 300-characters long
:id: af0652d3-012d-4846-82ac-047918f74722
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
for name in invalid_values_list():
with self.subTest(name):
with self.assertRaises(CLIFactoryError):
self._make_repository({'name': name})
@tier1
def test_negative_create_with_auth_url_with_special_characters(self):
"""Verify that repository URL cannot contain unquoted special characters
:id: 2bd5ee17-0fe5-43cb-9cdc-dc2178c5374c
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
# get a list of valid credentials without quoting them
for cred in [creds for creds in valid_http_credentials() if creds['quote'] is True]:
url_encoded = FAKE_5_YUM_REPO.format(cred['login'], cred['pass'])
with self.subTest(url_encoded):
with self.assertRaises(CLIFactoryError):
self._make_repository({'url': url_encoded})
@tier1
def test_negative_create_with_auth_url_too_long(self):
"""Verify that repository URL length is limited
:id: de356c66-4237-4421-89e3-f4f8bbe6f526
:expectedresults: Repository cannot be created
:CaseImportance: Critical
"""
for cred in invalid_http_credentials():
url_encoded = FAKE_5_YUM_REPO.format(cred['login'], cred['pass'])
with self.subTest(url_encoded):
with self.assertRaises(CLIFactoryError):
self._make_repository({'url': url_encoded})
@tier1
def test_negative_create_with_invalid_download_policy(self):
"""Verify that YUM repository cannot be created with invalid download
policy
:id: 3b143bf8-7056-4c94-910d-69a451071f26
:expectedresults: YUM repository is not created with invalid download
policy
:CaseImportance: Critical
"""
with self.assertRaises(CLIFactoryError):
self._make_repository(
{'content-type': 'yum', 'download-policy': gen_string('alpha', 5)}
)
@tier1
def test_negative_update_to_invalid_download_policy(self):
"""Verify that YUM repository cannot be updated to invalid download
policy
:id: 5bd6a2e4-7ff0-42ac-825a-6b2a2f687c89
:expectedresults: YUM repository is not updated to invalid download
policy
:CaseImportance: Critical
"""
with self.assertRaises(CLIReturnCodeError):
new_repo = self._make_repository({'content-type': 'yum'})
Repository.update({'id': new_repo['id'], 'download-policy': gen_string('alpha', 5)})
@tier1
def test_negative_create_non_yum_with_download_policy(self):
"""Verify that non-YUM repositories cannot be created with download
policy
:id: 71388973-50ea-4a20-9406-0aca142014ca
:expectedresults: Non-YUM repository is not created with a download
policy
:BZ: 1439835
:CaseImportance: Critical
"""
os_version = get_host_os_version()
        # ostree is not supported on RHEL 6, so exclude it from the content types under test there
if os_version.startswith('RHEL6'):
non_yum_repo_types = [
item for item in REPO_TYPE.keys() if item != 'yum' and item != 'ostree'
]
else:
non_yum_repo_types = [item for item in REPO_TYPE.keys() if item != 'yum']
for content_type in non_yum_repo_types:
with self.subTest(content_type):
with self.assertRaisesRegex(
CLIFactoryError,
'Download policy Cannot set attribute download_policy for content type',
):
self._make_repository(
{'content-type': content_type, 'download-policy': 'on_demand'}
)
@tier1
def test_positive_synchronize_yum_repo(self):
"""Check if repository can be created and synced
:id: e3a62529-edbd-4062-9246-bef5f33bdcf0
:expectedresults: Repository is created and synced
:CaseLevel: Integration
:CaseImportance: Critical
"""
for url in FAKE_1_YUM_REPO, FAKE_3_YUM_REPO, FAKE_4_YUM_REPO:
with self.subTest(url):
new_repo = self._make_repository({'content-type': 'yum', 'url': url})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier1
def test_positive_synchronize_file_repo(self):
"""Check if repository can be created and synced
:id: eafc421d-153e-41e1-afbd-938e556ef827
:expectedresults: Repository is created and synced
:CaseLevel: Integration
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
self.assertEqual(int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT)
@tier2
@upgrade
def test_positive_synchronize_auth_yum_repo(self):
"""Check if secured repository can be created and synced
:id: b0db676b-e0f0-428c-adf3-1d7c0c3599f0
:expectedresults: Repository is created and synced
:BZ: 1328092
:CaseLevel: Integration
"""
url = FAKE_5_YUM_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
def test_negative_synchronize_auth_yum_repo(self):
"""Check if secured repo fails to synchronize with invalid credentials
:id: 809905ae-fb76-465d-9468-1f99c4274aeb
:expectedresults: Repository is created but synchronization fails
:BZ: 1405503, 1453118
:CaseLevel: Integration
"""
url = FAKE_5_YUM_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if not cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'yum', 'url': url_encoded})
# Try to synchronize it
repo_sync = Repository.synchronize({'id': new_repo['id'], 'async': True})
response = Task.progress({'id': repo_sync[0]['id']}, return_raw_response=True)
if creds['original_encoding'] == 'utf8':
self.assertIn(
("Error retrieving metadata: 'latin-1' codec can't encode characters"),
''.join(response.stderr),
)
else:
self.assertIn(
'Error retrieving metadata: Unauthorized', ''.join(response.stderr)
)
@tier2
@upgrade
def test_positive_synchronize_auth_puppet_repo(self):
"""Check if secured puppet repository can be created and synced
:id: 1d2604fc-8a18-4cbe-bf4c-5c7d9fbdb82c
:expectedresults: Repository is created and synced
:BZ: 1405503
:CaseLevel: Integration
"""
url = FAKE_7_PUPPET_REPO
for creds in [
cred for cred in valid_http_credentials(url_encoded=True) if cred['http_valid']
]:
url_encoded = url.format(creds['login'], creds['pass'])
with self.subTest(url_encoded):
new_repo = self._make_repository({'content-type': 'puppet', 'url': url_encoded})
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
@upgrade
def test_positive_synchronize_docker_repo(self):
"""Check if Docker repository can be created and synced
:id: cb9ae788-743c-4785-98b2-6ae0c161bc9a
:expectedresults: Docker repository is created and synced
"""
new_repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'busybox',
'url': DOCKER_REGISTRY_HUB,
}
)
# Assertion that repo is not yet synced
self.assertEqual(new_repo['sync']['status'], 'Not Synced')
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier2
@upgrade
def test_positive_synchronize_docker_repo_with_tags_whitelist(self):
"""Check if only whitelisted tags are synchronized
:id: aa820c65-2de1-4b32-8890-98bd8b4320dc
:expectedresults: Only whitelisted tag is synchronized
"""
tags = 'latest'
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': tags,
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertIn(tags, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 1)
@tier2
def test_positive_synchronize_docker_repo_set_tags_later(self):
"""Verify that adding tags whitelist and re-syncing after
synchronizing full repository doesn't remove content that was
already pulled in
:id: 97f2087f-6041-4242-8b7c-be53c68f46ff
:expectedresults: Non-whitelisted tags are not removed
"""
tags = 'latest'
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'hello-world',
'url': DOCKER_REGISTRY_HUB,
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertFalse(repo['container-image-tags-filter'])
self.assertGreaterEqual(int(repo['content-counts']['container-image-tags']), 2)
Repository.update({'id': repo['id'], 'docker-tags-whitelist': tags})
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
self.assertIn(tags, repo['container-image-tags-filter'])
self.assertGreaterEqual(int(repo['content-counts']['container-image-tags']), 2)
@tier2
def test_negative_synchronize_docker_repo_with_mix_valid_invalid_tags(self):
"""Set tags whitelist to contain both valid and invalid (non-existing)
tags. Check if only whitelisted tags are synchronized
:id: 75668da8-cc94-4d39-ade1-d3ef91edc812
:expectedresults: Only whitelisted tag is synchronized
"""
tags = ['latest', gen_string('alpha')]
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': ",".join(tags),
}
)
Repository.synchronize({'id': repo['id']})
repo = self._validated_image_tags_count(repo=repo)
        for tag in tags:
            self.assertIn(tag, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 1)
@tier2
def test_negative_synchronize_docker_repo_with_invalid_tags(self):
"""Set tags whitelist to contain only invalid (non-existing)
tags. Check that no data is synchronized.
:id: da05cdb1-2aea-48b9-9424-6cc700bc1194
:expectedresults: Tags are not synchronized
"""
tags = [gen_string('alpha') for _ in range(3)]
repo = self._make_repository(
{
'content-type': 'docker',
'docker-upstream-name': 'alpine',
'url': DOCKER_REGISTRY_HUB,
'docker-tags-whitelist': ",".join(tags),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
        for tag in tags:
            self.assertIn(tag, repo['container-image-tags-filter'])
self.assertEqual(int(repo['content-counts']['container-image-tags']), 0)
@tier2
def test_positive_resynchronize_rpm_repo(self):
"""Check that repository content is resynced after packages were
removed from repository
:id: a21b6710-4f12-4722-803e-3cb29d70eead
:expectedresults: Repository has updated non-zero packages count
:BZ: 1459845, 1459874, 1318004
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
@tier2
def test_positive_resynchronize_puppet_repo(self):
"""Check that repository content is resynced after puppet modules
were removed from repository
:id: 9e28f0ae-3875-4c1e-ad8b-d068f4409fe3
:expectedresults: Repository has updated non-zero puppet modules count
:BZ: 1459845, 1318004
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
        # Find puppet modules and remove them
modules = PuppetModule.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [module['id'] for module in modules]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '0')
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
@tier2
def test_positive_synchronize_rpm_repo_ignore_content(self):
"""Synchronize yum repository with ignore content setting
:id: fa32ff10-e2e2-4ee0-b444-82f66f4a0e96
:expectedresults: Selected content types are ignored during
synchronization
:BZ: 1591358
:CaseLevel: Integration
"""
# Create repository and synchronize it
repo = self._make_repository(
{
'content-type': 'yum',
'url': FAKE_YUM_MIXED_REPO,
'ignorable-content': ['erratum', 'srpm', 'drpm'],
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
# Check synced content types
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '5', 'content not synced correctly')
self.assertEqual(repo['content-counts']['errata'], '0', 'content not ignored correctly')
self.assertEqual(
repo['content-counts']['source-rpms'], '0', 'content not ignored correctly'
)
# drpm check requires a different method
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
# expecting No such file or directory for drpms
self.assertEqual(result.return_code, 1)
self.assertIn('No such file or directory', result.stderr)
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
# Update the ignorable-content setting
Repository.update({'id': repo['id'], 'ignorable-content': ['rpm']})
# Re-synchronize repository
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
# Re-check synced content types
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '0', 'content not ignored correctly')
self.assertEqual(repo['content-counts']['errata'], '2', 'content not synced correctly')
if not is_open('BZ:1664549'):
self.assertEqual(
repo['content-counts']['source-rpms'], '3', 'content not synced correctly'
)
if not is_open('BZ:1682951'):
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 4, 'content not synced correctly')
@tier1
def test_positive_update_url(self):
"""Update the original url for a repository
:id: 1a2cf29b-5c30-4d4c-b6d1-2f227b0a0a57
:expectedresults: Repository url is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# generate repo URLs with all valid credentials
auth_repos = [
repo.format(creds['login'], creds['pass'])
for creds in valid_http_credentials(url_encoded=True)
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in [
FAKE_4_YUM_REPO,
FAKE_1_PUPPET_REPO,
FAKE_2_PUPPET_REPO,
FAKE_3_PUPPET_REPO,
FAKE_2_YUM_REPO,
] + auth_repos:
with self.subTest(url):
# Update the url
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], url)
@tier1
def test_negative_update_auth_url_with_special_characters(self):
"""Verify that repository URL credentials cannot be updated to contain
the forbidden characters
:id: 566553b2-d077-4fd8-8ed5-00ba75355386
:expectedresults: Repository url not updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# get auth repos with credentials containing unquoted special chars
auth_repos = [
repo.format(cred['login'], cred['pass'])
for cred in valid_http_credentials()
if cred['quote']
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in auth_repos:
with self.subTest(url):
with self.assertRaises(CLIReturnCodeError):
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], new_repo['url'])
@tier1
def test_negative_update_auth_url_too_long(self):
"""Update the original url for a repository to value which is too long
:id: a703de60-8631-4e31-a9d9-e51804f27f03
:expectedresults: Repository url not updated
:CaseImportance: Critical
"""
new_repo = self._make_repository()
# generate repo URLs with all invalid credentials
auth_repos = [
repo.format(cred['login'], cred['pass'])
for cred in invalid_http_credentials()
for repo in (FAKE_5_YUM_REPO, FAKE_7_PUPPET_REPO)
]
for url in auth_repos:
with self.subTest(url):
with self.assertRaises(CLIReturnCodeError):
Repository.update({'id': new_repo['id'], 'url': url})
# Fetch it again
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['url'], new_repo['url'])
@tier1
def test_positive_update_gpg_key(self):
"""Update the original gpg key
:id: 367ff375-4f52-4a8c-b974-8c1c54e3fdd3
:expectedresults: Repository gpg key is updated
:CaseImportance: Critical
"""
gpg_key = make_gpg_key({'organization-id': self.org['id']})
gpg_key_new = make_gpg_key({'organization-id': self.org['id']})
new_repo = self._make_repository({'gpg-key-id': gpg_key['id']})
Repository.update({'id': new_repo['id'], 'gpg-key-id': gpg_key_new['id']})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['gpg-key']['id'], gpg_key_new['id'])
@tier1
def test_positive_update_mirror_on_sync(self):
"""Update the mirror on sync rule for repository
:id: 9bab2537-3223-40d7-bc4c-a51b09d2e812
:expectedresults: Repository is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository({'mirror-on-sync': 'no'})
Repository.update({'id': new_repo['id'], 'mirror-on-sync': 'yes'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['mirror-on-sync'], 'yes')
@tier1
def test_positive_update_publish_method(self):
"""Update the original publishing method
:id: e7bd2667-4851-4a64-9c70-1b5eafbc3f71
:expectedresults: Repository publishing method is updated
:CaseImportance: Critical
"""
new_repo = self._make_repository({'publish-via-http': 'no'})
Repository.update({'id': new_repo['id'], 'publish-via-http': 'yes'})
result = Repository.info({'id': new_repo['id']})
self.assertEqual(result['publish-via-http'], 'yes')
@tier1
def test_positive_update_checksum_type(self):
"""Create a YUM repository and update the checksum type
:id: 42f14257-d860-443d-b337-36fd355014bc
:expectedresults: A YUM repository is updated and contains the correct
checksum type
:CaseImportance: Critical
"""
content_type = 'yum'
repository = self._make_repository(
{'content-type': content_type, 'download-policy': 'immediate'}
)
self.assertEqual(repository['content-type'], content_type)
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
Repository.update({'checksum-type': checksum_type, 'id': repository['id']})
result = Repository.info({'id': repository['id']})
self.assertEqual(result['checksum-type'], checksum_type)
@tier1
def test_negative_create_checksum_with_on_demand_policy(self):
"""Attempt to create repository with checksum and on_demand policy.
:id: 33d712e6-e91f-42bb-8c5d-35bdc427182c
:expectedresults: A repository is not created and error is raised.
:CaseImportance: Critical
:BZ: 1732056
"""
for checksum_type in 'sha1', 'sha256':
with self.assertRaises(CLIFactoryError):
self._make_repository(
{
'content-type': 'yum',
'checksum-type': checksum_type,
'download-policy': 'on_demand',
}
)
@tier1
def test_positive_delete_by_id(self):
"""Check if repository can be created and deleted
:id: bcf096db-0033-4138-90a3-cb7355d5dfaf
:expectedresults: Repository is created and then deleted
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
@upgrade
def test_positive_delete_by_name(self):
"""Check if repository can be created and deleted
:id: 463980a4-dbcf-4178-83a6-1863cf59909a
:expectedresults: Repository is created and then deleted
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository({'name': name})
Repository.delete({'name': new_repo['name'], 'product-id': self.product['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
def test_positive_delete_rpm(self):
"""Check if rpm repository with packages can be deleted.
:id: 1172492f-d595-4c8e-89c1-fabb21eb04ac
:expectedresults: Repository is deleted.
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
# Check that there is at least one package
self.assertGreater(int(new_repo['content-counts']['packages']), 0)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
def test_positive_delete_puppet(self):
"""Check if puppet repository with puppet modules can be deleted.
:id: 83d92454-11b7-4f9a-952d-650ffe5135e4
:expectedresults: Repository is deleted.
:BZ: 1316681
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': new_repo['id']})
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
# Check that there is at least one puppet module
self.assertGreater(int(new_repo['content-counts']['puppet-modules']), 0)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
@tier1
@upgrade
def test_positive_remove_content_by_repo_name(self):
"""Synchronize repository and remove rpm content from using repo name
:id: a8b6f17d-3b13-4185-920a-2558ace59458
:expectedresults: Content Counts shows zero packages
:BZ: 1349646, 1413145, 1459845, 1459874
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
repo = Repository.info(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list(
{
'repository': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
}
)
Repository.remove_content(
{
'name': repo['name'],
'product': self.product['name'],
'organization': self.org['name'],
'ids': [package['id'] for package in packages],
}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
@tier1
@upgrade
def test_positive_remove_content_rpm(self):
"""Synchronize repository and remove rpm content from it
:id: c4bcda0e-c0d6-424c-840d-26684ca7c9f1
:expectedresults: Content Counts shows zero packages
:BZ: 1459845, 1459874
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'yum', 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['packages'], '32')
# Find repo packages and remove them
packages = Package.list({'repository-id': repo['id']})
Repository.remove_content(
{'id': repo['id'], 'ids': [package['id'] for package in packages]}
)
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['packages'], '0')
@tier1
@upgrade
def test_positive_remove_content_puppet(self):
"""Synchronize repository and remove puppet content from it
:id: b025ccd0-9beb-4ac0-9fbf-21340c90650e
:expectedresults: Content Counts shows zero puppet modules
:BZ: 1459845
:CaseImportance: Critical
"""
# Create repository and synchronize it
repo = self._make_repository({'content-type': 'puppet', 'url': FAKE_1_PUPPET_REPO})
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['puppet-modules'], '2')
# Find puppet modules and remove them from repository
modules = PuppetModule.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [module['id'] for module in modules]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['puppet-modules'], '0')
@tier1
def test_positive_upload_content(self):
"""Create repository and upload content
:id: eb0ec599-2bf1-483a-8215-66652f948d67
:expectedresults: upload content is successful
:BZ: 1343006
:CaseImportance: Critical
"""
new_repo = self._make_repository({'name': gen_string('alpha')})
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
@tier1
def test_positive_upload_content_to_file_repo(self):
"""Create file repository and upload content to it
:id: 5e24b416-2928-4533-96cf-6bffbea97a95
:customerscenario: true
:expectedresults: upload content operation is successful
:BZ: 1446975
:CaseImportance: Critical
"""
new_repo = self._make_repository({'content-type': 'file', 'url': CUSTOM_FILE_REPO})
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT)
ssh.upload_file(
local_file=get_data_file(OS_TEMPLATE_DATA_FILE),
remote_file="/tmp/{0}".format(OS_TEMPLATE_DATA_FILE),
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(OS_TEMPLATE_DATA_FILE),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(OS_TEMPLATE_DATA_FILE), result[0]['message']
)
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(
int(new_repo['content-counts']['files']), CUSTOM_FILE_REPO_FILES_COUNT + 1
)
@pytest.mark.skip_if_open("BZ:1410916")
@tier2
def test_negative_restricted_user_cv_add_repository(self):
"""Attempt to add a product repository to content view with a
restricted user, using product name not visible to restricted user.
:id: 65792ae0-c5be-4a6c-9062-27dc03b83e10
:BZ: 1436209,1410916
:Steps:
1. Setup a restricted user with permissions that filter the
products with names like Test_* or "rhel7*"
2. Create a content view
3. Create a product with name that should not be visible to the
user and add a repository to it
:expectedresults:
1. The admin user can view the product repository
2. The restricted user cannot view the product repository
3. The restricted user cannot add the product repository to a
content view
4. After the attempt of adding the product repository to content
view, assert that the restricted user still cannot view the
product repository.
:CaseLevel: Integration
"""
required_permissions = {
'Katello::Product': (
[
'view_products',
'create_products',
'edit_products',
'destroy_products',
'sync_products',
'export_products',
],
'name ~ "Test_*" || name ~ "rhel7*"',
),
'Katello::ContentView': (
[
'view_content_views',
'create_content_views',
'edit_content_views',
'destroy_content_views',
'publish_content_views',
'promote_or_remove_content_views',
'export_content_views',
],
'name ~ "Test_*" || name ~ "rhel7*"',
),
'Organization': (
[
'view_organizations',
'create_organizations',
'edit_organizations',
'destroy_organizations',
'assign_organizations',
],
None,
),
}
user_name = gen_alphanumeric()
user_password = gen_alphanumeric()
# Generate a product name that is not like Test_* or rhel7*
product_name = 'zoo_{0}'.format(gen_string('alpha', 20))
# Generate a content view name like Test_*
content_view_name = 'Test_{0}'.format(gen_string('alpha', 20))
# Create an organization
org = make_org()
# Create a non admin user, for the moment without any permissions
user = make_user(
{
'admin': False,
'default-organization-id': org['id'],
'organization-ids': [org['id']],
'login': user_name,
'password': user_password,
}
)
# Create a new role
role = make_role()
# Get the available permissions
available_permissions = Filter.available_permissions()
# group the available permissions by resource type
available_rc_permissions = {}
for permission in available_permissions:
permission_resource = permission['resource']
if permission_resource not in available_rc_permissions:
available_rc_permissions[permission_resource] = []
available_rc_permissions[permission_resource].append(permission)
# create only the required role permissions per resource type
for resource_type, permission_data in required_permissions.items():
permission_names, search = permission_data
# assert that the required resource type is available
self.assertIn(resource_type, available_rc_permissions)
available_permission_names = [
permission['name']
for permission in available_rc_permissions[resource_type]
if permission['name'] in permission_names
]
# assert that all the required permissions are available
self.assertEqual(set(permission_names), set(available_permission_names))
# Create the current resource type role permissions
make_filter({'role-id': role['id'], 'permissions': permission_names, 'search': search})
# Add the created and initiated role with permissions to user
User.add_role({'id': user['id'], 'role-id': role['id']})
# assert that the user is not an admin one and cannot read the current
# role info (note: view_roles is not in the required permissions)
with self.assertRaises(CLIReturnCodeError) as context:
Role.with_user(user_name, user_password).info({'id': role['id']})
self.assertIn(
'Forbidden - server refused to process the request', context.exception.stderr
)
# Create a product
product = make_product({'organization-id': org['id'], 'name': product_name})
# Create a yum repository and synchronize
repo = make_repository({'product-id': product['id'], 'url': FAKE_1_YUM_REPO})
Repository.synchronize({'id': repo['id']})
# Create a content view
content_view = make_content_view({'organization-id': org['id'], 'name': content_view_name})
# assert that the user can read the content view info as per required
# permissions
user_content_view = ContentView.with_user(user_name, user_password).info(
{'id': content_view['id']}
)
# assert that this is the same content view
self.assertEqual(content_view['name'], user_content_view['name'])
# assert admin user is able to view the product
repos = Repository.list({'organization-id': org['id']})
self.assertEqual(len(repos), 1)
# assert that this is the same repo
self.assertEqual(repos[0]['id'], repo['id'])
# assert that restricted user is not able to view the product
repos = Repository.with_user(user_name, user_password).list({'organization-id': org['id']})
self.assertEqual(len(repos), 0)
# assert that the user cannot add the product repo to content view
with self.assertRaises(CLIReturnCodeError):
ContentView.with_user(user_name, user_password).add_repository(
{
'id': content_view['id'],
'organization-id': org['id'],
'repository-id': repo['id'],
}
)
# assert that restricted user still not able to view the product
repos = Repository.with_user(user_name, user_password).list({'organization-id': org['id']})
self.assertEqual(len(repos), 0)
@tier2
def test_positive_upload_remove_srpm_content(self):
"""Create repository, upload and remove an SRPM content
:id: 706dc3e2-dacb-4fdd-8eef-5715ce498888
:expectedresults: SRPM successfully uploaded and removed
:CaseImportance: Critical
:BZ: 1378442
"""
new_repo = self._make_repository({'name': gen_string('alpha', 15)})
ssh.upload_file(
local_file=get_data_file(SRPM_TO_UPLOAD), remote_file="/tmp/{0}".format(SRPM_TO_UPLOAD)
)
# Upload SRPM
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(SRPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
'content-type': 'srpm',
}
)
assert "Successfully uploaded file '{0}'".format(SRPM_TO_UPLOAD) in result[0]['message']
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 1
# Remove uploaded SRPM
Repository.remove_content(
{
'id': new_repo['id'],
'ids': [Srpm.list({'repository-id': new_repo['id']})[0]['id']],
'content-type': 'srpm',
}
)
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 0
@upgrade
@tier2
    def test_positive_srpm_list_end_to_end(self):
        """Create repository, upload, list and remove SRPM content
:id: 98ad4228-f2e5-438a-9210-5ce6561769f2
:expectedresults:
1. SRPM should be listed repository wise.
2. SRPM should be listed product wise.
3. SRPM should be listed for specific and all Organizations.
4. SRPM should be listed LCE wise.
5. Able to see info of uploaded SRPM.
:CaseImportance: High
"""
new_repo = self._make_repository({'name': gen_string('alpha', 15)})
ssh.upload_file(
local_file=get_data_file(SRPM_TO_UPLOAD), remote_file="/tmp/{0}".format(SRPM_TO_UPLOAD)
)
# Upload SRPM
Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(SRPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
'content-type': 'srpm',
}
)
assert len(Srpm.list()) > 0
srpm_list = Srpm.list({'repository-id': new_repo['id']})
assert srpm_list[0]['filename'] == SRPM_TO_UPLOAD
assert len(srpm_list) == 1
assert Srpm.info({'id': srpm_list[0]['id']})[0]['filename'] == SRPM_TO_UPLOAD
assert int(Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']) == 1
assert (
len(
Srpm.list(
{
'organization': new_repo['organization'],
'product-id': new_repo['product']['id'],
'repository-id': new_repo['id'],
}
)
)
> 0
)
assert len(Srpm.list({'organization': new_repo['organization']})) > 0
assert (
len(
Srpm.list(
{'organization': new_repo['organization'], 'lifecycle-environment': 'Library'}
)
)
> 0
)
assert (
len(
Srpm.list(
{
'content-view': 'Default Organization View',
'lifecycle-environment': 'Library',
'organization': new_repo['organization'],
}
)
)
> 0
)
# Remove uploaded SRPM
Repository.remove_content(
{
'id': new_repo['id'],
'ids': [Srpm.list({'repository-id': new_repo['id']})[0]['id']],
'content-type': 'srpm',
}
)
assert int(
Repository.info({'id': new_repo['id']})['content-counts']['source-rpms']
) == len(Srpm.list({'repository-id': new_repo['id']}))
@tier1
def test_positive_create_get_update_delete_module_streams(self):
"""Check module-stream get for each create, get, update, delete.
        :id: e9001f76-9bc7-42a7-b8c9-2dccd5bf0b1f
:Setup:
1. valid yum repo with Module Streams.
:Steps:
1. Create Yum Repository with url contain module-streams
2. Initialize synchronization
3. Another Repository with same Url
4. Module-Stream Get
5. Update the Module-Stream
6. Module-Stream Get
7. Delete Module-Stream
8. Module-Stream Get
:expectedresults: yum repository with modules is synced,
shows correct count and details with create, update, delete and
even duplicate repositories.
:CaseAutomation: automated
:CaseImportance: Critical
"""
org = make_org()
# Create a product
product = make_product({'organization-id': org['id']})
repo = make_repository(
{
'product-id': product['id'],
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(
repo['content-counts']['module-streams'], '7', 'Module Streams not synced correctly'
)
# adding repo with same yum url should not change count.
duplicate_repo = make_repository(
{
'product-id': product['id'],
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
}
)
Repository.synchronize({'id': duplicate_repo['id']})
module_streams = ModuleStream.list({'organization-id': org['id']})
        self.assertEqual(len(module_streams), 7, 'Module Streams not listed correctly')
Repository.update(
{'product-id': product['id'], 'id': repo['id'], 'url': CUSTOM_MODULE_STREAM_REPO_2}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(
repo['content-counts']['module-streams'], '7', 'Module Streams not synced correctly'
)
Repository.delete({'id': repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': repo['id']})
@tier1
def test_module_stream_list_validation(self):
"""Check module-stream get with list on hammer.
        :id: 9842a0c3-8532-4b16-a00a-534fc3b0a776
:Setup:
1. valid yum repo with Module Streams.
:Steps:
1. Create Yum Repositories with url contain module-streams and Products
2. Initialize synchronization
3. Verify the module-stream list with various inputs options
:expectedresults: Verify the module-stream list response.
:CaseAutomation: automated
"""
repo1 = self._make_repository({'content-type': 'yum', 'url': CUSTOM_MODULE_STREAM_REPO_1})
Repository.synchronize({'id': repo1['id']})
product2 = make_product_wait({'organization-id': self.org['id']})
repo2 = self._make_repository(
{
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
'product-id': product2['id'],
}
)
Repository.synchronize({'id': repo2['id']})
module_streams = ModuleStream.list()
        self.assertGreater(len(module_streams), 13, 'Module Streams not listed correctly')
        module_streams = ModuleStream.list({'product-id': product2['id']})
        self.assertEqual(len(module_streams), 7, 'Module Streams not listed correctly')
@tier1
def test_module_stream_info_validation(self):
"""Check module-stream get with info on hammer.
        :id: ddbeb49e-d292-4dc4-8fb9-e9b768acc441
:Setup:
1. valid yum repo with Module Streams.
:Steps:
1. Create Yum Repositories with url contain module-streams
2. Initialize synchronization
3. Verify the module-stream info with various inputs options
:expectedresults: Verify the module-stream info response.
:CaseAutomation: automated
"""
product2 = make_product_wait({'organization-id': self.org['id']})
repo2 = self._make_repository(
{
'content-type': 'yum',
'url': CUSTOM_MODULE_STREAM_REPO_2,
'product-id': product2['id'],
}
)
Repository.synchronize({'id': repo2['id']})
module_streams = ModuleStream.list(
{'repository-id': repo2['id'], 'search': 'name="walrus" and stream="5.21"'}
)
actual_result = ModuleStream.info({'id': module_streams[0]['id']})
expected_result = {
'module-stream-name': 'walrus',
'stream': '5.21',
'architecture': 'x86_64',
}
self.assertEqual(
expected_result,
{key: value for key, value in actual_result.items() if key in expected_result},
)
class OstreeRepositoryTestCase(CLITestCase):
"""Ostree Repository CLI tests."""
@classmethod
@skip_if_os('RHEL6')
def setUpClass(cls):
"""Create an organization and product which can be re-used in tests."""
super(OstreeRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
def _make_repository(self, options=None):
"""Makes a new repository and asserts its success"""
if options is None:
options = {}
if options.get('product-id') is None:
options['product-id'] = self.product['id']
return make_repository(options)
@tier1
    def test_positive_create_ostree_repo(self):
        """Create an ostree repository
:id: a93c52e1-b32e-4590-981b-636ae8b8314d
:expectedresults: ostree repository is created
:CaseImportance: Critical
"""
for name in valid_data_list().values():
with self.subTest(name):
new_repo = self._make_repository(
{
'name': name,
'content-type': 'ostree',
'publish-via-http': 'false',
'url': FEDORA27_OSTREE_REPO,
}
)
self.assertEqual(new_repo['name'], name)
self.assertEqual(new_repo['content-type'], 'ostree')
@pytest.mark.skip_if_open("BZ:1716429")
@tier1
    def test_negative_create_ostree_repo_with_checksum(self):
        """Create an ostree repository with checksum type
:id: a334e0f7-e1be-4add-bbf2-2fd9f0b982c4
:expectedresults: Validation error is raised
:CaseImportance: Critical
:BZ: 1716429
"""
for checksum_type in 'sha1', 'sha256':
with self.subTest(checksum_type):
with self.assertRaisesRegex(
CLIFactoryError,
'Validation failed: Checksum type cannot be set for non-yum repositories',
):
self._make_repository(
{
'content-type': 'ostree',
'checksum-type': checksum_type,
'publish-via-http': 'false',
'url': FEDORA27_OSTREE_REPO,
}
)
@tier1
    def test_negative_create_unprotected_ostree_repo(self):
        """Create an ostree repository and publish it via http
:id: 2b139560-65bb-4a40-9724-5cca57bd8d30
:expectedresults: ostree repository is not created
:CaseImportance: Critical
"""
for use_http in 'true', 'yes', '1':
with self.subTest(use_http):
with self.assertRaisesRegex(
CLIFactoryError,
'Validation failed: OSTree Repositories cannot be unprotected',
):
self._make_repository(
{
'content-type': 'ostree',
'publish-via-http': 'true',
'url': FEDORA27_OSTREE_REPO,
}
)
@tier2
@upgrade
@pytest.mark.skip_if_open("BZ:1625783")
def test_positive_synchronize_ostree_repo(self):
"""Synchronize ostree repo
:id: 64fcae0a-44ae-46ae-9938-032bba1331e9
:expectedresults: Ostree repository is created and synced
:CaseLevel: Integration
:BZ: 1625783
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
# Synchronize it
Repository.synchronize({'id': new_repo['id']})
# Verify it has finished
new_repo = Repository.info({'id': new_repo['id']})
self.assertEqual(new_repo['sync']['status'], 'Success')
@tier1
def test_positive_delete_ostree_by_name(self):
"""Delete Ostree repository by name
:id: 0b545c22-acff-47b6-92ff-669b348f9fa6
:expectedresults: Repository is deleted by name
:CaseImportance: Critical
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
Repository.delete(
{
'name': new_repo['name'],
'product': new_repo['product']['name'],
'organization': new_repo['organization'],
}
)
with self.assertRaises(CLIReturnCodeError):
Repository.info({'name': new_repo['name']})
@tier1
@upgrade
def test_positive_delete_ostree_by_id(self):
"""Delete Ostree repository by id
:id: 171917f5-1a1b-440f-90c7-b8418f1da132
:expectedresults: Repository is deleted by id
:CaseImportance: Critical
"""
new_repo = self._make_repository(
{'content-type': 'ostree', 'publish-via-http': 'false', 'url': FEDORA27_OSTREE_REPO}
)
Repository.delete({'id': new_repo['id']})
with self.assertRaises(CLIReturnCodeError):
Repository.info({'id': new_repo['id']})
class SRPMRepositoryTestCase(CLITestCase):
"""Tests specific to using repositories containing source RPMs."""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(SRPMRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier2
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync(self):
"""Synchronize repository with SRPMs
:id: eb69f840-122d-4180-b869-1bd37518480c
:expectedresults: srpms can be listed in repository
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/Packages/t/ | grep .src.rpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync_publish_cv(self):
"""Synchronize repository with SRPMs, add repository to content view
and publish content view
:id: 78cd6345-9c6c-490a-a44d-2ad64b7e959b
:expectedresults: srpms can be listed in content view
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/content_views/{}'
'/1.0/custom/{}/{}/Packages/t/ | grep .src.rpm'.format(
self.org['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@upgrade
@pytest.mark.skip("Uses deprecated SRPM repository")
def test_positive_sync_publish_promote_cv(self):
"""Synchronize repository with SRPMs, add repository to content view,
publish and promote content view to lifecycle environment
:id: 3d197118-b1fa-456f-980e-ad1a517bc769
:expectedresults: srpms can be listed in content view in proper
lifecycle environment
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_SRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
content_view = ContentView.info({'id': cv['id']})
cvv = content_view['versions'][0]
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}/Packages/t'
' | grep .src.rpm'.format(
self.org['label'], lce['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@pytest.mark.skip_if_open("BZ:1682951")
class DRPMRepositoryTestCase(CLITestCase):
"""Tests specific to using repositories containing delta RPMs."""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(DRPMRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier2
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync(self):
"""Synchronize repository with DRPMs
:id: a645966c-750b-40ef-a264-dc3bb632b9fd
:expectedresults: drpms can be listed in repository
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/Library'
'/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync_publish_cv(self):
"""Synchronize repository with DRPMs, add repository to content view
and publish content view
:id: 014bfc80-4622-422e-a0ec-755b1d9f845e
:expectedresults: drpms can be listed in content view
"""
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/content_views/{}'
'/1.0/custom/{}/{}/drpms/ | grep .drpm'.format(
self.org['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
@tier2
@upgrade
@pytest.mark.skip("Uses deprecated DRPM repository")
def test_positive_sync_publish_promote_cv(self):
"""Synchronize repository with DRPMs, add repository to content view,
publish and promote content view to lifecycle environment
:id: a01cb12b-d388-4902-8532-714f4e28ec56
:expectedresults: drpms can be listed in content view in proper
lifecycle environment
"""
lce = make_lifecycle_environment({'organization-id': self.org['id']})
repo = make_repository({'product-id': self.product['id'], 'url': FAKE_YUM_DRPM_REPO})
Repository.synchronize({'id': repo['id']})
cv = make_content_view({'organization-id': self.org['id']})
ContentView.add_repository({'id': cv['id'], 'repository-id': repo['id']})
ContentView.publish({'id': cv['id']})
content_view = ContentView.info({'id': cv['id']})
cvv = content_view['versions'][0]
ContentView.version_promote({'id': cvv['id'], 'to-lifecycle-environment-id': lce['id']})
result = ssh.command(
'ls /var/lib/pulp/published/yum/https/repos/{}/{}/{}/custom/{}/{}'
'/drpms/ | grep .drpm'.format(
self.org['label'], lce['label'], cv['label'], self.product['label'], repo['label']
)
)
self.assertEqual(result.return_code, 0)
self.assertGreaterEqual(len(result.stdout), 1)
class GitPuppetMirrorTestCase(CLITestCase):
"""Tests for creating the hosts via CLI."""
    # Notes for GIT puppet mirror content
#
# This feature does not allow us to actually sync/update content in a
# GIT repo.
    # Instead, we're essentially "snapshotting" what is contained in a repo at
    # any given time. The ability to update the GIT puppet mirror is/should be
    # provided by pulp itself, via script. However, we should be able to
    # create a sync schedule against the mirror to make sure it is periodically
    # updated to contain the latest and greatest.
@pytest.mark.stubbed
@tier2
def test_positive_git_local_create(self):
"""Create repository with local git puppet mirror.
:id: 89211cd5-82b8-4391-b729-a7502e57f824
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Create link to local puppet mirror via cli
:expectedresults: Content source containing local GIT puppet mirror
content is created
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_local_update(self):
"""Update repository with local git puppet mirror.
:id: 341f40f2-3501-4754-9acf-7cda1a61f7db
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Modify details for existing puppet repo (name, etc.) via cli
:expectedresults: Content source containing local GIT puppet mirror
content is modified
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_local_delete(self):
"""Delete repository with local git puppet mirror.
:id: a243f5bb-5186-41b3-8e8a-07d5cc784ccd
:CaseLevel: Integration
:Setup: Assure local GIT puppet has been created and found by pulp
:Steps: Delete link to local puppet mirror via cli
:expectedresults: Content source containing local GIT puppet mirror
content no longer exists/is available.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_remote_create(self):
"""Create repository with remote git puppet mirror.
:id: 8582529f-3112-4b49-8d8f-f2bbf7dceca7
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: Create link to local puppet mirror via cli
:expectedresults: Content source containing remote GIT puppet mirror
content is created
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_remote_update(self):
"""Update repository with remote git puppet mirror.
:id: 582c50b3-3b90-4244-b694-97642b1b13a9
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: modify details for existing puppet repo (name, etc.) via cli
:expectedresults: Content source containing remote GIT puppet mirror
content is modified
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_remote_delete(self):
"""Delete repository with remote git puppet mirror.
:id: 0a23f969-b202-4c6c-b12e-f651a0b7d049
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps: Delete link to remote puppet mirror via cli
:expectedresults: Content source containing remote GIT puppet mirror
content no longer exists/is available.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_sync(self):
"""Sync repository with git puppet mirror.
:id: a46c16bd-0986-48db-8e62-aeb3907ba4d2
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to sync content from mirror via cli
:expectedresults: Content is pulled down without error
:expectedresults: Confirmation that various resources actually exist in
local content repo
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
@upgrade
def test_positive_git_sync_with_content_change(self):
"""Sync repository with changes in git puppet mirror.
If module changes in GIT mirror but the version in manifest
does not change, content still pulled.
:id: 7d9519ca-8660-4014-8e0e-836594891c0c
:CaseLevel: Integration
:Setup: Assure remote GIT puppet has been created and found by pulp
:Steps:
1. Sync a git repo and observe the contents/checksum etc. of an
existing puppet module
2. Assure a puppet module in git repo has changed but the manifest
version for this module does not change.
3. Using pulp script, update repo mirror and re-sync within
satellite
4. View contents/details of same puppet module
:expectedresults: Puppet module has been updated in our content, even
though the module's version number has not changed.
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_sync_schedule(self):
"""Scheduled sync of git puppet mirror.
:id: 0d58d180-9836-4524-b608-66b67f9cab12
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to create a scheduled sync content from mirror, via cli
:expectedresults: Content is pulled down without error on expected
schedule
:CaseAutomation: notautomated
"""
@pytest.mark.stubbed
@tier2
def test_positive_git_view_content(self):
"""View content in synced git puppet mirror
:id: 02f06092-dd6c-49fa-be9f-831e52476e41
:CaseLevel: Integration
:Setup: git mirror (local or remote) exists as a content source
:Steps: Attempt to list contents of repo via cli
:expectedresults: Spot-checked items (filenames, dates, perhaps
checksums?) are correct.
:CaseAutomation: notautomated
"""
class FileRepositoryTestCase(CLITestCase):
"""Specific tests for File Repositories"""
@classmethod
def setUpClass(cls):
"""Create a product and an org which can be re-used in tests."""
super(FileRepositoryTestCase, cls).setUpClass()
cls.org = make_org()
cls.product = make_product({'organization-id': cls.org['id']})
@tier1
    def test_positive_upload_file_to_file_repo(self):
        """Check an arbitrary file can be uploaded to File Repository
:id: 134d668d-bd63-4475-bf7b-b899bb9fb7bb
:Steps:
1. Create a File Repository
2. Upload an arbitrary file to it
:Expectedresults: uploaded file is available under File Repository
:CaseAutomation: Automated
:CaseImportance: Critical
"""
new_repo = make_repository(
{'content-type': 'file', 'product-id': self.product['id'], 'url': CUSTOM_FILE_REPO}
)
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
repo = Repository.info({'id': new_repo['id']})
self.assertEqual(repo['content-counts']['files'], '1')
filesearch = entities.File().search(
query={"search": "name={0} and repository={1}".format(RPM_TO_UPLOAD, new_repo['name'])}
)
self.assertEqual(RPM_TO_UPLOAD, filesearch[0].name)
@pytest.mark.stubbed
@tier1
def test_positive_file_permissions(self):
"""Check file permissions after file upload to File Repository
:id: 03da888a-69ba-492f-b204-c62d85948d8a
:Setup:
1. Create a File Repository
2. Upload an arbitrary file to it
:Steps: Retrieve file permissions from File Repository
:expectedresults: uploaded file permissions are kept after upload
:CaseAutomation: notautomated
:CaseImportance: Critical
"""
@tier1
@upgrade
    def test_positive_remove_file(self):
        """Check an arbitrary file can be removed from File Repository
:id: 07ca9c8d-e764-404e-866d-30d8cd2ca2b6
:Setup:
1. Create a File Repository
2. Upload an arbitrary file to it
:Steps: Remove a file from File Repository
:expectedresults: file is not listed under File Repository after
removal
:CaseImportance: Critical
"""
new_repo = make_repository(
{'content-type': 'file', 'product-id': self.product['id'], 'url': CUSTOM_FILE_REPO}
)
ssh.upload_file(
local_file=get_data_file(RPM_TO_UPLOAD), remote_file="/tmp/{0}".format(RPM_TO_UPLOAD)
)
result = Repository.upload_content(
{
'name': new_repo['name'],
'organization': new_repo['organization'],
'path': "/tmp/{0}".format(RPM_TO_UPLOAD),
'product-id': new_repo['product']['id'],
}
)
self.assertIn(
"Successfully uploaded file '{0}'".format(RPM_TO_UPLOAD), result[0]['message']
)
repo = Repository.info({'id': new_repo['id']})
self.assertGreater(int(repo['content-counts']['files']), 0)
files = File.list({'repository-id': repo['id']})
Repository.remove_content({'id': repo['id'], 'ids': [file['id'] for file in files]})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['content-counts']['files'], '0')
@tier2
@upgrade
def test_positive_remote_directory_sync(self):
"""Check an entire remote directory can be synced to File Repository
through http
:id: 5c246307-8597-4f68-a6aa-4f1a6bbf0939
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
2. Make the directory available through http
:Steps:
1. Create a File Repository with url pointing to http url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced over http
"""
repo = make_repository(
{
'product-id': self.product['id'],
'content-type': 'file',
'url': FAKE_PULP_REMOTE_FILEREPO,
'name': gen_string('alpha'),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertEqual(repo['sync']['status'], 'Success')
self.assertEqual(repo['content-counts']['files'], '2')
@tier1
def test_positive_file_repo_local_directory_sync(self):
"""Check an entire local directory can be synced to File Repository
:id: ee91ecd2-2f07-4678-b782-95a7e7e57159
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
locally (on the Satellite/Foreman host)
:Steps:
1. Create a File Repository with url pointing to local url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced
:CaseImportance: Critical
"""
        # Set up a local directory on the Satellite host using the Pulp manifest
ssh.command("mkdir -p {}".format(CUSTOM_LOCAL_FOLDER))
ssh.command(
'wget -P {0} -r -np -nH --cut-dirs=5 -R "index.html*" '
'{1}'.format(CUSTOM_LOCAL_FOLDER, CUSTOM_FILE_REPO)
)
repo = make_repository(
{
'content-type': 'file',
'product-id': self.product['id'],
'url': 'file://{0}'.format(CUSTOM_LOCAL_FOLDER),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertGreater(repo['content-counts']['files'], '1')
@tier2
def test_positive_symlinks_sync(self):
"""Check symlinks can be synced to File Repository
:id: b0b0a725-b754-450b-bc0d-572d0294307a
:Setup:
1. Create a directory to be synced with a pulp manifest on its root
locally (on the Satellite/Foreman host)
2. Make sure it contains symlinks
:Steps:
1. Create a File Repository with url pointing to local url
created on setup
2. Initialize synchronization
:expectedresults: entire directory is synced, including files
referred by symlinks
:CaseAutomation: automated
"""
# Downloading the pulp repository into Satellite Host
ssh.command("mkdir -p {}".format(CUSTOM_LOCAL_FOLDER))
ssh.command(
'wget -P {0} -r -np -nH --cut-dirs=5 -R "index.html*" '
'{1}'.format(CUSTOM_LOCAL_FOLDER, CUSTOM_FILE_REPO)
)
ssh.command("ln -s {0} /{1}".format(CUSTOM_LOCAL_FOLDER, gen_string('alpha')))
repo = make_repository(
{
'content-type': 'file',
'product-id': self.product['id'],
'url': 'file://{0}'.format(CUSTOM_LOCAL_FOLDER),
}
)
Repository.synchronize({'id': repo['id']})
repo = Repository.info({'id': repo['id']})
self.assertGreater(repo['content-counts']['files'], '1')
| gpl-3.0 |
matteocrippa/dsl-n55u-bender | release/src/router/openvpn/win/build_ddk.py | 11 | 1559 | import os
from wb import system, home_fn, choose_arch
def build_ddk(config, dir, x64):
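    """Build the TAP driver using paths and flags taken from the build config dict."""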
ddk_path = config['DDK_PATH']
ddk_major = int(config['DDKVER_MAJOR'])
debug = 'PRODUCT_TAP_DEBUG' in config
return build_tap(ddk_path, ddk_major, debug, dir, x64)
def build_tap(ddk_path, ddk_major, debug, dir, x64):
"""Build drivers using WinDDK tools"""
setenv_bat = os.path.realpath(os.path.join(ddk_path, 'bin/setenv.bat'))
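    # WinDDK build flavours: 'chk' is the checked (debug) build, 'fre' the free (release) build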
target = 'chk' if debug else 'fre'
if x64:
target += ' x64'
else:
target += ' x86'
if ddk_major >= 7600:
if x64:
target += ' wlh' # vista
else:
target += ' wnet' # server 2003
else:
if x64:
target += ' wnet' # server 2003
else:
target += ' w2k' # 2000
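    # Run setenv.bat with the chosen target, then invoke the DDK 'build' utility in the driver directory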
system('cmd /c "%s %s %s && cd %s && build -cef"' % (
setenv_bat,
os.path.realpath(ddk_path),
target,
dir
))
def main(config, proj, arch):
if proj == 'tap':
dir = home_fn('tap-win32')
elif proj == 'tapinstall':
dir = home_fn('tapinstall')
else:
raise ValueError("unknown project: %s" % (proj,))
for x64 in choose_arch(arch):
build_ddk(config, dir, x64)
# if we are run directly, and not loaded as a module
if __name__ == "__main__":
import sys
from wb import config
if len(sys.argv) >= 3:
main(config, sys.argv[1], sys.argv[2])
else:
print "usage: build <tap|tapinstall> <x64|x86|all>"
sys.exit(2)
| gpl-2.0 |
gnieboer/gnuradio | gr-wxgui/python/wxgui/const_window.py | 58 | 6131 | #
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
##################################################
# Imports
##################################################
import plotter
import common
import wx
import numpy
import math
import pubsub
from constants import *
from gnuradio import gr #for gr.prefs
import forms
##################################################
# Constants
##################################################
SLIDER_STEPS = 200
LOOP_BW_MIN_EXP, LOOP_BW_MAX_EXP = -6, 0.0
GAIN_MU_MIN_EXP, GAIN_MU_MAX_EXP = -6, -0.301
DEFAULT_FRAME_RATE = gr.prefs().get_long('wxgui', 'const_rate', 5)
DEFAULT_WIN_SIZE = (500, 400)
DEFAULT_CONST_SIZE = gr.prefs().get_long('wxgui', 'const_size', 2048)
CONST_PLOT_COLOR_SPEC = (0, 0, 1)
MARKER_TYPES = (
('Dot Small', 1.0),
('Dot Medium', 2.0),
('Dot Large', 3.0),
('Line Link', None),
)
DEFAULT_MARKER_TYPE = 2.0
##################################################
# Constellation window control panel
##################################################
class control_panel(wx.Panel):
"""
    A control panel with wx widgets to control the plotter.
"""
def __init__(self, parent):
"""
Create a new control panel.
Args:
parent: the wx parent window
"""
self.parent = parent
wx.Panel.__init__(self, parent, style=wx.SUNKEN_BORDER)
parent[SHOW_CONTROL_PANEL_KEY] = True
parent.subscribe(SHOW_CONTROL_PANEL_KEY, self.Show)
control_box = forms.static_box_sizer(
parent=self, label='Options',
bold=True, orient=wx.VERTICAL,
)
#loop_bw
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Loop Bandwidth',
converter=forms.float_converter(),
ps=parent, key=LOOP_BW_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=LOOP_BW_MIN_EXP,
max_exp=LOOP_BW_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=LOOP_BW_KEY,
)
#gain_mu
control_box.AddStretchSpacer()
forms.text_box(
sizer=control_box, parent=self, label='Gain Mu',
converter=forms.float_converter(),
ps=parent, key=GAIN_MU_KEY,
)
forms.log_slider(
sizer=control_box, parent=self,
min_exp=GAIN_MU_MIN_EXP,
max_exp=GAIN_MU_MAX_EXP,
num_steps=SLIDER_STEPS,
ps=parent, key=GAIN_MU_KEY,
)
#marker
control_box.AddStretchSpacer()
forms.drop_down(
sizer=control_box, parent=self,
ps=parent, key=MARKER_KEY, label='Marker',
choices=map(lambda x: x[1], MARKER_TYPES),
labels=map(lambda x: x[0], MARKER_TYPES),
)
#run/stop
control_box.AddStretchSpacer()
forms.toggle_button(
sizer=control_box, parent=self,
true_label='Stop', false_label='Run',
ps=parent, key=RUNNING_KEY,
)
#set sizer
self.SetSizerAndFit(control_box)
##################################################
# Constellation window with plotter and control panel
##################################################
class const_window(wx.Panel, pubsub.pubsub):
def __init__(
self,
parent,
controller,
size,
title,
msg_key,
loop_bw_key,
gain_mu_key,
gain_omega_key,
omega_key,
sample_rate_key,
):
pubsub.pubsub.__init__(self)
#proxy the keys
self.proxy(MSG_KEY, controller, msg_key)
self.proxy(LOOP_BW_KEY, controller, loop_bw_key)
self.proxy(GAIN_MU_KEY, controller, gain_mu_key)
self.proxy(GAIN_OMEGA_KEY, controller, gain_omega_key)
self.proxy(OMEGA_KEY, controller, omega_key)
self.proxy(SAMPLE_RATE_KEY, controller, sample_rate_key)
#initialize values
self[RUNNING_KEY] = True
self[X_DIVS_KEY] = 8
self[Y_DIVS_KEY] = 8
self[MARKER_KEY] = DEFAULT_MARKER_TYPE
#init panel and plot
wx.Panel.__init__(self, parent, style=wx.SIMPLE_BORDER)
self.plotter = plotter.channel_plotter(self)
self.plotter.SetSize(wx.Size(*size))
self.plotter.SetSizeHints(*size)
self.plotter.set_title(title)
self.plotter.set_x_label('Inphase')
self.plotter.set_y_label('Quadrature')
self.plotter.enable_point_label(True)
self.plotter.enable_grid_lines(True)
#setup the box with plot and controls
self.control_panel = control_panel(self)
main_box = wx.BoxSizer(wx.HORIZONTAL)
main_box.Add(self.plotter, 1, wx.EXPAND)
main_box.Add(self.control_panel, 0, wx.EXPAND)
self.SetSizerAndFit(main_box)
#alpha and gain mu 2nd orders
def set_gain_omega(gain_mu): self[GAIN_OMEGA_KEY] = .25*gain_mu**2
self.subscribe(GAIN_MU_KEY, set_gain_omega)
#register events
self.subscribe(MSG_KEY, self.handle_msg)
self.subscribe(X_DIVS_KEY, self.update_grid)
self.subscribe(Y_DIVS_KEY, self.update_grid)
#initial update
self.update_grid()
def handle_msg(self, msg):
"""
Plot the samples onto the complex grid.
Args:
msg: the array of complex samples
"""
if not self[RUNNING_KEY]: return
#convert to complex floating point numbers
samples = numpy.fromstring(msg, numpy.complex64)
real = numpy.real(samples)
imag = numpy.imag(samples)
#plot
self.plotter.set_waveform(
channel=0,
samples=(real, imag),
color_spec=CONST_PLOT_COLOR_SPEC,
marker=self[MARKER_KEY],
)
#update the plotter
self.plotter.update()
def update_grid(self):
#update the x axis
x_max = 2.0
self.plotter.set_x_grid(-x_max, x_max, common.get_clean_num(2.0*x_max/self[X_DIVS_KEY]))
#update the y axis
y_max = 2.0
self.plotter.set_y_grid(-y_max, y_max, common.get_clean_num(2.0*y_max/self[Y_DIVS_KEY]))
#update plotter
self.plotter.update()
| gpl-3.0 |
Angoreher/xcero | stats/models.py | 1 | 1039 | # -*- coding: utf-8 -*-
""" Models for the stats application. """
# standard library
# django
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
# models
from base.models import BaseModel
from users.models import User
class Stat(BaseModel):
# foreign keys
user = models.ForeignKey(
User,
verbose_name=_('user'),
)
# required fields
name = models.CharField(
_('name'),
max_length=30,
blank=True,
)
# optional fields
class Meta:
verbose_name = _('stat')
verbose_name_plural = _('stats')
permissions = (
('view_stat', _('Can view stats')),
)
def __str__(self):
# TODO this is an example str return, change it
return self.name
def get_absolute_url(self):
""" Returns the canonical URL for the stat object """
# TODO this is an example, change it
return reverse('stat_detail', args=(self.pk,))
| mit |
jessstrap/servotk | tests/wpt/web-platform-tests/tools/wptserve/wptserve/ranges.py | 142 | 3004 | from .utils import HTTPException
class RangeParser(object):
def __call__(self, header, file_size):
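        # Parse a header such as "bytes=0-499,500-" into Range objects relative
        # to file_size, then coalesce any overlapping ranges.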
prefix = "bytes="
if not header.startswith(prefix):
raise HTTPException(416, message="Unrecognised range type %s" % (header,))
parts = header[len(prefix):].split(",")
ranges = []
for item in parts:
components = item.split("-")
if len(components) != 2:
raise HTTPException(416, "Bad range specifier %s" % (item))
data = []
for component in components:
if component == "":
data.append(None)
else:
try:
data.append(int(component))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
try:
ranges.append(Range(data[0], data[1], file_size))
except ValueError:
raise HTTPException(416, "Bad range specifier %s" % (item))
return self.coalesce_ranges(ranges, file_size)
def coalesce_ranges(self, ranges, file_size):
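        # Walk the ranges from highest to lowest start offset, merging each one
        # into the running target whenever they overlap.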
rv = []
target = None
for current in reversed(sorted(ranges)):
if target is None:
target = current
else:
new = target.coalesce(current)
target = new[0]
if len(new) > 1:
rv.append(new[1])
rv.append(target)
return rv[::-1]
class Range(object):
def __init__(self, lower, upper, file_size):
self.file_size = file_size
self.lower, self.upper = self._abs(lower, upper)
if self.lower >= self.upper or self.lower >= self.file_size:
raise ValueError
def __repr__(self):
return "<Range %s-%s>" % (self.lower, self.upper)
def __lt__(self, other):
return self.lower < other.lower
def __gt__(self, other):
return self.lower > other.lower
def __eq__(self, other):
return self.lower == other.lower and self.upper == other.upper
def _abs(self, lower, upper):
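        # Convert an open-ended or suffix byte range into absolute
        # [lower, upper) offsets clamped to the file size.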
if lower is None and upper is None:
lower, upper = 0, self.file_size
elif lower is None:
lower, upper = max(0, self.file_size - upper), self.file_size
elif upper is None:
lower, upper = lower, self.file_size
else:
lower, upper = lower, min(self.file_size, upper + 1)
return lower, upper
def coalesce(self, other):
assert self.file_size == other.file_size
if (self.upper < other.lower or self.lower > other.upper):
return sorted([self, other])
else:
return [Range(min(self.lower, other.lower),
max(self.upper, other.upper) - 1,
self.file_size)]
def header_value(self):
return "bytes %i-%i/%i" % (self.lower, self.upper - 1, self.file_size)
| mpl-2.0 |
rghe/ansible | lib/ansible/modules/packaging/os/opkg.py | 95 | 5138 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Patrick Pelletier <[email protected]>
# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: opkg
author: "Patrick Pelletier (@skinp)"
short_description: Package manager for OpenWrt
description:
- Manages OpenWrt packages
version_added: "1.1"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
default: present
force:
description:
- opkg --force parameter used
choices:
- ""
- "depends"
- "maintainer"
- "reinstall"
- "overwrite"
- "downgrade"
- "space"
- "postinstall"
- "remove"
- "checksum"
- "removal-of-dependent-packages"
default: absent
version_added: "2.0"
update_cache:
description:
- update the package db first
default: "no"
type: bool
requirements:
- opkg
- python
'''
EXAMPLES = '''
- opkg:
name: foo
state: present
- opkg:
name: foo
state: present
update_cache: yes
- opkg:
name: foo
state: absent
- opkg:
name: foo,bar
state: absent
- opkg:
name: foo
state: present
force: overwrite
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def update_package_db(module, opkg_path):
""" Updates packages list. """
rc, out, err = module.run_command("%s update" % opkg_path)
if rc != 0:
module.fail_json(msg="could not update package db")
def query_package(module, opkg_path, name, state="present"):
""" Returns whether a package is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s list-installed | grep -q \"^%s \"" % (shlex_quote(opkg_path), shlex_quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_packages(module, opkg_path, packages):
""" Uninstalls one or more packages if installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s remove %s %s" % (opkg_path, force, package))
if query_package(module, opkg_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, opkg_path, packages):
""" Installs one or more packages if not already installed. """
p = module.params
force = p["force"]
if force:
force = "--force-%s" % force
install_c = 0
for package in packages:
if query_package(module, opkg_path, package):
continue
rc, out, err = module.run_command("%s install %s %s" % (opkg_path, force, package))
if not query_package(module, opkg_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
module.exit_json(changed=False, msg="package(s) already present")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["pkg"], required=True),
state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
"checksum", "removal-of-dependent-packages"]),
update_cache=dict(default="no", aliases=["update-cache"], type='bool')
)
)
opkg_path = module.get_bin_path('opkg', True, ['/bin'])
p = module.params
if p["update_cache"]:
update_package_db(module, opkg_path)
pkgs = p["name"].split(",")
if p["state"] in ["present", "installed"]:
install_packages(module, opkg_path, pkgs)
elif p["state"] in ["absent", "removed"]:
remove_packages(module, opkg_path, pkgs)
if __name__ == '__main__':
main()
| gpl-3.0 |
greut/invenio-kwalitee | kwalitee/views.py | 3 | 12240 | # -*- coding: utf-8 -*-
#
# This file is part of kwalitee
# Copyright (C) 2014, 2015 CERN.
#
# kwalitee is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# kwalitee is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kwalitee; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Views like in MTV."""
from __future__ import unicode_literals
import requests
from flask import (current_app, render_template, make_response, json, jsonify,
request, url_for)
from werkzeug.exceptions import NotFound
from .tasks import pull_request, push, get_headers
from .models import db, Account, BranchStatus, CommitStatus, Repository
def status(sha):
"""Show the status of a commit.
**deprecated** static files aren't used anymore. To be removed at some
point.
:param sha: identifier of a commit.
"""
try:
with current_app.open_instance_resource(
"status_{sha}.txt".format(sha=sha), "r") as f:
status = f.read()
except IOError:
raise NotFound("{sha} was not found.".format(sha=sha))
status = status if len(status) > 0 else sha + ": Everything OK"
return render_template("status.html", status=status)
def index():
"""Homepage that lists the accounts."""
accounts = Account.query.order_by(db.asc(Account.name)).all()
return render_template("index.html", accounts=accounts)
def account(account):
"""Display the repositories linked with one account.
:param account: name of the account
"""
acc = _get_account(account)
return render_template("account.html",
account=acc,
repositories=acc.repositories)
def repository(account, repository, limit=50):
"""Display the recents commits and branches of a repository.
:param account: name of the owner
:param repository: name of the repository
:param limit: size of the commit window
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
commits = CommitStatus.query \
.filter_by(repository_id=repo.id) \
.order_by(db.desc(CommitStatus.id)) \
.limit(limit)
return render_template("repository.html",
account=acc,
repository=repo,
commits=commits)
def commit(account, repository, sha):
"""Display the status of a commit.
:param account: name of the owner
:param repository: name of the repository
:param sha: identifier of the commit
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
commit = CommitStatus.query.filter_by(repository_id=repo.id,
sha=sha).first_or_404()
return render_template("commit.html",
account=acc,
repository=repo,
commit=commit)
def branch(account, repository, branch):
"""Display the statuses of a branch.
:param account: name of the owner
:param repository: name of the repository
:param branch: name of the branch
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
all = BranchStatus.query.join(BranchStatus.commit) \
.filter(CommitStatus.repository_id == repo.id) \
.filter(BranchStatus.name == branch) \
.all()
if not all:
raise NotFound("{0.fullname} as no branches called {1}"
.format(repo, branch))
return render_template("branches.html",
account=acc,
repository=repo,
branches=all)
def branch_status(account, repository, branch, sha):
"""Display the status of a pull request.
:param account: name of the owner
:param repository: name of the repository
:param branch: name of the branch
:param sha: commit identifier of the commit related with the branch
"""
acc = _get_account(account)
repo = _get_repository(acc, repository)
branch = BranchStatus.query.join(BranchStatus.commit) \
.filter(CommitStatus.repository_id == repo.id) \
.filter(CommitStatus.sha == sha) \
.filter(BranchStatus.name == branch) \
.first_or_404()
return render_template("branch.html",
account=acc,
repository=repo,
branch=branch,
commit=branch.commit)
def payload():
"""Handle the GitHub events.
.. seealso::
`Event Types <https://developer.github.com/v3/activity/events/types/>`
"""
q = current_app.config["queue"]
events = ["push", "pull_request"]
try:
event = None
if "X-GitHub-Event" in request.headers:
event = request.headers["X-GitHub-Event"]
else:
raise ValueError("No X-GitHub-Event HTTP header found")
if event == "ping":
payload = {"message": "pong"}
elif event in events:
config = dict(current_app.config)
config.pop("queue")
timeout = config.pop("WORKER_TIMEOUT", None)
auto_create = config.pop("AUTO_CREATE", False)
data = json.loads(request.data)
repository_name = data["repository"]["name"]
keyname = "name" if event == "push" else "login"
owner_name = data["repository"]["owner"][keyname]
payload = {
"state": "pending",
"context": config.get("CONTEXT")
}
owner = Account.query.filter_by(name=owner_name).first()
if owner:
repository = Repository.query.filter_by(
name=repository_name,
owner_id=owner.id).first()
if not owner or not repository:
if auto_create:
owner = Account.find_or_create(owner_name)
repository = Repository.find_or_create(owner,
repository_name)
else:
payload["state"] = "error"
payload["description"] = "{0}/{1} is not yet registered" \
.format(owner_name,
repository_name)
if owner and repository:
if event == "push":
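                    # Queue one check per pushed commit and point the GitHub
                    # status at that commit's page on this service.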
status_url = ""
commit_url = "https://api.github.com/repos/{owner}" \
"/{repo}/commits/{sha}"
for commit in data["commits"]:
cs = CommitStatus.find_or_create(repository,
commit["id"],
commit["url"])
status_url = url_for("commit",
account=owner.name,
repository=repository.name,
sha=cs.sha,
_external=True)
url = commit_url.format(
commit_url,
owner=owner.name,
repo=repository.name,
sha=cs.sha)
q.enqueue(push, cs.id, url, status_url, config,
timeout=timeout)
payload["target_url"] = status_url
payload["description"] = "commits queues"
elif event == "pull_request":
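                    # Fetch every commit on the pull request from the GitHub API
                    # and queue a single branch-level check covering all of them.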
if data["action"] not in ["synchronize", "opened",
"reopened"]:
raise ValueError(
"Pull request action {0} is not supported"
.format(data["action"]))
repo = data["repository"]
data = data["pull_request"]
pull_request_url = data["url"]
commit_sha = data["head"]["sha"]
commits = []
headers = get_headers(Repository.query.filter_by(
name=repo["name"]).first(), config)
response = requests.get(data["commits_url"],
headers=headers)
response.raise_for_status() # check API rate limit
response_json = json.loads(response.content)
for commit in response_json:
cstat = CommitStatus.find_or_create(repository,
commit["sha"],
commit["html_url"])
commits.append(cstat)
bs = BranchStatus.find_or_create(commits[-1],
data["head"]["label"],
data["html_url"],
{"commits": commits})
status_url = url_for("branch_status",
account=owner.name,
repository=repository.name,
branch=bs.name,
sha=commit_sha,
_external=True)
q.enqueue(pull_request, bs.id, pull_request_url,
status_url, config, timeout=timeout)
payload["target_url"] = status_url
payload["description"] = "pull request {0} queued" \
.format(bs.name)
else:
raise ValueError("Event {0} is not supported".format(event))
return jsonify(payload=payload)
except Exception as e:
import traceback
        # Uncomment to help you debug the tests
# raise e
return make_response(jsonify(status="failure",
stacktrace=traceback.format_exc(),
exception=str(e)),
500)
def _get_account(account_name):
"""Get the account by name.
:param account_name: name of the account
:raise NotFound: if the account cannot be found
"""
account = Account.query.filter_by(name=account_name).first()
if not account:
raise NotFound("{0} isn't registered yet.".format(account_name))
return account
def _get_repository(account, repository_name):
"""Get the repository by name.
:param account: account
:param repository_name: name of the repository
:raise NotFound: if the repository cannot be found
"""
repository = Repository.query.filter_by(owner_id=account.id,
name=repository_name).first()
if not repository:
raise NotFound("{0}/{1} isn't registered yet.".format(account.name,
repository_name))
return repository
| gpl-2.0 |
andrewleech/SickRage | sickbeard/providers/hdtorrents_it.py | 3 | 8277 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from requests.utils import dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from six.moves.urllib.parse import quote_plus
from sickrage.helper.common import convert_size, try_int
from sickrage.providers.torrent.TorrentProvider import TorrentProvider
class HDTorrentsProvider_IT(TorrentProvider): # pylint: disable=too-many-instance-attributes
def __init__(self):
TorrentProvider.__init__(self, "HDTorrents.it")
self.username = None
self.password = None
self.ratio = None
self.minseed = None
self.minleech = None
self.freeleech = None
self.urls = {'base_url': 'http://hdtorrents.it',
'login': 'http://hdtorrents.it/takelogin.php',
'search': 'http://hdtorrents.it/browse.php?search=%s',
'rss': 'http://hdtorrents.it/browse.php?search=%s',
'home': 'http://hdtorrents.it/%s'}
self.url = self.urls['base_url']
self.proper_strings = ['PROPER', 'REPACK']
        self.cache = tvcache.TVCache(self, min_time=30)  # only poll HDTorrents every 30 minutes max
def _check_auth(self):
if not self.username or not self.password:
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return True
def login(self):
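        # Reuse the current session if we already hold login cookies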
if any(dict_from_cookiejar(self.session.cookies).values()):
return True
login_params = {'username': self.username,
'password': self.password,
'submit': 'Accedi!'}
response = self.get_url(self.urls['login'], post_data=login_params, timeout=30)
if not response:
logger.log("Unable to connect to provider", logger.WARNING)
return False
if re.search('Lei non e registrato in sistema.', response):
logger.log("Invalid username or password. Check your settings", logger.WARNING)
return False
return True
def search(self, search_strings, age=0, ep_obj=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
results = []
if not self.login():
return results
for mode in search_strings:
items = []
logger.log("Search Mode: {}".format(mode), logger.DEBUG)
for search_string in search_strings[mode]:
if mode != 'RSS':
search_url = self.urls['search'] % quote_plus(search_string)
logger.log("Search string: {}".format(search_string), logger.DEBUG)
else:
search_url = self.urls['rss']
if self.freeleech:
search_url = search_url.replace('active=1', 'active=5')
logger.log("Search URL: {}".format(search_url), logger.DEBUG)
data = self.get_url(search_url)
if not data or 'Error' in data:
logger.log("No data returned from provider", logger.DEBUG)
continue
if data.find('Non abbiamo trovato nulla') != -1:
logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
continue
# Search result page contains some invalid html that prevents html parser from returning all data.
# We cut everything before the table that contains the data we are interested in thus eliminating
# the invalid html portions
try:
index = data.lower().index('<tbody id="highlighted"')
except ValueError:
logger.log("Could not find table of torrents highlighted", logger.DEBUG)
continue
# data = urllib.unquote(data[index:].encode('utf-8')).decode('utf-8').replace('\t', '')
data = data[index:]
with BS4Parser(data, 'html5lib') as html:
if not html:
logger.log("No html data parsed from provider", logger.DEBUG)
continue
torrent_rows = []
torrent_table = html.find('table', class_='highlighted')
if torrent_table:
torrent_rows = torrent_table.find_all('tr')
if not torrent_rows:
logger.log("Could not find results in returned data", logger.DEBUG)
continue
# Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0].find_all('td')]
# Skip column headers
for result in torrent_rows[1:]:
try:
cells = result.findChildren('td')[:len(labels)]
if len(cells) < len(labels):
continue
title = cells[labels.index(1)].a.index(0).get_text(strip=True)
seeders = try_int(cells[labels.index(5)].a.index(0).get_text(strip=True))
leechers = try_int(cells[labels.index(5)].a.index(1).get_text(strip=True))
torrent_size = cells[labels.index(4)].get_text()
size = convert_size(torrent_size) or -1
download_url = self.url + '/' + cells[labels.index(1)].a.index(0)['href']
# title = cells[labels.index(u'Filename')].a.get_text(strip=True)
# seeders = try_int(cells[labels.index(u'S')].get_text(strip=True))
# leechers = try_int(cells[labels.index(u'L')].get_text(strip=True))
# torrent_size = cells[labels.index(u'Size')].get_text()
# size = convert_size(torrent_size) or -1
# download_url = self.url + '/' + cells[labels.index(u'Dl')].a['href']
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
continue
if not all([title, download_url]):
continue
# Filter unseeded torrent
if seeders < self.minseed or leechers < self.minleech:
if mode != 'RSS':
logger.log(
"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
continue
item = title, download_url, size, seeders, leechers
if mode != 'RSS':
logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)
items.append(item)
# For each search mode sort all the items by seeders if available
items.sort(key=lambda tup: tup[3], reverse=True)
results += items
return results
def seed_ratio(self):
return self.ratio
provider = HDTorrentsProvider_IT()
| gpl-3.0 |
kxliugang/edx-platform | lms/djangoapps/lti_provider/tests/test_tasks.py | 36 | 4381 | """
Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py
"""
import ddt
from django.test import TestCase
from mock import patch, MagicMock
from student.tests.factories import UserFactory
from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService
import lti_provider.tasks as tasks
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
class BaseOutcomeTest(TestCase):
"""
Super type for tests of both the leaf and composite outcome celery tasks.
"""
def setUp(self):
super(BaseOutcomeTest, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.usage_key = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='block_id'
)
self.user = UserFactory.create()
self.consumer = LtiConsumer(
consumer_name='Lti Consumer Name',
consumer_key='consumer_key',
consumer_secret='consumer_secret',
instance_guid='tool_instance_guid'
)
self.consumer.save()
outcome = OutcomeService(
lis_outcome_service_url='http://example.com/service_url',
lti_consumer=self.consumer
)
outcome.save()
self.assignment = GradedAssignment(
user=self.user,
course_key=self.course_key,
usage_key=self.usage_key,
outcome_service=outcome,
lis_result_sourcedid='sourcedid',
version_number=1,
)
self.assignment.save()
self.send_score_update_mock = self.setup_patch(
'lti_provider.outcomes.send_score_update', None
)
def setup_patch(self, function_name, return_value):
"""
Patch a method with a given return value, and return the mock
"""
mock = MagicMock(return_value=return_value)
new_patch = patch(function_name, new=mock)
new_patch.start()
self.addCleanup(new_patch.stop)
return mock
@ddt.ddt
class SendLeafOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_leaf_outcome method in tasks.py
"""
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
def test_outcome_with_score(self, earned, possible, expected):
tasks.send_leaf_outcome(
self.assignment.id, # pylint: disable=no-member
earned,
possible
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
@ddt.ddt
class SendCompositeOutcomeTest(BaseOutcomeTest):
"""
Tests for the send_composite_outcome method in tasks.py
"""
def setUp(self):
super(SendCompositeOutcomeTest, self).setUp()
self.descriptor = MagicMock()
self.descriptor.location = BlockUsageLocator(
course_key=self.course_key,
block_type='problem',
block_id='problem',
)
self.weighted_scores = MagicMock()
self.weighted_scores_mock = self.setup_patch(
'lti_provider.tasks.get_weighted_scores', self.weighted_scores
)
self.module_store = MagicMock()
self.module_store.get_item = MagicMock(return_value=self.descriptor)
self.check_result_mock = self.setup_patch(
'lti_provider.tasks.modulestore',
self.module_store
)
@ddt.data(
(2.0, 2.0, 1.0),
(2.0, 0.0, 0.0),
(1, 2, 0.5),
)
@ddt.unpack
    def test_outcome_with_score(self, earned, possible, expected):
self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible))
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.send_score_update_mock.assert_called_once_with(self.assignment, expected)
def test_outcome_with_outdated_version(self):
self.assignment.version_number = 2
self.assignment.save()
tasks.send_composite_outcome(
self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member
)
self.assertEqual(self.weighted_scores_mock.call_count, 0)
| agpl-3.0 |
openhatch/oh-mainline | vendor/packages/gdata/src/atom/http_core.py | 40 | 19862 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# TODO: add proxy handling.
__author__ = '[email protected] (Jeff Scudder)'
import os
import StringIO
import urlparse
import urllib
import httplib
ssl = None
try:
import ssl
except ImportError:
pass
class Error(Exception):
pass
class UnknownSize(Error):
pass
class ProxyError(Error):
pass
MIME_BOUNDARY = 'END_OF_PART'
def get_headers(http_response):
"""Retrieves all HTTP headers from an HTTP response from the server.
This method is provided for backwards compatibility for Python2.2 and 2.3.
The httplib.HTTPResponse object in 2.2 and 2.3 does not have a getheaders
method so this function will use getheaders if available, but if not it
will retrieve a few using getheader.
"""
if hasattr(http_response, 'getheaders'):
return http_response.getheaders()
else:
headers = []
for header in (
'location', 'content-type', 'content-length', 'age', 'allow',
'cache-control', 'content-location', 'content-encoding', 'date',
'etag', 'expires', 'last-modified', 'pragma', 'server',
'set-cookie', 'transfer-encoding', 'vary', 'via', 'warning',
'www-authenticate', 'gdata-version'):
value = http_response.getheader(header, None)
if value is not None:
headers.append((header, value))
return headers
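# Editor's illustrative sketch (not part of the original module): this helper hides
# the getheaders/getheader difference, so callers can always iterate header pairs
# the same way, even on Python versions whose HTTPResponse lacks getheaders():
#
#   client = HttpClient()
#   http_response = client.request(HttpRequest(uri='http://www.example.com/', method='GET'))
#   for name, value in get_headers(http_response):
#     print '%s: %s' % (name, value)
#
# The host above is a placeholder; HttpClient and HttpRequest are defined later in
# this module.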
class HttpRequest(object):
"""Contains all of the parameters for an HTTP 1.1 request.
The HTTP headers are represented by a dictionary, and it is the
responsibility of the user to ensure that duplicate field names are combined
into one header value according to the rules in section 4.2 of RFC 2616.
"""
method = None
uri = None
def __init__(self, uri=None, method=None, headers=None):
"""Construct an HTTP request.
Args:
uri: The full path or partial path as a Uri object or a string.
method: The HTTP method for the request, examples include 'GET', 'POST',
etc.
headers: dict of strings The HTTP headers to include in the request.
"""
self.headers = headers or {}
self._body_parts = []
if method is not None:
self.method = method
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
self.uri = uri or Uri()
def add_body_part(self, data, mime_type, size=None):
"""Adds data to the HTTP request body.
If more than one part is added, this is assumed to be a mime-multipart
request. This method is designed to create MIME 1.0 requests as specified
in RFC 1341.
Args:
data: str or a file-like object containing a part of the request body.
mime_type: str The MIME type describing the data
size: int Required if the data is a file like object. If the data is a
string, the size is calculated so this parameter is ignored.
"""
if isinstance(data, str):
size = len(data)
if size is None:
# TODO: support chunked transfer if some of the body is of unknown size.
raise UnknownSize('Each part of the body must have a known size.')
if 'Content-Length' in self.headers:
content_length = int(self.headers['Content-Length'])
else:
content_length = 0
# If this is the first part added to the body, then this is not a multipart
# request.
if len(self._body_parts) == 0:
self.headers['Content-Type'] = mime_type
content_length = size
self._body_parts.append(data)
elif len(self._body_parts) == 1:
# This is the first member in a mime-multipart request, so change the
# _body_parts list to indicate a multipart payload.
self._body_parts.insert(0, 'Media multipart posting')
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
content_length += len(boundary_string) + size
self._body_parts.insert(1, boundary_string)
content_length += len('Media multipart posting')
# Put the content type of the first part of the body into the multipart
# payload.
original_type_string = 'Content-Type: %s\r\n\r\n' % (
self.headers['Content-Type'],)
self._body_parts.insert(2, original_type_string)
content_length += len(original_type_string)
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.append(boundary_string)
content_length += len(boundary_string)
# Change the headers to indicate this is now a mime multipart request.
self.headers['Content-Type'] = 'multipart/related; boundary="%s"' % (
MIME_BOUNDARY,)
self.headers['MIME-version'] = '1.0'
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.append(type_string)
content_length += len(type_string)
self._body_parts.append(data)
ending_boundary_string = '\r\n--%s--' % (MIME_BOUNDARY,)
self._body_parts.append(ending_boundary_string)
content_length += len(ending_boundary_string)
else:
# This is a mime multipart request.
boundary_string = '\r\n--%s\r\n' % (MIME_BOUNDARY,)
self._body_parts.insert(-1, boundary_string)
content_length += len(boundary_string) + size
# Include the mime type of this part.
type_string = 'Content-Type: %s\r\n\r\n' % (mime_type)
self._body_parts.insert(-1, type_string)
content_length += len(type_string)
self._body_parts.insert(-1, data)
self.headers['Content-Length'] = str(content_length)
# I could add an "append_to_body_part" method as well.
AddBodyPart = add_body_part
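  # Editor's illustrative sketch (not part of the original class): one call keeps a
  # plain body, a second call switches the request to MIME multipart automatically:
  #
  #   request = HttpRequest(uri='http://www.example.com/upload', method='POST')
  #   request.add_body_part('<entry/>', 'application/atom+xml')
  #   request.add_body_part(open('photo.jpg', 'rb'), 'image/jpeg', size=31337)
  #   # request.headers['Content-Type'] is now 'multipart/related; boundary=...'
  #
  # The URL, file name and size above are placeholders for illustration only.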
def add_form_inputs(self, form_data,
mime_type='application/x-www-form-urlencoded'):
"""Form-encodes and adds data to the request body.
Args:
      form_data: dict or sequence of two-member tuples which contains the
          form keys and values.
mime_type: str The MIME type of the form data being sent. Defaults
to 'application/x-www-form-urlencoded'.
"""
body = urllib.urlencode(form_data)
self.add_body_part(body, mime_type)
AddFormInputs = add_form_inputs
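  # Editor's illustrative sketch (not part of the original class): form posts are a
  # thin wrapper over add_body_part, e.g.
  #
  #   request = HttpRequest(uri='http://www.example.com/search', method='POST')
  #   request.add_form_inputs({'q': 'atom', 'page': 2})
  #   # the body is now 'q=atom&page=2' (ordering may vary) and Content-Type is
  #   # 'application/x-www-form-urlencoded'. The URL and fields are placeholders.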
def _copy(self):
"""Creates a deep copy of this request."""
copied_uri = Uri(self.uri.scheme, self.uri.host, self.uri.port,
self.uri.path, self.uri.query.copy())
new_request = HttpRequest(uri=copied_uri, method=self.method,
headers=self.headers.copy())
new_request._body_parts = self._body_parts[:]
return new_request
def _dump(self):
"""Converts to a printable string for debugging purposes.
In order to preserve the request, it does not read from file-like objects
in the body.
"""
output = 'HTTP Request\n method: %s\n url: %s\n headers:\n' % (
self.method, str(self.uri))
for header, value in self.headers.iteritems():
output += ' %s: %s\n' % (header, value)
output += ' body sections:\n'
i = 0
for part in self._body_parts:
if isinstance(part, (str, unicode)):
output += ' %s: %s\n' % (i, part)
else:
output += ' %s: <file like object>\n' % i
i += 1
return output
def _apply_defaults(http_request):
if http_request.uri.scheme is None:
if http_request.uri.port == 443:
http_request.uri.scheme = 'https'
else:
http_request.uri.scheme = 'http'
class Uri(object):
"""A URI as used in HTTP 1.1"""
scheme = None
host = None
port = None
path = None
def __init__(self, scheme=None, host=None, port=None, path=None, query=None):
"""Constructor for a URI.
Args:
scheme: str This is usually 'http' or 'https'.
host: str The host name or IP address of the desired server.
      port: int The server's port number.
path: str The path of the resource following the host. This begins with
a /, example: '/calendar/feeds/default/allcalendars/full'
query: dict of strings The URL query parameters. The keys and values are
both escaped so this dict should contain the unescaped values.
For example {'my key': 'val', 'second': '!!!'} will become
'?my+key=val&second=%21%21%21' which is appended to the path.
"""
self.query = query or {}
if scheme is not None:
self.scheme = scheme
if host is not None:
self.host = host
if port is not None:
self.port = port
if path:
self.path = path
def _get_query_string(self):
param_pairs = []
for key, value in self.query.iteritems():
param_pairs.append('='.join((urllib.quote_plus(key),
urllib.quote_plus(str(value)))))
return '&'.join(param_pairs)
def _get_relative_path(self):
"""Returns the path with the query parameters escaped and appended."""
param_string = self._get_query_string()
if self.path is None:
path = '/'
else:
path = self.path
if param_string:
return '?'.join([path, param_string])
else:
return path
def _to_string(self):
if self.scheme is None and self.port == 443:
scheme = 'https'
elif self.scheme is None:
scheme = 'http'
else:
scheme = self.scheme
if self.path is None:
path = '/'
else:
path = self.path
if self.port is None:
return '%s://%s%s' % (scheme, self.host, self._get_relative_path())
else:
return '%s://%s:%s%s' % (scheme, self.host, str(self.port),
self._get_relative_path())
def __str__(self):
return self._to_string()
def modify_request(self, http_request=None):
"""Sets HTTP request components based on the URI."""
if http_request is None:
http_request = HttpRequest()
if http_request.uri is None:
http_request.uri = Uri()
# Determine the correct scheme.
if self.scheme:
http_request.uri.scheme = self.scheme
if self.port:
http_request.uri.port = self.port
if self.host:
http_request.uri.host = self.host
# Set the relative uri path
if self.path:
http_request.uri.path = self.path
if self.query:
http_request.uri.query = self.query.copy()
return http_request
ModifyRequest = modify_request
def parse_uri(uri_string):
"""Creates a Uri object which corresponds to the URI string.
This method can accept partial URIs, but it will leave missing
members of the Uri unset.
"""
parts = urlparse.urlparse(uri_string)
uri = Uri()
if parts[0]:
uri.scheme = parts[0]
if parts[1]:
host_parts = parts[1].split(':')
if host_parts[0]:
uri.host = host_parts[0]
if len(host_parts) > 1:
uri.port = int(host_parts[1])
if parts[2]:
uri.path = parts[2]
if parts[4]:
param_pairs = parts[4].split('&')
for pair in param_pairs:
pair_parts = pair.split('=')
if len(pair_parts) > 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = (
urllib.unquote_plus(pair_parts[1]))
elif len(pair_parts) == 1:
uri.query[urllib.unquote_plus(pair_parts[0])] = None
return uri
parse_uri = staticmethod(parse_uri)
ParseUri = parse_uri
parse_uri = Uri.parse_uri
ParseUri = Uri.parse_uri
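# Editor's illustrative sketch (not part of the original module): parse_uri accepts
# full or partial URIs and leaves missing members unset, e.g.
#
#   uri = Uri.parse_uri('http://www.example.com:8080/feeds?alt=json')
#   # uri.scheme == 'http', uri.host == 'www.example.com', uri.port == 8080,
#   # uri.path == '/feeds', uri.query == {'alt': 'json'}
#   partial = Uri.parse_uri('/feeds/posts/default')
#   # partial.scheme and partial.host stay None; only partial.path is set.
#
# The URLs above are placeholders for illustration only.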
class HttpResponse(object):
status = None
reason = None
_body = None
def __init__(self, status=None, reason=None, headers=None, body=None):
self._headers = headers or {}
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if body is not None:
if hasattr(body, 'read'):
self._body = body
else:
self._body = StringIO.StringIO(body)
def getheader(self, name, default=None):
if name in self._headers:
return self._headers[name]
else:
return default
def getheaders(self):
return self._headers
def read(self, amt=None):
if self._body is None:
return None
if not amt:
return self._body.read()
else:
return self._body.read(amt)
def _dump_response(http_response):
"""Converts to a string for printing debug messages.
Does not read the body since that may consume the content.
"""
output = 'HttpResponse\n status: %s\n reason: %s\n headers:' % (
http_response.status, http_response.reason)
headers = get_headers(http_response)
if isinstance(headers, dict):
for header, value in headers.iteritems():
output += ' %s: %s\n' % (header, value)
else:
for pair in headers:
output += ' %s: %s\n' % (pair[0], pair[1])
return output
class HttpClient(object):
"""Performs HTTP requests using httplib."""
debug = None
def request(self, http_request):
return self._http_request(http_request.method, http_request.uri,
http_request.headers, http_request._body_parts)
Request = request
def _get_connection(self, uri, headers=None):
"""Opens a socket connection to the server to set up an HTTP request.
Args:
uri: The full URL for the request as a Uri object.
headers: A dict of string pairs containing the HTTP headers for the
request.
"""
connection = None
if uri.scheme == 'https':
if not uri.port:
connection = httplib.HTTPSConnection(uri.host)
else:
connection = httplib.HTTPSConnection(uri.host, int(uri.port))
else:
if not uri.port:
connection = httplib.HTTPConnection(uri.host)
else:
connection = httplib.HTTPConnection(uri.host, int(uri.port))
return connection
def _http_request(self, method, uri, headers=None, body_parts=None):
"""Makes an HTTP request using httplib.
Args:
method: str example: 'GET', 'POST', 'PUT', 'DELETE', etc.
uri: str or atom.http_core.Uri
headers: dict of strings mapping to strings which will be sent as HTTP
headers in the request.
body_parts: list of strings, objects with a read method, or objects
which can be converted to strings using str. Each of these
will be sent in order as the body of the HTTP request.
"""
if isinstance(uri, (str, unicode)):
uri = Uri.parse_uri(uri)
connection = self._get_connection(uri, headers=headers)
if self.debug:
connection.debuglevel = 1
if connection.host != uri.host:
connection.putrequest(method, str(uri))
else:
connection.putrequest(method, uri._get_relative_path())
# Overcome a bug in Python 2.4 and 2.5
# httplib.HTTPConnection.putrequest adding
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', and thus resulting the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (uri.scheme == 'https' and int(uri.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % uri.host
replacement_header_line = 'Host: %s' % uri.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name, value in headers.iteritems():
connection.putheader(header_name, value)
connection.endheaders()
# If there is data, send it in the request.
if body_parts and filter(lambda x: x != '', body_parts):
for part in body_parts:
_send_data_part(part, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
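# Editor's illustrative sketch (not part of the original module): typical use pairs
# HttpRequest with HttpClient and reads the httplib response directly, e.g.
#
#   client = HttpClient()
#   request = HttpRequest(uri='http://www.example.com/', method='GET',
#                         headers={'User-Agent': 'http_core-example'})
#   response = client.request(request)
#   print response.status, response.getheader('content-type')
#
# The host and user agent above are placeholders, not values this module requires.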
def _send_data_part(data, connection):
if isinstance(data, (str, unicode)):
# I might want to just allow str, not unicode.
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
class ProxiedHttpClient(HttpClient):
def _get_connection(self, uri, headers=None):
# Check to see if there are proxy settings required for this request.
proxy = None
if uri.scheme == 'https':
proxy = os.environ.get('https_proxy')
elif uri.scheme == 'http':
proxy = os.environ.get('http_proxy')
if not proxy:
return HttpClient._get_connection(self, uri, headers=headers)
# Now we have the URL of the appropriate proxy server.
# Get a username and password for the proxy if required.
proxy_auth = _get_proxy_auth()
if uri.scheme == 'https':
import socket
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = uri.port
if not port:
port = 443
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (uri.host, port)
# Set the user agent to send to the proxy
user_agent = ''
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
# Connect to the proxy server, very simple recv and error checking
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_uri.host, int(proxy_uri.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
sslobj = None
if ssl is not None:
sslobj = ssl.wrap_socket(p_sock, None, None)
else:
        sock_ssl = socket.ssl(p_sock, None, None)
sslobj = httplib.FakeSocket(p_sock, sock_ssl)
      # Initialize httplib and replace with the proxy socket.
connection = httplib.HTTPConnection(proxy_uri.host)
connection.sock = sslobj
return connection
elif uri.scheme == 'http':
proxy_uri = Uri.parse_uri(proxy)
if not proxy_uri.port:
proxy_uri.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_uri.host, int(proxy_uri.port))
return None
def _get_proxy_auth():
import base64
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.b64encode('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
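# Editor's note (illustrative, not part of the original module): proxy support above
# is driven purely by environment variables, so enabling it looks roughly like
#
#   os.environ['http_proxy'] = 'http://proxy.example.com:3128'
#   os.environ['proxy_username'] = 'alice'   # or 'proxy-username'
#   os.environ['proxy_password'] = 'secret'  # or 'proxy-password'
#   client = ProxiedHttpClient()
#
# The proxy host and credentials are placeholders.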
| agpl-3.0 |
duyet-website/api.duyet.net | lib/faker/providers/address/uk_UA/__init__.py | 3 | 5601 | # coding=utf-8
from __future__ import unicode_literals
from random import randint
from .. import Provider as AddressProvider
class Provider(AddressProvider):
address_formats = ['{{street_address}}, {{city}}, {{postcode}}']
building_number_formats = ['#', '##', '###']
city_formats = ['{{city_prefix}} {{first_name}}']
street_address_formats = ['{{street_name}}, {{building_number}}']
street_name_formats = ['{{street_prefix}} {{last_name}}',
'{{last_name}} {{street_suffix}}']
city_prefixes = ['місто', 'село', 'селище', 'хутір']
countries = [
'Австралія', 'Австрія', 'Азербайджан', 'Албанія', 'Алжир', 'Ангола',
'Андорра', 'Антигуа і Барбуда', 'Аргентина', 'Афганістан',
'Багамські Острови', 'Бангладеш', 'Барбадос', 'Бахрейн', 'Беліз',
'Бельгія', 'Бенін', 'Білорусь', 'Болгарія', 'Болівія',
'Боснія і Герцеговина', 'Ботсвана', 'Бразилія', 'Бруней',
'Буркіна-Фасо', 'Бурунді', 'Бутан', 'Вануату', 'Ватикан',
'Велика Британія', 'Венесуела', 'В\'єтнам', 'Вірменія', 'Габон',
'Гаїті', 'Гаяна', 'Гамбія', 'Гана', 'Гватемала', 'Гвінея',
'Гвінея-Бісау', 'Гондурас', 'Гренада', 'Греція', 'Грузія', 'Данія',
'Джибуті', 'Домініка', 'Домініканська Республіка', 'Еквадор',
'Екваторіальна Гвінея', 'Еритрея', 'Естонія', 'Ефіопія', 'Єгипет',
'Ємен', 'Замбія', 'Західна Сахара', 'Зімбабве', 'Ізраїль', 'Індія',
'Індонезія', 'Ірак', 'Іран', 'Ірландія', 'Ісландія', 'Іспанія',
'Італія', 'Йорданія', 'Кабо-Верде', 'Казахстан', 'Камбоджа', 'Камерун',
'Канада', 'Катар', 'Кенія', 'Киргизстан', 'КНР', 'Кіпр', 'Кірибаті',
'Колумбія', 'Коморські Острови', 'Конго', 'ДР Конго', 'Південна Корея',
'Північна Корея', 'Косово', 'Коста-Рика', 'Кот-д\'Івуар', 'Куба',
'Кувейт', 'Лаос', 'Латвія', 'Лесото', 'Литва', 'Ліберія', 'Ліван',
'Лівія', 'Ліхтенштейн', 'Люксембург', 'Маврикій', 'Мавританія',
'Мадагаскар', 'Республіка Македонія', 'Малаві', 'Малайзія', 'Малі',
'Мальдіви', 'Мальта', 'Марокко', 'Маршаллові Острови', 'Мексика',
'Федеративні Штати Мікронезії', 'Мозамбік', 'Молдова', 'Монако',
'Монголія', 'М\'янма', 'Намібія', 'Науру', 'Непал', 'Нігер', 'Нігерія',
'Нідерланди', 'Нікарагуа', 'Німеччина', 'Нова Зеландія', 'Норвегія',
'ОАЕ', 'Оман', 'Пакистан', 'Палау', 'Палестинська держава', 'Панама',
'Папуа Нова Гвінея', 'ПАР', 'Парагвай', 'Перу', 'Південний Судан',
'Польща', 'Португалія', 'Росія', 'Руанда', 'Румунія', 'Сальвадор',
'Самоа', 'Сан-Марино', 'Сан-Томе і Принсіпі', 'Саудівська Аравія',
'Свазіленд', 'Сейшельські Острови', 'Сенегал',
'Сент-Вінсент і Гренадини', 'Сент-Кіттс і Невіс', 'Сент-Люсія',
'Сербія', 'Сінгапур', 'Сирія', 'Словаччина', 'Словенія',
'Соломонові Острови', 'Сомалі', 'Судан', 'Суринам', 'Східний Тимор',
'США', 'Сьєрра-Леоне', 'Таджикистан', 'Таїланд', 'Тайвань', 'Танзанія',
'Того', 'Тонга', 'Тринідад і Тобаго', 'Тувалу', 'Туніс', 'Туреччина',
'Туркменістан', 'Уганда', 'Угорщина', 'Узбекистан', 'Україна',
'Уругвай', 'Фіджі', 'Філіппіни', 'Фінляндія', 'Франція', 'Хорватія',
'Центральноафриканська Республіка', 'Чад', 'Чехія', 'Чилі',
'Чорногорія', 'Швейцарія', 'Швеція', 'Шрі-Ланка', 'Ямайка', 'Японія'
]
street_prefixes = [
'вулиця', 'проспект', 'майдан', 'набережна', 'бульвар', 'провулок'
]
street_suffixes = ['узвіз']
@classmethod
def city_prefix(cls):
return cls.random_element(cls.city_prefixes)
@classmethod
def postcode(cls):
"""The code consists of five digits (01000-99999)"""
        return '{}{}'.format(randint(0, 9), randint(1000, 9999))
@classmethod
def street_prefix(cls):
return cls.random_element(cls.street_prefixes)
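# Editor's illustrative sketch (not part of the original provider): with the faker
# package installed, this locale is used through the normal Faker entry point, e.g.
#
#   from faker import Faker
#   fake = Faker('uk_UA')
#   fake.address()  # shaped like "проспект <last_name>, 12, місто <first_name>, 01234"
#
# following the address_formats defined above; the sample output is illustrative.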
| mit |
JMY1000/CyclesMineways | CyclesMineways.py | 1 | 46368 | # Cycles Mineways setup
# Version 1.3.0, 5/28/16
# Copyright © 2016
# Please send suggestions or report bugs at https://github.com/JMY1000/CyclesMineways/
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation under version 3 of the License.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details at http://www.gnu.org/licenses/gpl-3.0.en.html
# Distributed with Mineways, http://mineways.com
# To use the script within Blender, for use with the Cycles renderer:
# Open Blender and import the obj file created by Mineways.
# Change any window to the text editor.
# Alternatively, go to the top of the window where it says "Default",
# click on the screen layout button to the left of the word "Default" and pick "Scripting".
# Click "Open" at the bottom of the text window.
# Go to the directory where this file, "CyclesMineways.py", is and select it.
# You should now see some text in the text window.
# Alternatively, you can click "new" then paste in the text.
# To apply this script, click on the "Run Script" button at the bottom of the text window.
# OPTIONAL: To see the script's print output, you may want to turn on the terminal/console.
# It is not critical to see this window, but it might give you a warm and fuzzy feeling to know that the script has worked.
# It also helps provide debug info if something goes wrong.
# For Windows:
# From the upper left of your window select "Window" and then "Toggle System Console".
# For OS X:
# Find your application, right click it, hit "Show Package Contents".
#   Navigate to Contents/MacOS/blender and launch Blender this way; this will show the terminal.
# For Linux:
# Run Blender through the terminal.
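# (Editor's note, illustrative only:) The script can also be run without opening the UI,
# assuming Blender is on your PATH and "world.blend" already contains the imported obj:
#      blender --background world.blend --python CyclesMineways.py
# The .blend file name above is just an example.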
#importing the Blender Python library
import bpy
print("Libraries imported")
# CONSTANTS
# PREFIX can stay as "" if you are importing into a project that is not massive and has no other imported Mineways worlds.
# If the .blend does not meet these requirements, you must set PREFIX to allow this script to know what it is working with.
# Set the PREFIX to the name of the file it uses (eg: a castle.obj file uses PREFIX = "castle")
PREFIX = ""
# USER_INPUT_SCENE controls what scenes Blender will apply this script's functionality to.
# If this list has scenes, the script will only use those scenes;
# otherwise, it will use all scenes
# example: USER_INPUT_SCENE = ["scene","scene2","randomScene123"]
USER_INPUT_SCENE = []
# WATER_SHADER_TYPE controls the water shader that will be used.
# Use 0 for a solid block shader.
# Use 1 for a semi-transparent flat shader.
# Use 2 for a small, sharp waves shader.
# Use 3 for a wavy shader.
# For a more detailed explanation with pictures of each water shader type, visit: https://github.com/JMY1000/CyclesMineways/wiki/Water-Shader-Types
WATER_SHADER_TYPE = 1
# TIME_OF_DAY controls the time of day.
# If TIME_OF_DAY is between 6.5 and 19.5 (crossing 12), the daytime shader will be used.
# If TIME_OF_DAY is between 19.5 and 6.5 (crossing 24), the nighttime shader will be used.
# NOTE: The decimal is not in minutes, and is a fraction (ex. 12:30 is 12.50).
# NOTE: This currently only handles day and night
TIME_OF_DAY = 12.00
# DISPLACE_WOOD controls whether virtual displacement (changes normals for illusion of roughness) for wooden plank blocks is used.
# NOTE: This currently only works for oak wood planks.
# NOTE: This can only be True or False
DISPLACE_WOOD = False
# STAINED_GLASS_COLOR controls how coloured the light that passes through stained glass is.
# 0 means the light passes through unchanged
# 1 means all the light is changed to the glass's color (not recommended)
STAINED_GLASS_COLOR = 0.4
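# Example (editor's illustration only, commented out so the defaults above still apply):
# a hypothetical night render of a file exported as "castle.obj" might use
#   PREFIX = "castle"
#   WATER_SHADER_TYPE = 2
#   TIME_OF_DAY = 22.00
#   STAINED_GLASS_COLOR = 0.4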
#List of transparent blocks
transparentBlocks = ["Acacia_Leaves","Dark_Oak_Leaves","Acacia_Door","Activator_Rail","Bed","Beetroot_Seeds","Birch_Door","Brewing_Stand","Cactus","Carrot","Carrots","Cauldron","Chorus_Flower","Chorus_Flower_Dead","Chorus_Plant","Cobweb",
"Cocoa","Crops","Dandelion","Dark_Oak_Door","Dead_Bush","Detector_Rail","Enchantment_Table","Glass","Glass_Pane","Grass","Iron_Bars","Iron_Door","Iron_Trapdoor","Jack_o'Lantern","Jungle_Door","Large_Flowers",
"Leaves","Melon_Stem","Monster_Spawner","Nether_Portal","Nether_Wart","Oak_Leaves","Oak_Sapling","Poppy","Potato","Potatoes","Powered_Rail","Pumpkin_Stem","Rail","Red_Mushroom",
"Redstone_Comparator_(inactive)","Redstone_Torch_(inactive)","Repeater_(inactive)","Sapling","Spruce_Door","Stained_Glass_Pane","Sugar_Cane","Sunflower","Tall_Grass","Trapdoor","Vines","Wheat","Wooden_Door"]
#List of light emitting blocks
lightBlocks = ["Daylight_Sensor","End_Gateway","End_Portal","Ender_Chest","Flowing_Lava","Glowstone","Inverted_Daylight_Sensor","Lava","Magma_Block","Redstone_Lamp_(active)","Stationary_Lava","Sea_Lantern"]
#List of light emitting and transparent blocks
lightTransparentBlocks = ["Beacon","Brown_Mushroom","Dragon_Egg","Endframe","End_Rod","Fire","Powered_Rail_(active)","Redstone_Comparator_(active)","Redstone_Torch_(active)","Repeater_(active)","Torch"]
#SHADERS
def Setup_Node_Tree(material):
#Make the material use nodes
material.use_nodes=True
#Set the variable node_tree to be the material's node tree and variable nodes to be the node tree's nodes
node_tree=material.node_tree
nodes=material.node_tree.nodes
#Remove the old nodes
for eachNode in nodes:
nodes.remove(eachNode)
return nodes,node_tree
def Normal_Shader(material,rgba_image):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,300)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = rgba_image
rgba_node.interpolation=('Closest')
rgba_node.location=(-300,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(diffuse_node.outputs["BSDF"],output_node.inputs["Surface"])
def Transparent_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,400)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Light_Emiting_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the diffuse deciding mix node
diffuse_mix_node=nodes.new('ShaderNodeMixShader')
diffuse_mix_node.location=(300,300)
#Create the Light Path Node
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,600)
#Create the diffuse emission
indirect_emission_node=nodes.new('ShaderNodeEmission')
indirect_emission_node.location=(0,100)
#Create the Light Falloff node for indirect emission
light_falloff_node=nodes.new('ShaderNodeLightFalloff')
light_falloff_node.location=(-300,0)
light_falloff_node.inputs[0].default_value=200 #sets strength of light
light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
#Create the HSV node to brighten the light
hsv_node=nodes.new('ShaderNodeHueSaturation')
hsv_node.location=(-300,200)
hsv_node.inputs["Value"].default_value=3 # brightens the color for better lighting
#Create the direct emission node
direct_emission_node=nodes.new('ShaderNodeEmission')
direct_emission_node.location=(0,300)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],direct_emission_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
links.new(hsv_node.outputs["Color"],indirect_emission_node.inputs["Color"])
links.new(light_falloff_node.outputs[0],indirect_emission_node.inputs[1]) #connects quadratic output to emission strength
links.new(indirect_emission_node.outputs["Emission"],diffuse_mix_node.inputs[2])
links.new(direct_emission_node.outputs["Emission"],diffuse_mix_node.inputs[1])
links.new(light_path_node.outputs[2],diffuse_mix_node.inputs["Fac"]) #links "is diffuse ray" to factor of mix node
links.new(diffuse_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Transparent_Emiting_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the indirect-direct mix shader
indirect_mix_node=nodes.new('ShaderNodeMixShader')
indirect_mix_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the Light Path node to check if light is indirect
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,800)
#Create the Light Falloff node for indirect emission
light_falloff_node=nodes.new('ShaderNodeLightFalloff')
light_falloff_node.location=(-300,600)
light_falloff_node.inputs[0].default_value=80 #sets strength of light
light_falloff_node.inputs[1].default_value=0.03 #sets smooth level of light
#Create the indirect emission node
indirect_emission_node=nodes.new('ShaderNodeEmission')
indirect_emission_node.location=(0,500)
indirect_emission_node.inputs["Color"].default_value = (1,1,0.56,1)
#Only tested color on torches, needs testing on other transparent emitters to see if it looks weird
#Create the direct emission node
emission_node=nodes.new('ShaderNodeEmission')
emission_node.location=(-300,400)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],emission_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(emission_node.outputs["Emission"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],indirect_mix_node.inputs[1])
links.new(light_falloff_node.outputs["Quadratic"],indirect_emission_node.inputs["Strength"])
links.new(indirect_emission_node.outputs["Emission"],indirect_mix_node.inputs[2])
links.new(light_path_node.outputs["Is Diffuse Ray"],indirect_mix_node.inputs["Fac"])
links.new(indirect_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Lily_Pad_Shader(material):
    #A water setup shader should have run before this
#Set the variable node_tree to be the material's node tree and variable nodes to be the node tree's nodes
node_tree=material.node_tree
nodes=material.node_tree.nodes
output = None
image_node = None
for node in nodes:
if node.name=="Material Output":
output=node
if node.name=="Image Texture": #assumes only 1 image input
image_node=node
output.location = (600,300)
water_output = output.inputs[0].links[0].from_node
mix_node = nodes.new('ShaderNodeMixShader')
mix_node.location=(300,500)
diffuse_node = nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,500)
RGB_splitter_node = nodes.new('ShaderNodeSeparateRGB')
RGB_splitter_node.location=(-300,700)
less_than_node = nodes.new('ShaderNodeMath')
less_than_node.location=(0,700)
less_than_node.operation="LESS_THAN"
links=node_tree.links
links.new(mix_node.outputs[0],output.inputs[0])
links.new(diffuse_node.outputs[0],mix_node.inputs[1])
links.new(water_output.outputs[0],mix_node.inputs[2]) #making massive assumption that output of water is in first output
links.new(less_than_node.outputs[0],mix_node.inputs[0])
links.new(image_node.outputs[0],diffuse_node.inputs[0])
links.new(RGB_splitter_node.outputs[2],less_than_node.inputs[1])
links.new(RGB_splitter_node.outputs[1],less_than_node.inputs[0])
links.new(image_node.outputs[0],RGB_splitter_node.inputs[0])
def Stained_Glass_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,400)
#Create shadow(math)-color(HSV) mix node
shadow_color_mix_node=nodes.new('ShaderNodeMixRGB')
shadow_color_mix_node.location=(-600,400)
shadow_color_mix_node.inputs[1].default_value=(1,1,1,0)
#Create HSV node because for some reason color from the RGBA node in transparent nodes is super dark
hsv_node=nodes.new('ShaderNodeHueSaturation')
hsv_node.location=(-900,280)
hsv_node.inputs[1].default_value=2
hsv_node.inputs[2].default_value=8
#Create math(multiply, clamped) node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(-900,450)
multiply_node.operation=('MULTIPLY')
multiply_node.use_clamp=True
multiply_node.inputs[1].default_value=STAINED_GLASS_COLOR
#Create math(add, clamped) node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-1200,450)
add_node.operation=('ADD')
add_node.use_clamp=True
#Create the lightpath node
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(-1500,450)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-900,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-1200,100)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Alpha"],mix_node.inputs["Fac"])
links.new(rgba_node.outputs["Color"],hsv_node.inputs["Color"])
links.new(light_path_node.outputs[1],add_node.inputs[0]) #connects Is Shadow Ray to add node
links.new(light_path_node.outputs[2],add_node.inputs[1]) #connects Is Shadow Ray to add node
links.new(add_node.outputs[0],multiply_node.inputs[0])
links.new(multiply_node.outputs["Value"],shadow_color_mix_node.inputs["Fac"])
links.new(hsv_node.outputs["Color"],shadow_color_mix_node.inputs[2])
links.new(shadow_color_mix_node.outputs["Color"],transparent_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_1(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the fresnel mix shader
fresnel_mix_node=nodes.new('ShaderNodeMixShader')
fresnel_mix_node.location=(0,300)
#Create Fresnel node ior=1.33
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(-300,400)
fresnel_node.inputs[0].default_value=1.33
#Create the transparency-diffuse mixer
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(-300,300)
mix_node.inputs[0].default_value=0.4
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-600,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,180)
#Create the glossy shader
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,100)
glossy_node.inputs[1].default_value=0.02
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-900,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],glossy_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[2])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[1])
links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs["Fac"])
links.new(mix_node.outputs["Shader"],fresnel_mix_node.inputs[1])
links.new(glossy_node.outputs["BSDF"],fresnel_mix_node.inputs[2])
links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Stationary_Water_Shader_2(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(600,300)
#Create the fresnel mix shader
fresnel_mix_node=nodes.new('ShaderNodeMixShader')
fresnel_mix_node.location=(300,300)
#Create Fresnel node
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(0,500)
fresnel_node.inputs[0].default_value=1.33
#Create the mix+transparent mix shader
mix_node_transparent_mix=nodes.new('ShaderNodeMixShader')
mix_node_transparent_mix.location=(0,300)
mix_node_transparent_mix.inputs[0].default_value=0.18
#Create the refraction-glossy mix shader
mix_node_ref_glossy=nodes.new('ShaderNodeMixShader')
mix_node_ref_glossy.location=(-300,0)
mix_node_ref_glossy.inputs[0].default_value=0.72
#Create Diffuse-transparent mix shader
diffuse_transparent_mix_shader=nodes.new('ShaderNodeMixShader')
diffuse_transparent_mix_shader.location=(-300,450)
diffuse_transparent_mix_shader.inputs["Fac"].default_value = 0.5
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,400)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-600,550)
#Create the glossy node
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,0)
glossy_node.inputs["Roughness"].default_value=0.005
#Create the refraction node
refraction_node=nodes.new('ShaderNodeBsdfRefraction')
refraction_node.location=(-600,300)
refraction_node.inputs[2].default_value=1.33
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-900,300)
rgba_node.label = "RGBA"
#Create the first multiply node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(0,-300)
multiply_node.operation=('MULTIPLY')
multiply_node.inputs[1].default_value=0.05
#Create the add node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-300,-300)
add_node.operation=('ADD')
#Create the first voronoi texture
voronoi_node=nodes.new('ShaderNodeTexVoronoi')
voronoi_node.location=(-600,-300)
voronoi_node.inputs[1].default_value=20
#Create the second multiply node
multiply_node_two=nodes.new('ShaderNodeMath')
multiply_node_two.location=(-600,-600)
multiply_node_two.operation=('MULTIPLY')
#Create the second voronoi texture
voronoi_node_two=nodes.new('ShaderNodeTexVoronoi')
voronoi_node_two.location=(-900,-600)
voronoi_node_two.inputs[1].default_value=30
#Create the texture coordinate node
texture_coordinate_node=nodes.new('ShaderNodeTexCoord')
texture_coordinate_node.location=(-1200,-300)
#Link the nodes
links=node_tree.links
links.new(fresnel_mix_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(fresnel_node.outputs["Fac"],fresnel_mix_node.inputs[0])
links.new(mix_node_transparent_mix.outputs["Shader"],fresnel_mix_node.inputs[1])
links.new(diffuse_transparent_mix_shader.outputs["Shader"],mix_node_transparent_mix.inputs[1])
links.new(diffuse_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[1])
links.new(transparent_node.outputs["BSDF"],diffuse_transparent_mix_shader.inputs[2])
links.new(mix_node_ref_glossy.outputs["Shader"],mix_node_transparent_mix.inputs[2])
links.new(mix_node_ref_glossy.outputs["Shader"],fresnel_mix_node.inputs[2])
links.new(refraction_node.outputs["BSDF"],mix_node_ref_glossy.inputs[1])
links.new(glossy_node.outputs["BSDF"],mix_node_ref_glossy.inputs[2])
links.new(rgba_node.outputs["Color"],refraction_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
links.new(add_node.outputs["Value"],multiply_node.inputs[0])
links.new(voronoi_node.outputs["Fac"],add_node.inputs[0])
links.new(multiply_node_two.outputs["Value"],add_node.inputs[1])
links.new(voronoi_node_two.outputs["Fac"],multiply_node_two.inputs[0])
links.new(texture_coordinate_node.outputs["Object"],voronoi_node.inputs["Vector"])
links.new(texture_coordinate_node.outputs["Object"],voronoi_node_two.inputs["Vector"])
def Stationary_Water_Shader_3(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the first mix shader node
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(-300,300)
#Create the clamped add node
add_node=nodes.new('ShaderNodeMath')
add_node.location=(-600,600)
add_node.operation=('ADD')
add_node.use_clamp=True
#Create the fresnel node
fresnel_node=nodes.new('ShaderNodeFresnel')
fresnel_node.location=(-900,600)
fresnel_node.inputs["IOR"].default_value=1.33
#Create the transparent shader node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-600,400)
#Create the glossy shader node
glossy_node=nodes.new('ShaderNodeBsdfGlossy')
glossy_node.location=(-600,300)
glossy_node.inputs["Roughness"].default_value=0.02
#Create the rgb mix shader
rgbmix_node=nodes.new('ShaderNodeMixRGB')
rgbmix_node.location=(-900,300)
rgbmix_node.inputs["Fac"].default_value=0.3
rgbmix_node.inputs["Color2"].default_value=(1,1,1,1)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-1200,300)
rgba_node.label = "RGBA"
#Create the wave texture node
wave_node=nodes.new('ShaderNodeTexWave')
wave_node.location=(-1200,0)
wave_node.inputs["Scale"].default_value=1.7
wave_node.inputs["Distortion"].default_value=34
wave_node.inputs["Detail"].default_value=5
wave_node.inputs["Detail Scale"].default_value=5
#Create the multiply node
multiply_node=nodes.new('ShaderNodeMath')
multiply_node.location=(-600,0)
multiply_node.operation=('MULTIPLY')
#Link the nodes
links=node_tree.links
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(add_node.outputs["Value"],mix_node.inputs["Fac"])
links.new(fresnel_node.outputs["Fac"],add_node.inputs[0])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(glossy_node.outputs["BSDF"],mix_node.inputs[2])
links.new(rgbmix_node.outputs["Color"],glossy_node.inputs["Color"])
links.new(rgba_node.outputs["Color"],rgbmix_node.inputs["Color1"])
links.new(multiply_node.outputs["Value"],output_node.inputs["Displacement"])
links.new(wave_node.outputs["Fac"],multiply_node.inputs[0])
def Flowing_Water_Shader(material):
material.use_nodes=True
def Slime_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Ice_Shader(material):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the mix shader
mix_node=nodes.new('ShaderNodeMixShader')
mix_node.location=(0,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(-300,300)
#Create the transparent node
transparent_node=nodes.new('ShaderNodeBsdfTransparent')
transparent_node.location=(-300,0)
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = bpy.data.images[PREFIX+"-RGBA.png"]
rgba_node.interpolation=('Closest')
rgba_node.location=(-600,300)
rgba_node.label = "RGBA"
#Link the nodes
links=node_tree.links
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(transparent_node.outputs["BSDF"],mix_node.inputs[1])
links.new(diffuse_node.outputs["BSDF"],mix_node.inputs[2])
links.new(mix_node.outputs["Shader"],output_node.inputs["Surface"])
def Sky_Day_Shader(world):
nodes, node_tree = Setup_Node_Tree(world)
#Add the output node
output_node=nodes.new('ShaderNodeOutputWorld')
output_node.location=(300,300)
#Add the background node
background_node=nodes.new('ShaderNodeBackground')
background_node.location=(0,300)
#Add the color correct node
HSV_node=nodes.new('ShaderNodeHueSaturation')
HSV_node.inputs["Value"].default_value=1.6 #Corrects the color value to be the same as Minecraft's sky
HSV_node.location=(-300,300)
#Add the sky texture node
sky_node=nodes.new('ShaderNodeTexSky')
sky_node.location=(-600,300)
#Link the nodes
links=node_tree.links
links.new(background_node.outputs["Background"],output_node.inputs["Surface"])
links.new(sky_node.outputs["Color"],HSV_node.inputs["Color"])
links.new(HSV_node.outputs["Color"],background_node.inputs["Color"])
def Sky_Night_Shader(world):
nodes, node_tree = Setup_Node_Tree(world)
#Add the output node
output_node=nodes.new('ShaderNodeOutputWorld')
output_node.location=(600,300)
#Add solid color background for diffuse textures
solid_background_node=nodes.new('ShaderNodeBackground')
solid_background_node.location=(0,150)
solid_background_node.inputs["Color"].default_value=(0.1,0.1,0.1,1)
#Add Light Path Node to make sure solid colour is only used for diffuse shaders
light_path_node=nodes.new('ShaderNodeLightPath')
light_path_node.location=(0,600)
#Add mix shader to add the diffuse-only background
diffuse_mixer_node=nodes.new('ShaderNodeMixShader')
diffuse_mixer_node.location=(300,300)
#Add the first background node
background_node=nodes.new('ShaderNodeBackground')
background_node.location=(0,300)
#Create the rgb mix shader
rgbmix_node=nodes.new('ShaderNodeMixRGB')
rgbmix_node.location=(-200,300)
rgbmix_node.inputs["Fac"].default_value=0.01
#Add the sky texture node
sky_node=nodes.new('ShaderNodeTexSky')
sky_node.location=(-600,0)
#Add the colorramp node
colorramp_node=nodes.new('ShaderNodeValToRGB')
colorramp_node.location=(-600,300)
colorramp_node.color_ramp.interpolation=('CONSTANT')
colorramp_node.color_ramp.elements[1].position=0.03
colorramp_node.color_ramp.elements[1].color=(0,0,0,255)
colorramp_node.color_ramp.elements[0].color=(255,255,255,255)
#Add the voronoi texture
voronoi_node=nodes.new('ShaderNodeTexVoronoi')
voronoi_node.location=(-900,300)
voronoi_node.coloring=("CELLS")
voronoi_node.inputs["Scale"].default_value=200
#Link the nodes
links=node_tree.links
links.new(diffuse_mixer_node.outputs["Shader"],output_node.inputs["Surface"])
links.new(solid_background_node.outputs["Background"],diffuse_mixer_node.inputs[2])
links.new(light_path_node.outputs["Is Diffuse Ray"],diffuse_mixer_node.inputs[0]) # connects "Is Diffuse Ray" to factor
links.new(background_node.outputs["Background"],diffuse_mixer_node.inputs[1])
links.new(rgbmix_node.outputs["Color"],background_node.inputs["Color"])
links.new(colorramp_node.outputs["Color"],rgbmix_node.inputs["Color1"])
links.new(sky_node.outputs["Color"],rgbmix_node.inputs["Color2"])
links.new(voronoi_node.outputs["Color"],colorramp_node.inputs["Fac"])
def Wood_Displacement_Texture(material,rgba_image):
nodes, node_tree = Setup_Node_Tree(material)
#Create the output node
output_node=nodes.new('ShaderNodeOutputMaterial')
output_node.location=(300,300)
#Create the diffuse node
diffuse_node=nodes.new('ShaderNodeBsdfDiffuse')
diffuse_node.location=(0,300)
diffuse_node.inputs[1].default_value=0.3 # sets diffuse to 0.3
#Create the rgba node
rgba_node=nodes.new('ShaderNodeTexImage')
rgba_node.image = rgba_image
rgba_node.interpolation=('Closest')
rgba_node.location=(-300,300)
rgba_node.label = "RGBA"
#Create displacement node tree
#Create magic node 1
magic_node_one=nodes.new('ShaderNodeTexMagic')
magic_node_one.location=(-900,200)
magic_node_one.turbulence_depth=6 #sets depth to 6
magic_node_one.inputs[1].default_value=5 #sets scale to 5
magic_node_one.inputs[2].default_value=10 #sets distortion to 10
#Create magic node 2
magic_node_two=nodes.new('ShaderNodeTexMagic')
magic_node_two.location=(-900,0)
magic_node_two.turbulence_depth=5 #sets depth to 5
magic_node_two.inputs[1].default_value=3.3 #sets scale to 3.3
magic_node_two.inputs[2].default_value=2.7 #sets distortion to 2.7
#Create Add node
#Connects to magic node 1 and 2
math_add_node_one=nodes.new('ShaderNodeMath')
math_add_node_one.location=(-600,0)
math_add_node_one.operation="ADD"
#Create noise texture
noise_node=nodes.new('ShaderNodeTexNoise')
noise_node.location=(-900,-200)
noise_node.inputs[1].default_value=6.9 #sets scale to 6.9
noise_node.inputs[2].default_value=5 #set detail to 5
noise_node.inputs[3].default_value=8 #sets distortion to 8
#Create multiply
#Connects to noise and 5
math_multiply_node=nodes.new('ShaderNodeMath')
math_multiply_node.location=(-600,-200)
math_multiply_node.operation="MULTIPLY"
math_multiply_node.inputs[1].default_value=5 #sets multiply value to 5
#Create 2nd Add node
#Connects to Add node and multiply node
math_add_node_two=nodes.new('ShaderNodeMath')
math_add_node_two.operation="ADD"
math_add_node_two.location=(-300,0)
#Create Divide node
#Connect from 2nd add node and input [1] to 10
#Connects to materials output
math_divide_node=nodes.new('ShaderNodeMath')
math_divide_node.location=(0,150)
math_divide_node.operation="DIVIDE"
math_divide_node.inputs[1].default_value=10
#Link the nodes
links=node_tree.links
#link surface modifiers
links.new(rgba_node.outputs["Color"],diffuse_node.inputs["Color"])
links.new(diffuse_node.outputs["BSDF"],output_node.inputs["Surface"])
#link displacement modifiers
links.new(magic_node_one.outputs["Fac"],math_add_node_one.inputs[0])
links.new(magic_node_two.outputs["Fac"],math_add_node_one.inputs[1])
links.new(math_add_node_one.outputs[0],math_add_node_two.inputs[0])
links.new(noise_node.outputs["Fac"],math_multiply_node.inputs[0])
links.new(math_multiply_node.outputs[0],math_add_node_two.inputs[1])
links.new(math_add_node_two.outputs[0],math_divide_node.inputs[0])
links.new(math_divide_node.outputs[0],output_node.inputs["Displacement"])
#MAIN
def main():
print("Main started")
#packing all the files into one .blend
print("Packing files")
bpy.ops.file.pack_all()
print("Files packed")
#finding the PREFIX for mineways
global PREFIX
print("Gettting PREFIX ('"+PREFIX+"')")
if PREFIX == "":
print("No prefix found, finding best PREFIX")
        names={} # initialises a dictionary
for img in bpy.data.images: # loops through all images in .blend file
pos = max( # sets pos to be the max value of the 3 values
img.name.rfind("-RGBA.png"), # if "-RGBA.png" is in the file name, returns non -1, else returns -1
img.name.rfind("-RGB.png"), # if "-RGB.png" is in the file name, returns non -1, else returns -1
img.name.rfind("-Alpha.png")) # if "-Alpha.png" is in the file name, returns non -1, else returns -1
            # all this max statement really does is check whether the name contains any of those suffixes; if not, pos is -1
print("Checking:",img.name,"(Position: ",pos,"Prefix: ",img.name[:pos]+")")
            if pos!=-1: # if pos==-1, it does not contain "-RGBA.png" or "-RGB.png" or "-Alpha.png"
try:
names[img.name[:pos]]+=1 # if a key called the file name in the dictionary exists, increase its value by 1
except KeyError:
names[img.name[:pos]]=1 # this code is only reached if the value could not be increased by one
# this happens when the value does not exist (i.e. the key does not exist because this is the first loop)
print("names: ",names)
        PREFIX = max(names, key=names.get) # finds the key in the dictionary that has the highest count
# this is how the code determines what the PREFIX should be (majority vote)
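        # e.g. if names ended up as {"MyWorld": 3, "Other": 1} (hypothetical values),
        # PREFIX would become "MyWorld"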
print("Got PREFIX ('"+PREFIX+"')")
#Setting the render engine to Cycles and filtering materials that will be processed
print("Setting the render engine to Cycles and filtering materials that will be processed")
materials = []
#if the user doesn't provide any scenes, add all materials that exist to global "materials"
if len(USER_INPUT_SCENE)==0:
for scene in bpy.data.scenes:
scene.render.engine = 'CYCLES'
for material in bpy.data.materials:
materials.append(material)
#else for each scene provided
else:
for scene in bpy.data.scenes:
print("Checking for:",scene.name)
if scene.name in USER_INPUT_SCENE:
print("Adding materials from scene:",scene.name)
scene.render.engine='CYCLES'
#check to see if it's related to Mineways by checking if it has an active material
for object in scene.objects:
                    if object.active_material!=None: # This is a bad way of checking whether an object is Mineways'
# we probably need to check its assigned texture, or name to see if it is one of our objects
materials.append(object.active_material)
print("Render engine set to Cycles for selected scenes")
try:
texture_rgba_image = bpy.data.images[PREFIX+"-RGBA.png"]
except:
print("Cannot find image. PREFIX is invalid.")
return
print("Setting up textures")
#for every material
for material in materials:
if (material.active_texture and len(material.active_texture.name)>=2 and material.active_texture.name[0:2]=="Kd"):
material_suffix = material.name[material.name.rfind("."):len(material.name)] # gets the .001 .002 .003 ... of the material
try:
int(material_suffix[1:])
except:
material_suffix=""
#if the material is transparent use a special shader
if any(material==bpy.data.materials.get(transparentBlock+material_suffix) for transparentBlock in transparentBlocks):
print(material.name+" is transparent.")
Transparent_Shader(material)
            #if the material is a light emitting block use a special shader
elif any(material==bpy.data.materials.get(lightBlock+material_suffix) for lightBlock in lightBlocks):
print(material.name+" is light block.")
Light_Emiting_Shader(material)
            #if the material is a light emitting transparent block use a special shader
            elif any(material==bpy.data.materials.get(lightTransparentBlock+material_suffix) for lightTransparentBlock in lightTransparentBlocks):
print(material.name+" is transparent light block.")
Transparent_Emiting_Shader(material)
#if the material is stained glass, use a special shader
elif material==bpy.data.materials.get("Stained_Glass"+material_suffix):
print(material.name+" is stained glass.")
Stained_Glass_Shader(material)
#if the material is stationary water or a lily pad, use a special shader
elif material==bpy.data.materials.get("Stationary_Water"+material_suffix) or material==bpy.data.materials.get("Water"+material_suffix) or material==bpy.data.materials.get("Lily_Pad"+material_suffix):
print(material.name+" is water or a lily pad.")
print("Using shader type",WATER_SHADER_TYPE)
if WATER_SHADER_TYPE==0:
Normal_Shader(material,texture_rgba_image)
elif WATER_SHADER_TYPE==1:
Stationary_Water_Shader_1(material)
elif WATER_SHADER_TYPE==2:
Stationary_Water_Shader_2(material)
elif WATER_SHADER_TYPE==3:
Stationary_Water_Shader_3(material)
else:
print("ERROR! COULD NOT SET UP WATER")
Normal_Shader(material,texture_rgba_image)
if material==bpy.data.materials.get("Lily_Pad"+material_suffix):
Lily_Pad_Shader(material)
#if the material is flowing water, use a special shader
elif material==bpy.data.materials.get("Flowing_Water"+material_suffix):
print(material.name+" is flowing water.")
pass
#if the material is slime, use a special shader
elif material==bpy.data.materials.get("Slime"+material_suffix):
print(material.name+" is slime.")
Slime_Shader(material)
#if the material is ice, use a special shader
elif material==bpy.data.materials.get("Ice"+material_suffix):
print(material.name+" is ice.")
Ice_Shader(material)
#if the material is wood and DISPLACE_WOOD is True
elif (material==bpy.data.materials.get("Oak_Wood_Planks"+material_suffix))and(DISPLACE_WOOD):
print(material.name+" is displaced wooden planks.")
Wood_Displacement_Texture(material,texture_rgba_image)
#else use a normal shader
else:
print(material.name+" is normal.")
Normal_Shader(material,texture_rgba_image)
print("Finished setting up materials")
#Set up the sky
print("Started shading sky")
for world in bpy.data.worlds:
if 6.5<=TIME_OF_DAY<=19.5:
Sky_Day_Shader(world)
else:
Sky_Night_Shader(world)
print("Sky shaded")
#Remove unnecessary textures
print("Removing unnecessary textures")
    for img in bpy.data.images: # loops through all images in .blend file
try:
suffix = img.name.rfind(".") # finds the index of the last . in the image's name
int(img.name[suffix+1:]) # check to see if the characters after the . are numbers
# EG test.001 would work (and return 1, but we're not getting its return value)
            # and test would error out, as suffix = -1, therefore int("test") errors
# if the entire name of the image is a number (eg: 123.png), it will remove it by mistake //needs fixing
print("Texture "+img.name+" removed for being a duplicate.")
            img.user_clear() # clears all the image's parents so it can be removed
bpy.data.images.remove(img) # removes image from .blend file
except:
if (img.name==PREFIX+"-Alpha.png") or (img.name==PREFIX+"-RGB.png"): # checks if img ends in "-Alpha.png" or "-RGB.png"
print("Texture "+img.name+" removed for being redundant")
                img.user_clear() # clears all the image's parents so it can be removed
bpy.data.images.remove(img) # removes image from .blend file
else:
print("Texture "+img.name+" was not removed.") # only non-Mineways files can get here, or PREFIX.RGBA.png
print("Finished removing unnecessary textures")
### THE FOLLOWING CODE IS USED IN SETTING UP THE GUI, THIS FEATURE IS IN DEVELOPMENT.
### the following code makes buttons in the scenes tab that allow hotswitching between water types
class OBJECT_PT_water_changer(bpy.types.Panel): # The object used for drawing the buttons
bl_label = "Water Types" # the name of the sub-sub-catagory used
bl_space_type = "PROPERTIES" # the name of the main catagory used
bl_region_type = "WINDOW" # dunno
bl_context = "scene" # the name of the sub-catagory used
def draw(self, context): # called by blender when it wants to update the screen
self.layout.operator("object.water_changer", text='Use Solid Water').type="0" # draws water button 0
self.layout.operator("object.water_changer", text='Use Transparent Water').type="1" # draws water button 1
self.layout.operator("object.water_changer", text='Use Choppy Water').type="2" # draws water button 2
self.layout.operator("object.water_changer", text='Use Wavey Water').type="3" # draws water button 3
class OBJECT_OT_water_changer(bpy.types.Operator): # the object used for executing the buttons
bl_label = "Change Water Shader" # Used when pressing space on a viewport.
# Currently broken, as all the water type buttons go to one button.
bl_idname = "object.water_changer" # Used if another script wants to use this button
bl_description = "Change water shader" # Main text of the tool tip
    type = bpy.props.StringProperty() # Gets the type data set in OBJECT_PT_water_changer.draw()
def execute(self, context):
print("self:",self.type,"len",len(self.type))
print("selected object:",context.object)
        self.report({'INFO'}, "Set water to type "+self.type) # Shown in Blender's info/status
        # area once the operator finishes.
global WATER_SHADER_TYPE # Allows WATER_SHADER_TYPE to be set globally
if self.type=="0":
print("setting to type 0")
WATER_SHADER_TYPE=0
elif self.type=="1":
print("setting to type 1")
WATER_SHADER_TYPE=1
elif self.type=="2":
print("setting to type 2")
WATER_SHADER_TYPE=2
elif self.type=="3":
print("setting to type 3")
WATER_SHADER_TYPE=3
# Sets WATER_SHADER_TYPE to something
main() # Runs the main script
return{'FINISHED'} # Required by Blender
def register():
bpy.utils.register_module(__name__) # Needed to register the custom GUI components
def unregister():
bpy.utils.unregister_module(__name__) # Needed to unregister the custom GUI components
### END OF GUI CODE
if __name__ == "__main__": # Standard python check to see if the code is being ran, or added as a module
print("\nStarted Cycles Mineways import script.\n")
main() # Runs the main script
#register() # Sets up the GUI
print("\nCycles Mineways has finished.\n")
| gpl-3.0 |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/rest_framework/throttling.py | 25 | 8143 | """
Provides various throttling policies.
"""
from __future__ import unicode_literals
import time
from django.core.cache import cache as default_cache
from django.core.exceptions import ImproperlyConfigured
from rest_framework.compat import is_authenticated
from rest_framework.settings import api_settings
class BaseThrottle(object):
"""
Rate throttling of requests.
"""
def allow_request(self, request, view):
"""
Return `True` if the request should be allowed, `False` otherwise.
"""
raise NotImplementedError('.allow_request() must be overridden')
def get_ident(self, request):
"""
Identify the machine making the request by parsing HTTP_X_FORWARDED_FOR
if present and number of proxies is > 0. If not use all of
HTTP_X_FORWARDED_FOR if it is available, if not use REMOTE_ADDR.
"""
xff = request.META.get('HTTP_X_FORWARDED_FOR')
remote_addr = request.META.get('REMOTE_ADDR')
num_proxies = api_settings.NUM_PROXIES
if num_proxies is not None:
if num_proxies == 0 or xff is None:
return remote_addr
addrs = xff.split(',')
client_addr = addrs[-min(num_proxies, len(addrs))]
return client_addr.strip()
return ''.join(xff.split()) if xff else remote_addr
def wait(self):
"""
Optionally, return a recommended number of seconds to wait before
the next request.
"""
return None
class SimpleRateThrottle(BaseThrottle):
"""
A simple cache implementation, that only requires `.get_cache_key()`
to be overridden.
The rate (requests / seconds) is set by a `throttle` attribute on the View
class. The attribute is a string of the form 'number_of_requests/period'.
Period should be one of: ('s', 'sec', 'm', 'min', 'h', 'hour', 'd', 'day')
Previous request information used for throttling is stored in the cache.
"""
cache = default_cache
timer = time.time
cache_format = 'throttle_%(scope)s_%(ident)s'
scope = None
THROTTLE_RATES = api_settings.DEFAULT_THROTTLE_RATES
def __init__(self):
if not getattr(self, 'rate', None):
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
def get_cache_key(self, request, view):
"""
Should return a unique cache-key which can be used for throttling.
Must be overridden.
May return `None` if the request should not be throttled.
"""
raise NotImplementedError('.get_cache_key() must be overridden')
def get_rate(self):
"""
Determine the string representation of the allowed request rate.
"""
if not getattr(self, 'scope', None):
msg = ("You must set either `.scope` or `.rate` for '%s' throttle" %
self.__class__.__name__)
raise ImproperlyConfigured(msg)
try:
return self.THROTTLE_RATES[self.scope]
except KeyError:
msg = "No default throttle rate set for '%s' scope" % self.scope
raise ImproperlyConfigured(msg)
def parse_rate(self, rate):
"""
Given the request rate string, return a two tuple of:
<allowed number of requests>, <period of time in seconds>
"""
if rate is None:
return (None, None)
num, period = rate.split('/')
num_requests = int(num)
duration = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}[period[0]]
return (num_requests, duration)
def allow_request(self, request, view):
"""
Implement the check to see if the request should be throttled.
On success calls `throttle_success`.
On failure calls `throttle_failure`.
"""
if self.rate is None:
return True
self.key = self.get_cache_key(request, view)
if self.key is None:
return True
self.history = self.cache.get(self.key, [])
self.now = self.timer()
# Drop any requests from the history which have now passed the
# throttle duration
while self.history and self.history[-1] <= self.now - self.duration:
self.history.pop()
if len(self.history) >= self.num_requests:
return self.throttle_failure()
return self.throttle_success()
def throttle_success(self):
"""
Inserts the current request's timestamp along with the key
into the cache.
"""
self.history.insert(0, self.now)
self.cache.set(self.key, self.history, self.duration)
return True
def throttle_failure(self):
"""
Called when a request to the API has failed due to throttling.
"""
return False
def wait(self):
"""
Returns the recommended next request time in seconds.
"""
if self.history:
remaining_duration = self.duration - (self.now - self.history[-1])
else:
remaining_duration = self.duration
available_requests = self.num_requests - len(self.history) + 1
if available_requests <= 0:
return None
return remaining_duration / float(available_requests)
class AnonRateThrottle(SimpleRateThrottle):
"""
    Limits the rate of API calls that may be made by anonymous users.
The IP address of the request will be used as the unique cache key.
"""
scope = 'anon'
def get_cache_key(self, request, view):
if is_authenticated(request.user):
return None # Only throttle unauthenticated requests.
return self.cache_format % {
'scope': self.scope,
'ident': self.get_ident(request)
}
class UserRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls that may be made by a given user.
The user id will be used as a unique cache key if the user is
authenticated. For anonymous requests, the IP address of the request will
be used.
"""
scope = 'user'
def get_cache_key(self, request, view):
if is_authenticated(request.user):
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
class ScopedRateThrottle(SimpleRateThrottle):
"""
Limits the rate of API calls by different amounts for various parts of
the API. Any view that has the `throttle_scope` property set will be
throttled. The unique cache key will be generated by concatenating the
user id of the request, and the scope of the view being accessed.
"""
scope_attr = 'throttle_scope'
def __init__(self):
# Override the usual SimpleRateThrottle, because we can't determine
# the rate until called by the view.
pass
def allow_request(self, request, view):
# We can only determine the scope once we're called by the view.
self.scope = getattr(view, self.scope_attr, None)
# If a view does not have a `throttle_scope` always allow the request
if not self.scope:
return True
# Determine the allowed request rate as we normally would during
# the `__init__` call.
self.rate = self.get_rate()
self.num_requests, self.duration = self.parse_rate(self.rate)
# We can now proceed as normal.
return super(ScopedRateThrottle, self).allow_request(request, view)
def get_cache_key(self, request, view):
"""
If `view.throttle_scope` is not set, don't apply this throttle.
Otherwise generate the unique cache key by concatenating the user id
with the '.throttle_scope` property of the view.
"""
if is_authenticated(request.user):
ident = request.user.pk
else:
ident = self.get_ident(request)
return self.cache_format % {
'scope': self.scope,
'ident': ident
}
| mit |
lyft/graphite-web | webapp/graphite/finders/standard.py | 30 | 4096 | import os
from os.path import isdir, isfile, join, basename
from django.conf import settings
from graphite.logger import log
from graphite.node import BranchNode, LeafNode
from graphite.readers import WhisperReader, GzippedWhisperReader, RRDReader
from graphite.util import find_escaped_pattern_fields
from . import fs_to_metric, get_real_metric_path, match_entries
class StandardFinder:
DATASOURCE_DELIMITER = '::RRD_DATASOURCE::'
def __init__(self, directories=None):
directories = directories or settings.STANDARD_DIRS
self.directories = directories
def find_nodes(self, query):
clean_pattern = query.pattern.replace('\\', '')
pattern_parts = clean_pattern.split('.')
for root_dir in self.directories:
for absolute_path in self._find_paths(root_dir, pattern_parts):
if basename(absolute_path).startswith('.'):
continue
if self.DATASOURCE_DELIMITER in basename(absolute_path):
(absolute_path, datasource_pattern) = absolute_path.rsplit(self.DATASOURCE_DELIMITER, 1)
else:
datasource_pattern = None
relative_path = absolute_path[ len(root_dir): ].lstrip('/')
metric_path = fs_to_metric(relative_path)
real_metric_path = get_real_metric_path(absolute_path, metric_path)
metric_path_parts = metric_path.split('.')
for field_index in find_escaped_pattern_fields(query.pattern):
metric_path_parts[field_index] = pattern_parts[field_index].replace('\\', '')
metric_path = '.'.join(metric_path_parts)
# Now we construct and yield an appropriate Node object
if isdir(absolute_path):
yield BranchNode(metric_path)
elif isfile(absolute_path):
if absolute_path.endswith('.wsp') and WhisperReader.supported:
reader = WhisperReader(absolute_path, real_metric_path)
yield LeafNode(metric_path, reader)
elif absolute_path.endswith('.wsp.gz') and GzippedWhisperReader.supported:
reader = GzippedWhisperReader(absolute_path, real_metric_path)
yield LeafNode(metric_path, reader)
elif absolute_path.endswith('.rrd') and RRDReader.supported:
if datasource_pattern is None:
yield BranchNode(metric_path)
else:
for datasource_name in RRDReader.get_datasources(absolute_path):
if match_entries([datasource_name], datasource_pattern):
reader = RRDReader(absolute_path, datasource_name)
yield LeafNode(metric_path + "." + datasource_name, reader)
def _find_paths(self, current_dir, patterns):
"""Recursively generates absolute paths whose components underneath current_dir
match the corresponding pattern in patterns"""
pattern = patterns[0]
patterns = patterns[1:]
try:
entries = os.listdir(current_dir)
except OSError as e:
log.exception(e)
entries = []
subdirs = [entry for entry in entries if isdir(join(current_dir, entry))]
matching_subdirs = match_entries(subdirs, pattern)
if len(patterns) == 1 and RRDReader.supported: #the last pattern may apply to RRD data sources
files = [entry for entry in entries if isfile(join(current_dir, entry))]
rrd_files = match_entries(files, pattern + ".rrd")
if rrd_files: #let's assume it does
datasource_pattern = patterns[0]
for rrd_file in rrd_files:
absolute_path = join(current_dir, rrd_file)
yield absolute_path + self.DATASOURCE_DELIMITER + datasource_pattern
if patterns: #we've still got more directories to traverse
for subdir in matching_subdirs:
absolute_path = join(current_dir, subdir)
for match in self._find_paths(absolute_path, patterns):
yield match
else: #we've got the last pattern
files = [entry for entry in entries if isfile(join(current_dir, entry))]
matching_files = match_entries(files, pattern + '.*')
for base_name in matching_files + matching_subdirs:
yield join(current_dir, base_name)
| apache-2.0 |
oceanobservatories/mi-instrument | mi/platform/rsn/test/test_oms_client.py | 9 | 1674 | # #!/usr/bin/env python
#
# """
# @package ion.agents.platform.rsn.test.test_oms_client
# @file ion/agents/platform/rsn/test/test_oms_client.py
# @author Carlos Rueda
# @brief Test cases for CIOMSClient. The OMS environment variable can be used
# to indicate which CIOMSClient will be tested.
# """
#
# __author__ = 'Carlos Rueda'
# __license__ = 'Apache 2.0'
#
# from pyon.public import log
# from ion.agents.platform.rsn.simulator.logger import Logger
# Logger.set_logger(log)
#
# from pyon.util.int_test import IonIntegrationTestCase
#
# from ion.agents.platform.rsn.oms_client_factory import CIOMSClientFactory
# from ion.agents.platform.rsn.test.oms_test_mixin import OmsTestMixin
#
# from nose.plugins.attrib import attr
#
# import os
#
#
# @attr('INT', group='sa')
# class Test(IonIntegrationTestCase, OmsTestMixin):
# """
# The OMS environment variable can be used to indicate which CIOMSClient will
# be tested. By default, it tests against the simulator, which is launched
# as an external process.
# """
#
# @classmethod
# def setUpClass(cls):
# OmsTestMixin.setUpClass()
# if cls.using_actual_rsn_oms_endpoint():
#             # use FQDN for local host if testing against actual RSN OMS:
# cls._use_fqdn_for_event_listener = True
#
# def setUp(self):
# oms_uri = os.getenv('OMS', "launchsimulator")
# oms_uri = self._dispatch_simulator(oms_uri)
# log.debug("oms_uri = %s", oms_uri)
# self.oms = CIOMSClientFactory.create_instance(oms_uri)
#
# def done():
# CIOMSClientFactory.destroy_instance(self.oms)
#
# self.addCleanup(done)
| bsd-2-clause |
dataxu/ansible | lib/ansible/modules/cloud/openstack/os_keypair.py | 5 | 4722 | #!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
# Copyright (c) 2013, John Dewey <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
author: "Benno Joy (@bennojoy)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove key pair from OpenStack
options:
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected into VMs
upon creation.
required: false
default: None
public_key_file:
description:
- Path to local file containing ssh public key. Mutually exclusive
with public_key.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: []
'''
EXAMPLES = '''
# Creates a key pair with the running users public key
- os_keypair:
cloud: mordred
state: present
name: ansible_key
public_key_file: /home/me/.ssh/id_rsa.pub
# Creates a new key pair and the private key returned after the run.
- os_keypair:
cloud: rax-dfw
state: present
name: ansible_key
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the keypair.
returned: success
type: string
public_key:
description: The public key value for the keypair.
returned: success
type: string
private_key:
description: The private key value for the keypair.
returned: Only when a keypair is generated for the user (e.g., when creating one
and a public key is not specified).
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _system_state_change(module, keypair):
state = module.params['state']
if state == 'present' and not keypair:
return True
if state == 'absent' and keypair:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
public_key=dict(default=None),
public_key_file=dict(default=None),
state=dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[['public_key', 'public_key_file']])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
state = module.params['state']
name = module.params['name']
public_key = module.params['public_key']
if module.params['public_key_file']:
public_key = open(module.params['public_key_file']).read()
public_key = public_key.rstrip()
shade, cloud = openstack_cloud_from_module(module)
try:
keypair = cloud.get_keypair(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, keypair))
if state == 'present':
if keypair and keypair['name'] == name:
if public_key and (public_key != keypair['public_key']):
module.fail_json(
msg="Key name %s present but key hash not the same"
" as offered. Delete key first." % name
)
else:
changed = False
else:
keypair = cloud.create_keypair(name, public_key)
changed = True
module.exit_json(changed=changed,
key=keypair,
id=keypair['id'])
elif state == 'absent':
if keypair:
cloud.delete_keypair(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
yaojenkuo/BuildingMachineLearningSystemsWithPython | ch03/rel_post_20news.py | 24 | 3903 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
import scipy as sp
new_post = \
"""Disk drive problems. Hi, I have a problem with my hard disk.
After 1 year it is working only sporadically now.
I tried to format it, but now it doesn't boot any more.
Any ideas? Thanks.
"""
print("""\
Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'!
For the 2nd edition we introduced a couple of changes that will result into
results that differ from the results in the 1st edition.
E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring
you to download the data manually from MLCOMP.
If you have any questions, please ask at http://www.twotoreal.com
""")
all_data = sklearn.datasets.fetch_20newsgroups(subset="all")
print("Number of total posts: %i" % len(all_data.filenames))
# Number of total posts: 18846
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
print("Number of training posts in tech groups:", len(train_data.filenames))
# Number of training posts in tech groups: 3529
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
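    # Wraps the parent analyzer so every token is passed through the English
    # Snowball stemmer before TF-IDF weighting.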
def build_analyzer(self):
analyzer = super(TfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
num_samples, num_features = vectorized.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
# samples: 3529, #features: 4712
from sklearn.cluster import KMeans
km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3)
clustered = km.fit(vectorized)
print("km.labels_=%s" % km.labels_)
# km.labels_=[ 6 34 22 ..., 2 21 26]
print("km.labels_.shape=%s" % km.labels_.shape)
# km.labels_.shape=3529
from sklearn import metrics
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
# Homogeneity: 0.400
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
# Completeness: 0.206
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
# V-measure: 0.272
print("Adjusted Rand Index: %0.3f" %
metrics.adjusted_rand_score(labels, km.labels_))
# Adjusted Rand Index: 0.064
print("Adjusted Mutual Information: %0.3f" %
metrics.adjusted_mutual_info_score(labels, km.labels_))
# Adjusted Mutual Information: 0.197
print(("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(vectorized, labels, sample_size=1000)))
# Silhouette Coefficient: 0.006
new_post_vec = vectorizer.transform([new_post])
new_post_label = km.predict(new_post_vec)[0]
similar_indices = (km.labels_ == new_post_label).nonzero()[0]
similar = []
for i in similar_indices:
dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())
similar.append((dist, train_data.data[i]))
similar = sorted(similar)
print("Count similar: %i" % len(similar))
show_at_1 = similar[0]
show_at_2 = similar[int(len(similar) / 10)]
show_at_3 = similar[int(len(similar) / 2)]
print("=== #1 ===")
print(show_at_1)
print()
print("=== #2 ===")
print(show_at_2)
print()
print("=== #3 ===")
print(show_at_3)
| mit |
hef/samba | python/samba/netcmd/spn.py | 46 | 7603 | # spn management
#
# Copyright Matthieu Patou [email protected] 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import ldb
from samba import provision
from samba.samdb import SamDB
from samba.auth import system_session
from samba.netcmd.common import _get_user_realm_domain
from samba.netcmd import (
Command,
CommandError,
SuperCommand,
Option
)
class cmd_spn_list(Command):
"""List spns of a given user."""
synopsis = "%prog <user> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["user"]
def run(self, user, credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
        # TODO once I understand how, use the domain info to nail down
# to the correct domain
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
self.outf.write(cleaneduser+"\n")
res = sam.search(
expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
if len(res) >0:
spns = res[0].get("servicePrincipalName")
found = False
flag = ldb.FLAG_MOD_ADD
if spns is not None:
self.outf.write(
"User %s has the following servicePrincipalName: \n" %
res[0].dn)
for e in spns:
self.outf.write("\t %s\n" % e)
else:
self.outf.write("User %s has no servicePrincipalName" %
res[0].dn)
else:
raise CommandError("User %s not found" % user)
class cmd_spn_add(Command):
"""Create a new spn."""
synopsis = "%prog <name> <user> [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_options = [
Option("--force", help="Force the addition of the spn"
" even it exists already", action="store_true"),
]
takes_args = ["name", "user"]
def run(self, name, user, force=False, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
res = sam.search(
expression="servicePrincipalName=%s" % ldb.binary_encode(name),
scope=ldb.SCOPE_SUBTREE)
if len(res) != 0 and not force:
raise CommandError("Service principal %s already"
" affected to another user" % name)
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
res = sam.search(
expression="samaccountname=%s" % ldb.binary_encode(cleaneduser),
scope=ldb.SCOPE_SUBTREE, attrs=["servicePrincipalName"])
if len(res) >0:
res[0].dn
msg = ldb.Message()
spns = res[0].get("servicePrincipalName")
tab = []
found = False
flag = ldb.FLAG_MOD_ADD
if spns is not None:
for e in spns:
if str(e) == name:
found = True
tab.append(str(e))
flag = ldb.FLAG_MOD_REPLACE
tab.append(name)
msg.dn = res[0].dn
msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
"servicePrincipalName")
if not found:
sam.modify(msg)
else:
raise CommandError("Service principal %s already"
" affected to %s" % (name, user))
else:
raise CommandError("User %s not found" % user)
class cmd_spn_delete(Command):
"""Delete a spn."""
synopsis = "%prog <name> [user] [options]"
takes_optiongroups = {
"sambaopts": options.SambaOptions,
"credopts": options.CredentialsOptions,
"versionopts": options.VersionOptions,
}
takes_args = ["name", "user?"]
def run(self, name, user=None, credopts=None, sambaopts=None,
versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
paths = provision.provision_paths_from_lp(lp, lp.get("realm"))
sam = SamDB(paths.samdb, session_info=system_session(),
credentials=creds, lp=lp)
res = sam.search(
expression="servicePrincipalName=%s" % ldb.binary_encode(name),
scope=ldb.SCOPE_SUBTREE,
attrs=["servicePrincipalName", "samAccountName"])
if len(res) >0:
result = None
if user is not None:
(cleaneduser, realm, domain) = _get_user_realm_domain(user)
for elem in res:
if str(elem["samAccountName"]).lower() == cleaneduser:
result = elem
if result is None:
raise CommandError("Unable to find user %s with"
" spn %s" % (user, name))
else:
if len(res) != 1:
listUser = ""
for r in res:
listUser = "%s\n%s" % (listUser, str(r.dn))
raise CommandError("More than one user has the spn %s "
"and no specific user was specified, list of users"
" with this spn:%s" % (name, listUser))
else:
result=res[0]
msg = ldb.Message()
spns = result.get("servicePrincipalName")
tab = []
if spns is not None:
for e in spns:
if str(e) != name:
tab.append(str(e))
flag = ldb.FLAG_MOD_REPLACE
msg.dn = result.dn
msg["servicePrincipalName"] = ldb.MessageElement(tab, flag,
"servicePrincipalName")
sam.modify(msg)
else:
raise CommandError("Service principal %s not affected" % name)
class cmd_spn(SuperCommand):
"""Service Principal Name (SPN) management."""
subcommands = {}
subcommands["add"] = cmd_spn_add()
subcommands["list"] = cmd_spn_list()
subcommands["delete"] = cmd_spn_delete()
| gpl-3.0 |
ace02000/pyload | module/plugins/accounts/FastshareCz.py | 1 | 1470 | # -*- coding: utf-8 -*-
import re
from module.plugins.internal.Account import Account
from module.plugins.internal.Plugin import set_cookie
class FastshareCz(Account):
__name__ = "FastshareCz"
__type__ = "account"
__version__ = "0.11"
__status__ = "testing"
__description__ = """Fastshare.cz account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "[email protected]"),
("stickell", "[email protected]")]
CREDIT_PATTERN = r'Credit\s*:\s*</td>\s*<td>(.+?)\s*<'
def grab_info(self, user, password, data):
validuntil = -1
trafficleft = None
premium = False
html = self.load("http://www.fastshare.cz/user")
m = re.search(self.CREDIT_PATTERN, html)
if m:
trafficleft = self.parse_traffic(m.group(1))
premium = bool(trafficleft)
return {'validuntil' : validuntil,
'trafficleft': trafficleft,
'premium' : premium}
def signin(self, user, password, data):
set_cookie(self.req.cj, "fastshare.cz", "lang", "en")
self.load('http://www.fastshare.cz/login') #@NOTE: Do not remove or it will not login
html = self.load("https://www.fastshare.cz/sql.php",
post={'login': user,
'heslo': password})
if ">Wrong username or password" in html:
self.fail_login()
| gpl-3.0 |
bowlofstew/common | api/biiapi.py | 5 | 2554 | from abc import ABCMeta, abstractmethod
from biicode.common.model.symbolic.reference import References
from biicode.common.edition.block_holder import BlockHolder
class BiiAPI(object):
    '''The main interface for user access to biicode published information'''
#TODO: Clearly specify raised Exceptions in each method
#TODO: Validate implementations, to check that they really follow this specification
__metaclass__ = ABCMeta
def require_auth(self):
"""Require a logged username"""
raise NotImplementedError()
@abstractmethod
def get_dep_table(self, block_version):
"""
return: BlockVersionTable
"""
raise NotImplementedError()
@abstractmethod
def get_published_resources(self, references):
"""
param references: References
return: ReferencedResources
"""
raise NotImplementedError()
@abstractmethod
def get_cells_snapshot(self, block_version):
"""
return: [CellName] of the cells corresponding to such block_version
"""
raise NotImplementedError()
def get_block_holder(self, block_version):
""""
return: BlockHolder
"""
assert block_version.time is not None
refs = References()
block_cells_name = self.get_cells_snapshot(block_version)
refs[block_version] = set(block_cells_name)
resources = self.get_published_resources(refs)
return BlockHolder(block_version.block_name, resources[block_version])
@abstractmethod
def get_renames(self, brl_block, t1, t2):
'''return a Renames object (i.e. a dict{oldName:newName}'''
raise NotImplementedError()
@abstractmethod
def publish(self, publish_request):
raise NotImplementedError()
@abstractmethod
def get_version_delta_info(self, block_version):
raise NotImplementedError()
@abstractmethod
def get_version_by_tag(self, brl_block, version_tag):
raise NotImplementedError()
@abstractmethod
def get_block_info(self, brl_block):
raise NotImplementedError()
@abstractmethod
def find(self, finder_request, response):
'''Finder and updater
return a FinderResult'''
raise NotImplementedError()
@abstractmethod
def get_server_info(self):
''' Gets the server info ServerInfo object'''
raise NotImplementedError()
@abstractmethod
def authenticate(self):
''' Gets the token'''
raise NotImplementedError()
| mit |
gbriones1/cloud-init | cloudinit/sources/DataSourceNone.py | 15 | 1846 | # vi: ts=4 expandtab
#
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit import log as logging
from cloudinit import sources
LOG = logging.getLogger(__name__)
class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
self.userdata_raw = ''
def get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
if 'userdata_raw' in self.ds_cfg:
self.userdata_raw = self.ds_cfg['userdata_raw']
if 'metadata' in self.ds_cfg:
self.metadata = self.ds_cfg['metadata']
return True
def get_instance_id(self):
return 'iid-datasource-none'
@property
def is_disconnected(self):
return True
# Used to match classes to dependencies
datasources = [
(DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
(DataSourceNone, []),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
| gpl-3.0 |
HerkCoin/herkcoin | share/qt/extract_strings_qt.py | 2945 | 1844 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
jhonatajh/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/win/gyptest-link-opt-icf.py | 344 | 1319 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure comdat folding optimization setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('opt-icf.gyp', chdir=CHDIR)
test.build('opt-icf.gyp', chdir=CHDIR)
# We're specifying /DEBUG so the default is to not merge identical
# functions, so all of the similar_functions should be preserved.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_default.exe', chdir=CHDIR))
if output.count('similar_function') != 6: # 3 definitions, 3 calls.
test.fail_test()
  # Explicitly off, all functions preserved separately.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_no.exe', chdir=CHDIR))
if output.count('similar_function') != 6: # 3 definitions, 3 calls.
test.fail_test()
# Explicitly on, all but one removed.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_opticf_yes.exe', chdir=CHDIR))
if output.count('similar_function') != 4: # 1 definition, 3 calls.
test.fail_test()
test.pass_test()
| gpl-3.0 |
locationtech/geowave | python/src/main/python/pygw/store/accumulo/accumulo_options.py | 2 | 2877 | #
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from pygw.config import geowave_pkg
from pygw.store import DataStoreOptions
class AccumuloOptions(DataStoreOptions):
"""
Accumulo data store options.
"""
def __init__(self):
super().__init__(geowave_pkg.datastore.accumulo.config.AccumuloRequiredOptions())
def set_zookeeper(self, zookeeper):
"""
        Sets the list of Zookeeper servers that the Accumulo instance uses as a comma-separated
string.
Args:
zookeeper (str): A comma-separated list of Zookeeper servers.
"""
self._java_ref.setZookeeper(zookeeper)
def get_zookeeper(self):
"""
Returns:
            A comma-separated list of Zookeeper servers.
"""
return self._java_ref.getZookeeper()
def set_instance(self, instance):
"""
Sets the Accumulo instance ID to use for the data store.
Args:
instance (str): The Accumulo instance ID to use.
"""
self._java_ref.setInstance(instance)
def get_instance(self):
"""
Returns:
The Accumulo instance ID.
"""
return self._java_ref.getInstance()
def set_user(self, user):
"""
Sets the Accumulo user ID.
Args:
user (str): The Accumulo user ID.
"""
self._java_ref.setUser(user)
def get_user(self):
"""
Returns:
The Accumulo user ID.
"""
return self._java_ref.getUser()
def set_password(self, password):
"""
Sets the Accumulo password.
Args:
password (str): The Accumulo password.
"""
self._java_ref.setPassword(password)
def get_password(self):
"""
Returns:
The Accumulo password.
"""
return self._java_ref.getPassword()
def set_use_locality_groups(self, use_locality_groups):
"""
Sets whether or not to use locality groups.
Args:
use_locality_groups (bool): Whether or not to use locality groups.
"""
self._base_options.setUseLocalityGroups(use_locality_groups)
def is_use_locality_groups(self):
"""
Returns:
True if locality groups are enabled, False otherwise.
"""
return self._base_options.isUseLocalityGroups()
| apache-2.0 |
matthewrudy/kubernetes | cluster/juju/layers/kubernetes/reactive/k8s.py | 53 | 14370 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shlex import split
from shutil import copy2
from subprocess import check_call
from charms.docker.compose import Compose
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import is_leader
from charmhelpers.core.hookenv import status_set
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.core.host import chdir
from contextlib import contextmanager
@hook('config-changed')
def config_changed():
'''If the configuration values change, remove the available states.'''
config = hookenv.config()
if any(config.changed(key) for key in config.keys()):
hookenv.log('Configuration options have changed.')
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
hookenv.log('Removing kubelet container and kubelet.available state.')
# Stop and remove the Kubernetes kubelet container..
compose.kill('kubelet')
compose.rm('kubelet')
# Remove the state so the code can react to restarting kubelet.
remove_state('kubelet.available')
hookenv.log('Removing proxy container and proxy.available state.')
# Stop and remove the Kubernetes proxy container.
compose.kill('proxy')
compose.rm('proxy')
# Remove the state so the code can react to restarting proxy.
remove_state('proxy.available')
if config.changed('version'):
hookenv.log('Removing kubectl.downloaded state so the new version'
' of kubectl will be downloaded.')
remove_state('kubectl.downloaded')
@when('tls.server.certificate available')
@when_not('k8s.server.certificate available')
def server_cert():
'''When the server certificate is available, get the server certificate from
the charm unit data and write it to the proper directory. '''
destination_directory = '/srv/kubernetes'
# Save the server certificate from unitdata to /srv/kubernetes/server.crt
save_certificate(destination_directory, 'server')
# Copy the unitname.key to /srv/kubernetes/server.key
copy_key(destination_directory, 'server')
set_state('k8s.server.certificate available')
@when('tls.client.certificate available')
@when_not('k8s.client.certficate available')
def client_cert():
'''When the client certificate is available, get the client certificate
from the charm unitdata and write it to the proper directory. '''
destination_directory = '/srv/kubernetes'
if not os.path.isdir(destination_directory):
os.makedirs(destination_directory)
os.chmod(destination_directory, 0o770)
# The client certificate is also available on charm unitdata.
client_cert_path = 'easy-rsa/easyrsa3/pki/issued/client.crt'
kube_cert_path = os.path.join(destination_directory, 'client.crt')
if os.path.isfile(client_cert_path):
# Copy the client.crt to /srv/kubernetes/client.crt
copy2(client_cert_path, kube_cert_path)
# The client key is only available on the leader.
client_key_path = 'easy-rsa/easyrsa3/pki/private/client.key'
kube_key_path = os.path.join(destination_directory, 'client.key')
if os.path.isfile(client_key_path):
# Copy the client.key to /srv/kubernetes/client.key
copy2(client_key_path, kube_key_path)
@when('tls.certificate.authority available')
@when_not('k8s.certificate.authority available')
def ca():
'''When the Certificate Authority is available, copy the CA from the
/usr/local/share/ca-certificates/k8s.crt to the proper directory. '''
# Ensure the /srv/kubernetes directory exists.
directory = '/srv/kubernetes'
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Normally the CA is just on the leader, but the tls layer installs the
# CA on all systems in the /usr/local/share/ca-certificates directory.
ca_path = '/usr/local/share/ca-certificates/{0}.crt'.format(
hookenv.service_name())
# The CA should be copied to the destination directory and named 'ca.crt'.
destination_ca_path = os.path.join(directory, 'ca.crt')
if os.path.isfile(ca_path):
copy2(ca_path, destination_ca_path)
set_state('k8s.certificate.authority available')
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
def final_messaging():
'''Lower layers emit messages, and if we do not clear the status messaging
queue, we are left with whatever the last method call sets status to. '''
# It's good UX to have consistent messaging that the cluster is online
if is_leader():
status_set('active', 'Kubernetes leader running')
else:
status_set('active', 'Kubernetes follower running')
@when('kubelet.available', 'proxy.available', 'cadvisor.available')
@when_not('skydns.available')
def launch_skydns():
'''Create a kubernetes service and resource controller for the skydns
service. '''
# Only launch and track this state on the leader.
# Launching duplicate SkyDNS rc will raise an error
if not is_leader():
return
cmd = "kubectl create -f files/manifests/skydns-rc.yml"
check_call(split(cmd))
cmd = "kubectl create -f files/manifests/skydns-svc.yml"
check_call(split(cmd))
set_state('skydns.available')
@when('docker.available')
@when_not('etcd.available')
def relation_message():
    '''Take over messaging to let the user know the charm is waiting on a
    relation to the ETCD cluster before going any further. '''
status_set('waiting', 'Waiting for relation to ETCD')
@when('etcd.available', 'tls.server.certificate available')
@when_not('kubelet.available', 'proxy.available')
def master(etcd):
'''Install and run the hyperkube container that starts kubernetes-master.
This actually runs the kubelet, which in turn runs a pod that contains the
other master components. '''
render_files(etcd)
# Use the Compose class that encapsulates the docker-compose commands.
compose = Compose('files/kubernetes')
status_set('maintenance', 'Starting the Kubernetes kubelet container.')
# Start the Kubernetes kubelet container using docker-compose.
compose.up('kubelet')
set_state('kubelet.available')
# Open the secure port for api-server.
hookenv.open_port(6443)
status_set('maintenance', 'Starting the Kubernetes proxy container')
# Start the Kubernetes proxy container using docker-compose.
compose.up('proxy')
set_state('proxy.available')
status_set('active', 'Kubernetes started')
@when('proxy.available')
@when_not('kubectl.downloaded')
def download_kubectl():
'''Download the kubectl binary to test and interact with the cluster.'''
status_set('maintenance', 'Downloading the kubectl binary')
version = hookenv.config()['version']
cmd = 'wget -nv -O /usr/local/bin/kubectl https://storage.googleapis.com/' \
'kubernetes-release/release/{0}/bin/linux/amd64/kubectl'
cmd = cmd.format(version)
hookenv.log('Downloading kubelet: {0}'.format(cmd))
check_call(split(cmd))
cmd = 'chmod +x /usr/local/bin/kubectl'
check_call(split(cmd))
set_state('kubectl.downloaded')
status_set('active', 'Kubernetes installed')
@when('kubectl.downloaded')
@when_not('kubectl.package.created')
def package_kubectl():
'''Package the kubectl binary and configuration to a tar file for users
to consume and interact directly with Kubernetes.'''
if not is_leader():
return
context = 'default-context'
cluster_name = 'kubernetes'
public_address = hookenv.unit_public_ip()
directory = '/srv/kubernetes'
key = 'client.key'
ca = 'ca.crt'
cert = 'client.crt'
user = 'ubuntu'
port = '6443'
with chdir(directory):
# Create the config file with the external address for this server.
cmd = 'kubectl config set-cluster --kubeconfig={0}/config {1} ' \
'--server=https://{2}:{3} --certificate-authority={4}'
check_call(split(cmd.format(directory, cluster_name, public_address,
port, ca)))
# Create the credentials.
cmd = 'kubectl config set-credentials --kubeconfig={0}/config {1} ' \
'--client-key={2} --client-certificate={3}'
check_call(split(cmd.format(directory, user, key, cert)))
# Create a default context with the cluster.
cmd = 'kubectl config set-context --kubeconfig={0}/config {1}' \
' --cluster={2} --user={3}'
check_call(split(cmd.format(directory, context, cluster_name, user)))
# Now make the config use this new context.
cmd = 'kubectl config use-context --kubeconfig={0}/config {1}'
check_call(split(cmd.format(directory, context)))
# Copy the kubectl binary to this directory
cmd = 'cp -v /usr/local/bin/kubectl {0}'.format(directory)
check_call(split(cmd))
# Create an archive with all the necessary files.
cmd = 'tar -cvzf /home/ubuntu/kubectl_package.tar.gz ca.crt client.crt client.key config kubectl' # noqa
check_call(split(cmd))
set_state('kubectl.package.created')
@when('proxy.available')
@when_not('cadvisor.available')
def start_cadvisor():
'''Start the cAdvisor container that gives metrics about the other
application containers on this system. '''
compose = Compose('files/kubernetes')
compose.up('cadvisor')
set_state('cadvisor.available')
status_set('active', 'cadvisor running on port 8088')
hookenv.open_port(8088)
@when('sdn.available')
def gather_sdn_data():
'''Get the Software Defined Network (SDN) information and return it as a
dictionary.'''
# SDN Providers pass data via the unitdata.kv module
db = unitdata.kv()
# Generate an IP address for the DNS provider
subnet = db.get('sdn_subnet')
if subnet:
ip = subnet.split('/')[0]
dns_server = '.'.join(ip.split('.')[0:-1]) + '.10'
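        # e.g. a (hypothetical) 'sdn_subnet' of '10.1.0.0/16' yields a dns_server of '10.1.0.10'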
addedcontext = {}
addedcontext['dns_server'] = dns_server
return addedcontext
return {}
def copy_key(directory, prefix):
'''Copy the key from the easy-rsa/easyrsa3/pki/private directory to the
specified directory. '''
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Must remove the path characters from the local unit name.
path_name = hookenv.local_unit().replace('/', '_')
# The key is not in unitdata it is in the local easy-rsa directory.
local_key_path = 'easy-rsa/easyrsa3/pki/private/{0}.key'.format(path_name)
key_name = '{0}.key'.format(prefix)
# The key should be copied to this directory.
destination_key_path = os.path.join(directory, key_name)
# Copy the key file from the local directory to the destination.
copy2(local_key_path, destination_key_path)
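# Example (hypothetical unit name): for local unit 'kubernetes/0' and prefix
# 'server', this copies
#   easy-rsa/easyrsa3/pki/private/kubernetes_0.key -> <directory>/server.key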
def render_files(reldata=None):
'''Use jinja templating to render the docker-compose.yml and master.json
file to contain the dynamic data for the configuration files.'''
context = {}
# Load the context manager with sdn and config data.
context.update(gather_sdn_data())
context.update(hookenv.config())
if reldata:
context.update({'connection_string': reldata.connection_string()})
charm_dir = hookenv.charm_dir()
rendered_kube_dir = os.path.join(charm_dir, 'files/kubernetes')
if not os.path.exists(rendered_kube_dir):
os.makedirs(rendered_kube_dir)
rendered_manifest_dir = os.path.join(charm_dir, 'files/manifests')
if not os.path.exists(rendered_manifest_dir):
os.makedirs(rendered_manifest_dir)
    # Add the manifest directory so the docker-compose file can reference it.
context.update({'manifest_directory': rendered_manifest_dir,
'private_address': hookenv.unit_get('private-address')})
# Render the files/kubernetes/docker-compose.yml file that contains the
# definition for kubelet and proxy.
target = os.path.join(rendered_kube_dir, 'docker-compose.yml')
render('docker-compose.yml', target, context)
# Render the files/manifests/master.json that contains parameters for the
# apiserver, controller, and controller-manager
target = os.path.join(rendered_manifest_dir, 'master.json')
render('master.json', target, context)
# Render files/kubernetes/skydns-svc.yaml for SkyDNS service
target = os.path.join(rendered_manifest_dir, 'skydns-svc.yml')
render('skydns-svc.yml', target, context)
# Render files/kubernetes/skydns-rc.yaml for SkyDNS pods
target = os.path.join(rendered_manifest_dir, 'skydns-rc.yml')
render('skydns-rc.yml', target, context)
def save_certificate(directory, prefix):
'''Get the certificate from the charm unitdata, and write it to the proper
directory. The parameters are: destination directory, and prefix to use
for the key and certificate name.'''
if not os.path.isdir(directory):
os.makedirs(directory)
os.chmod(directory, 0o770)
# Grab the unitdata key value store.
store = unitdata.kv()
certificate_data = store.get('tls.{0}.certificate'.format(prefix))
certificate_name = '{0}.crt'.format(prefix)
# The certificate should be saved to this directory.
certificate_path = os.path.join(directory, certificate_name)
# write the server certificate out to the correct location
with open(certificate_path, 'w') as fp:
fp.write(certificate_data)
| apache-2.0 |
jephianlin/minrank_aux | xi_dict.py | 1 | 6449 | print("---SAPreduced_matrix, has_SAP, find_ZFloor, Zsap, etc.")
def SAPmatrix(A):
"""
Input: a symmetric matrix A
Output: The matrix for checking if A has SAP
"""
if A.is_symmetric()==False:
raise ValueError("Input matrix is not symmetric.")
AA=[];
n=A.dimensions()[0];
row_num=0;
for i in range(n):
for j in range(n):
AA.append([0]*(n*n));
if A[i][j]!=0 or i==j:
AA[row_num][i*n+j]=1;
if A[i][j]==0 and i!=j:
AA[row_num][i*n+j]=1;
AA[row_num][j*n+i]=-1;
row_num+=1;
BB=identity_matrix(n).tensor_product(A);
for row in BB.rows():
AA.append(row);
return matrix(AA);
def SAPreduced_matrix(A):
"""
Input: a symmetric matrix A
Output: the reduced matrix for checking if A has SAP
"""
if A.is_symmetric()==False:
raise ValueError("Input matrix is not symmetric.")
AA=[];
n=A.dimensions()[0];
nonedge=0;
for i in range(n):
for j in range(i+1,n):
if A[i][j]==0:
AA.append([0]*(n*n));
i_start=i*n;
j_start=j*n;
for k in range(n):
AA[nonedge][i_start+k]=A[j][k];
AA[nonedge][j_start+k]=A[i][k];
nonedge+=1;
return matrix(AA).transpose();
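# Shape note (follows from the construction above): for an n-by-n matrix A the
# returned matrix has n^2 rows and one column per pair i < j with A[i][j] == 0,
# so has_SAP() below amounts to checking that these columns are linearly
# independent (full column rank).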
def has_SAP(A):
"""
Input: a symmetric matrix A
Output: True if A has Strong Arnold Property; False if A does not.
"""
##SAPreduced_matrix is faster than SAPmatrix
##AA=SAPmatrix(A);
##if AA.rank()==AA.dimensions()[1]:
AA=SAPreduced_matrix(A);
if AA.rank()==AA.dimensions()[1]:
return True;
else:
return False;
def ful_annihilator(A):
"""
Input: a symmetric matrix A
Output: 0 if A has SAP; otherwise return the basis of ful_annihilators of A;
"""
n=A.dimensions()[0];
AA=SAPmatrix(A);
ker=AA.right_kernel();
if ker.dimension()==0:
return 0;
else:
basis=[];
for v in ker.basis():
list_v=list(v);
basis.append(matrix(n,n,list_v));
return basis;
def ZFloor_game(g,done,act,token,chron=False):
"""
g: considered graph
    done: list of blue vertices that can no longer move (their tokens are taken)
act: list of active blue vertices
token: integer of available tokens
    Output: True if it is a ZFloor forcing set; False if not. To see the chron list, set chron=True.
"""
##for graphs and lists, we need to make a copy.
h=g.copy()
this_done=[];
this_act=[];
for v in done:
h.delete_vertex(v);
this_done.append(v);
for v in act:
this_act.append(v);
##Do conventional CRC as possible, and collect tokens.
##delete every edges between this_act.
for u,w in Combinations(this_act,2):
h.delete_edge(u,w);
again=True;
while again:
again=False;
for v in this_act:
if h.degree(v)==1:
u=h.neighbors(v)[0];
this_act.append(u);
this_act.remove(v);
this_done.append(v);
h.delete_vertex(v);
for w in this_act:
h.delete_edge(u,w);
again=True;
break;
if h.degree(v)==0:
token+=1;
this_act.remove(v);
this_done.append(v);
h.delete_vertex(v);
again=True;
if h.order()==0:
return True;
if h.order()!=0 and token==0:
return False;
##Find white set
white=h.vertices();
for v in this_act:
white.remove(v);
##Do recursion.
if token>=len(white):
return True;
else:
for new_act in Combinations(white,token):
if ZFloor_game(g,this_done,this_act+new_act,0)==True:
return True;
return False;
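# Example (Sage session sketch): on the path P4 with vertices 0-1-2-3,
# ZFloor_game(graphs.PathGraph(4), [], [0], 0) returns True, since the single
# active vertex 0 forces along the path by the conventional color-change rule
# and never needs an extra token.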
def find_ZFloor(g):
"""
Input: a simple graph g
Output: the ZFloor of g
"""
ZF=g.order()-1;
if ZF<0:
return ZF+1;
try_lower=True;
while try_lower:
try_lower=False;
if ZFloor_game(g,[],[],ZF)==True:
try_lower=True;
ZF+=-1;
return ZF+1;
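# Usage sketch: find_ZFloor(graphs.PathGraph(4)) returns 1 -- the game with one
# token succeeds (see the example above), while with zero tokens and no active
# vertices ZFloor_game() fails immediately.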
T3FamilyString=['C~', 'DFw','EBnW','F@QZo','G?Gisg','H??@qiK']
T3Family=[Graph(stg) for stg in T3FamilyString];
def xi_ubd(g):
C=g.connected_components_subgraphs();
if len(C)==1:
ubd=find_ZFloor(g);
e=g.size();
if g.is_bipartite():
#print "bipartite"
ubd=min(ubd,int(-0.5+sqrt(2.25+2*e)));
else:
#print "not bipartite"
ubd=min(ubd,int(-0.5+sqrt(0.25+2*e)));
if g.is_tree():
#print "tree"
ubd=min(ubd,2);
return ubd;
else:
ubd=0;
for com in C:
ubd=max(ubd,xi_ubd(com));
return ubd;
def xi_lbd(g):
###SUPER long...
lbd=1;
if g.is_forest()==False or max(g.degree_sequence())>=3:
lbd=2;
for t in T3Family:
if has_minor(g,t):
return 3;
return lbd;
##This function requires gzerosgame and find_gzfs functions in oc_diag_analysis.sage
def SAPreduced_mr(g,non_singular=False):
n=g.order();
A=g.adjacency_matrix()-identity_matrix(n);
AA=SAPreduced_matrix(A);
##rows should be n^2; cols should be number of nonedges
rows,cols=AA.dimensions();
##Set X as -1~-rows and Y as 1~cols
X=[];
for i in range(1,rows+1):
X.append(-i);
Y=[];
for i in range(1,cols+1):
Y.append(i);
##NOTE: the labeling of graphs start at 1 and -1, but not 0
## but the labeling of the matrix start at 0 for both rows and columns
SAP_g=Graph(0);
SAP_g.add_vertices(X);
SAP_g.add_vertices(Y);
##setting edges and banned set
B=[];
for i in range(rows):
for j in range(cols):
if AA[i][j]!=0:
SAP_g.add_edge(-i-1,j+1);
if AA[i][j]==-1:
B.append((-i-1,j+1));
##For debug
#show(AA);
#show(SAP_g);
#print B;
if non_singular==False:
return rows+cols-find_gZ(SAP_g, X, B);
if non_singular==True:
#print gzerosgame(SAP_g, X, B);
return len(gzerosgame(SAP_g, X, B))==rows+cols;
| gpl-2.0 |
Lh4cKg/sl4a | python/src/Lib/test/test___all__.py | 52 | 6085 | import unittest
from test.test_support import run_unittest
import sys
import warnings
class AllTest(unittest.TestCase):
def check_all(self, modname):
names = {}
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".* (module|package)",
DeprecationWarning)
try:
exec "import %s" % modname in names
except ImportError:
# Silent fail here seems the best route since some modules
# may not be available in all environments.
return
self.failUnless(hasattr(sys.modules[modname], "__all__"),
"%s has no __all__ attribute" % modname)
names = {}
exec "from %s import *" % modname in names
if "__builtins__" in names:
del names["__builtins__"]
keys = set(names)
all = set(sys.modules[modname].__all__)
self.assertEqual(keys, all)
def test_all(self):
if not sys.platform.startswith('java'):
# In case _socket fails to build, make this test fail more gracefully
# than an AttributeError somewhere deep in CGIHTTPServer.
import _socket
self.check_all("BaseHTTPServer")
self.check_all("Bastion")
self.check_all("CGIHTTPServer")
self.check_all("ConfigParser")
self.check_all("Cookie")
self.check_all("MimeWriter")
self.check_all("Queue")
self.check_all("SimpleHTTPServer")
self.check_all("SocketServer")
self.check_all("StringIO")
self.check_all("UserString")
self.check_all("aifc")
self.check_all("atexit")
self.check_all("audiodev")
self.check_all("base64")
self.check_all("bdb")
self.check_all("binhex")
self.check_all("calendar")
self.check_all("cgi")
self.check_all("cmd")
self.check_all("code")
self.check_all("codecs")
self.check_all("codeop")
self.check_all("colorsys")
self.check_all("commands")
self.check_all("compileall")
self.check_all("copy")
self.check_all("copy_reg")
self.check_all("csv")
self.check_all("dbhash")
self.check_all("decimal")
self.check_all("difflib")
self.check_all("dircache")
self.check_all("dis")
self.check_all("doctest")
self.check_all("dummy_thread")
self.check_all("dummy_threading")
self.check_all("filecmp")
self.check_all("fileinput")
self.check_all("fnmatch")
self.check_all("fpformat")
self.check_all("ftplib")
self.check_all("getopt")
self.check_all("getpass")
self.check_all("gettext")
self.check_all("glob")
self.check_all("gzip")
self.check_all("heapq")
self.check_all("htmllib")
self.check_all("httplib")
self.check_all("ihooks")
self.check_all("imaplib")
self.check_all("imghdr")
self.check_all("imputil")
self.check_all("keyword")
self.check_all("linecache")
self.check_all("locale")
self.check_all("macpath")
self.check_all("macurl2path")
self.check_all("mailbox")
self.check_all("mailcap")
self.check_all("mhlib")
self.check_all("mimetools")
self.check_all("mimetypes")
self.check_all("mimify")
self.check_all("multifile")
self.check_all("netrc")
self.check_all("nntplib")
self.check_all("ntpath")
self.check_all("opcode")
self.check_all("optparse")
self.check_all("os")
self.check_all("os2emxpath")
self.check_all("pdb")
self.check_all("pickle")
self.check_all("pickletools")
self.check_all("pipes")
self.check_all("popen2")
self.check_all("poplib")
self.check_all("posixpath")
self.check_all("pprint")
self.check_all("profile")
self.check_all("pstats")
self.check_all("pty")
self.check_all("py_compile")
self.check_all("pyclbr")
self.check_all("quopri")
self.check_all("random")
self.check_all("re")
self.check_all("repr")
self.check_all("rexec")
self.check_all("rfc822")
self.check_all("rlcompleter")
self.check_all("robotparser")
self.check_all("sched")
self.check_all("sets")
self.check_all("sgmllib")
self.check_all("shelve")
self.check_all("shlex")
self.check_all("shutil")
self.check_all("smtpd")
self.check_all("smtplib")
self.check_all("sndhdr")
self.check_all("socket")
self.check_all("_strptime")
self.check_all("symtable")
self.check_all("tabnanny")
self.check_all("tarfile")
self.check_all("telnetlib")
self.check_all("tempfile")
self.check_all("test.test_support")
self.check_all("textwrap")
self.check_all("threading")
self.check_all("timeit")
self.check_all("toaiff")
self.check_all("tokenize")
self.check_all("traceback")
self.check_all("tty")
self.check_all("unittest")
self.check_all("urllib")
self.check_all("urlparse")
self.check_all("uu")
self.check_all("warnings")
self.check_all("wave")
self.check_all("weakref")
self.check_all("webbrowser")
self.check_all("xdrlib")
self.check_all("zipfile")
# rlcompleter needs special consideration; it import readline which
# initializes GNU readline which calls setlocale(LC_CTYPE, "")... :-(
try:
self.check_all("rlcompleter")
finally:
try:
import locale
except ImportError:
pass
else:
locale.setlocale(locale.LC_CTYPE, 'C')
def test_main():
run_unittest(AllTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
Andr3iC/courtlistener | cleaning_scripts/correct_links_to_resource_org_186.py | 5 | 2193 | import os
import sys
execfile('/etc/courtlistener')
sys.path.append(INSTALL_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from alert.search.models import Document, Citation
from alert.lib.db_tools import queryset_generator
from alert.lib.string_utils import clean_string
from alert.lib.string_utils import harmonize
from alert.lib.string_utils import titlecase
from optparse import OptionParser
def link_fixer(link):
"""Fixes the errors in a link
Orig: http://bulk.resource.org/courts.gov/c/US/819/996.F2d.311.html
Fixed: http://bulk.resource.org/courts.gov/c/F2/996/996.F2d.311.html
"""
# Very crude and lazy replacement of US with F2
link_parts = link.split('US')
fixed = 'F2'.join(link_parts)
# Fixes the number
link_parts = fixed.split('/')
number = int(link_parts[-2]) + 177
fixed = '/'.join(link_parts[0:-2]) + "/" + str(number) + "/" + str(link_parts[-1])
return fixed
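# The fixed offset of 177 maps the mis-labelled "US" volume number onto the
# corresponding F2d volume: in the docstring example above, 819 + 177 = 996,
# matching the 996.F2d.311 file name in the corrected URL.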
def cleaner(simulate=False, verbose=False):
docs = queryset_generator(Document.objects.filter(source = 'R', time_retrieved__gt = '2011-06-01'))
for doc in docs:
original_link = doc.download_url
fixed = link_fixer(original_link)
doc.download_url = fixed
if verbose:
print "Changing: " + original_link
print " to: " + fixed
if not simulate:
doc.save()
def main():
usage = "usage: %prog [--verbose] [---simulate]"
parser = OptionParser(usage)
parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
default=False, help="Display log during execution")
parser.add_option('-s', '--simulate', action="store_true",
dest='simulate', default=False, help="Simulate the corrections without " + \
"actually making them.")
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return cleaner(simulate, verbose)
if __name__ == '__main__':
main()
| agpl-3.0 |
Fokko/incubator-airflow | airflow/contrib/hooks/gdrive_hook.py | 1 | 5267 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Google Drive service"""
from typing import Any, Optional
from googleapiclient.discovery import Resource, build
from googleapiclient.http import MediaFileUpload
from airflow.gcp.hooks.base import CloudBaseHook
# noinspection PyAbstractClass
class GoogleDriveHook(CloudBaseHook):
"""
Hook for the Google Drive APIs.
:param api_version: API version used (for example v3).
:type api_version: str
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
_conn = None # type: Optional[Resource]
def __init__(
self,
api_version: str = "v3",
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None
) -> None:
super().__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
def get_conn(self) -> Any:
"""
Retrieves the connection to Google Drive.
:return: Google Drive services object.
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build("drive", self.api_version, http=http_authorized, cache_discovery=False)
return self._conn
def _ensure_folders_exists(self, path: str) -> str:
service = self.get_conn()
current_parent = "root"
folders = path.split("/")
depth = 0
# First tries to enter directories
for current_folder in folders:
self.log.debug("Looking for %s directory with %s parent", current_folder, current_parent)
conditions = [
"mimeType = 'application/vnd.google-apps.folder'",
"name='{}'".format(current_folder),
"'{}' in parents".format(current_parent),
]
result = (
service.files() # pylint: disable=no-member
.list(q=" and ".join(conditions), spaces="drive", fields="files(id, name)")
.execute(num_retries=self.num_retries)
)
files = result.get("files", [])
if not files:
self.log.info("Not found %s directory", current_folder)
# If the directory does not exist, break loops
break
depth += 1
current_parent = files[0].get("id")
# Check if there are directories to process
if depth != len(folders):
# Create missing directories
for current_folder in folders[depth:]:
file_metadata = {
"name": current_folder,
"mimeType": "application/vnd.google-apps.folder",
"parents": [current_parent],
}
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("Created %s directory", current_folder)
current_parent = file.get("id")
# Return the ID of the last directory
return current_parent
def upload_file(self, local_location: str, remote_location: str) -> str:
"""
Uploads a file that is available locally to a Google Drive service.
:param local_location: The path where the file is available.
:type local_location: str
        :param remote_location: The path where the file will be sent
:type remote_location: str
:return: File ID
:rtype: str
"""
service = self.get_conn()
directory_path, _, filename = remote_location.rpartition("/")
if directory_path:
parent = self._ensure_folders_exists(directory_path)
else:
parent = "root"
file_metadata = {"name": filename, "parents": [parent]}
media = MediaFileUpload(local_location)
file = (
service.files() # pylint: disable=no-member
.create(body=file_metadata, media_body=media, fields="id")
.execute(num_retries=self.num_retries)
)
self.log.info("File %s uploaded to gdrive://%s.", local_location, remote_location)
return file.get("id")
| apache-2.0 |
zzjkf2009/Midterm_Astar | opencv/samples/python/squares.py | 1 | 1774 | #!/usr/bin/env python
'''
Simple "Square Detector" program.
Loads several images sequentially and tries to find squares in each image.
'''
# Python 2/3 compatibility
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
def angle_cos(p0, p1, p2):
d1, d2 = (p0-p1).astype('float'), (p2-p1).astype('float')
return abs( np.dot(d1, d2) / np.sqrt( np.dot(d1, d1)*np.dot(d2, d2) ) )
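# angle_cos() returns |cos| of the angle at p1 formed by the vectors p1->p0 and
# p1->p2, so values near 0 indicate an approximately right angle -- the test
# used by find_squares() below.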
def find_squares(img):
img = cv2.GaussianBlur(img, (5, 5), 0)
squares = []
for gray in cv2.split(img):
for thrs in xrange(0, 255, 26):
if thrs == 0:
bin = cv2.Canny(gray, 0, 50, apertureSize=5)
bin = cv2.dilate(bin, None)
else:
_retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
bin, contours, _hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
cnt_len = cv2.arcLength(cnt, True)
cnt = cv2.approxPolyDP(cnt, 0.02*cnt_len, True)
if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
cnt = cnt.reshape(-1, 2)
max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])
if max_cos < 0.1:
squares.append(cnt)
return squares
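# The sweep over thresholds 0, 26, ..., 234 (with a Canny pass at thrs == 0)
# produces several binarisations per channel, so a square missed at one
# threshold can still be found at another; candidate contours are then filtered
# by area (> 1000), convexity and near-right-angle corners (max_cos < 0.1).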
if __name__ == '__main__':
from glob import glob
for fn in glob('../data/pic*.png'):
img = cv2.imread(fn)
squares = find_squares(img)
cv2.drawContours( img, squares, -1, (0, 255, 0), 3 )
cv2.imshow('squares', img)
ch = cv2.waitKey()
if ch == 27:
break
cv2.destroyAllWindows()
| mit |
rbian/virt-test | tools/package_jeos.py | 15 | 1491 | #!/usr/bin/python
import os
import sys
import logging
import shutil
import common
from autotest.client import utils
from autotest.client.shared import logging_manager
from virttest import utils_misc
def package_jeos(img):
"""
Package JeOS and make it ready for upload.
Steps:
1) Move /path/to/jeos.qcow2 to /path/to/jeos.qcow2.backup
2) Sparsify the image, creating a new, trimmed down /path/to/jeos.qcow2
3) Compress the sparsified image with 7za
:param img: Path to a qcow2 image
"""
basedir = os.path.dirname(img)
backup = img + '.backup'
qemu_img = utils_misc.find_command('qemu-img')
shutil.move(img, backup)
logging.info("Backup %s saved", backup)
utils.system("%s convert -f qcow2 -O qcow2 %s %s" % (qemu_img, backup, img))
logging.info("Sparse file %s created successfully", img)
archiver = utils_misc.find_command('7za')
compressed_img = img + ".7z"
utils.system("%s a %s %s" % (archiver, compressed_img, img))
logging.info("JeOS compressed file %s created successfuly",
compressed_img)
if __name__ == "__main__":
logging_manager.configure_logging(utils_misc.VirtLoggingConfig(),
verbose=True)
if len(sys.argv) <= 1:
logging.info("Usage: %s [path to freshly installed JeOS qcow2 image]",
sys.argv[0])
sys.exit(1)
path = sys.argv[1]
image = os.path.abspath(path)
package_jeos(image)
| gpl-2.0 |
husni75/p2pool_idc | p2pool/bitcoin/sha256.py | 285 | 3084 | from __future__ import division
import struct
k = [
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
]
def process(state, chunk):
def rightrotate(x, n):
return (x >> n) | (x << 32 - n) % 2**32
w = list(struct.unpack('>16I', chunk))
for i in xrange(16, 64):
s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ (w[i-15] >> 3)
s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ (w[i-2] >> 10)
w.append((w[i-16] + s0 + w[i-7] + s1) % 2**32)
a, b, c, d, e, f, g, h = start_state = struct.unpack('>8I', state)
for k_i, w_i in zip(k, w):
t1 = (h + (rightrotate(e, 6) ^ rightrotate(e, 11) ^ rightrotate(e, 25)) + ((e & f) ^ (~e & g)) + k_i + w_i) % 2**32
a, b, c, d, e, f, g, h = (
(t1 + (rightrotate(a, 2) ^ rightrotate(a, 13) ^ rightrotate(a, 22)) + ((a & b) ^ (a & c) ^ (b & c))) % 2**32,
a, b, c, (d + t1) % 2**32, e, f, g,
)
return struct.pack('>8I', *((x + y) % 2**32 for x, y in zip(start_state, [a, b, c, d, e, f, g, h])))
initial_state = struct.pack('>8I', 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19)
class sha256(object):
digest_size = 256//8
block_size = 512//8
def __init__(self, data='', _=(initial_state, '', 0)):
self.state, self.buf, self.length = _
self.update(data)
def update(self, data):
state = self.state
buf = self.buf + data
chunks = [buf[i:i + self.block_size] for i in xrange(0, len(buf) + 1, self.block_size)]
for chunk in chunks[:-1]:
state = process(state, chunk)
self.state = state
self.buf = chunks[-1]
self.length += 8*len(data)
def copy(self, data=''):
return self.__class__(data, (self.state, self.buf, self.length))
def digest(self):
state = self.state
buf = self.buf + '\x80' + '\x00'*((self.block_size - 9 - len(self.buf)) % self.block_size) + struct.pack('>Q', self.length)
for chunk in [buf[i:i + self.block_size] for i in xrange(0, len(buf), self.block_size)]:
state = process(state, chunk)
return state
def hexdigest(self):
return self.digest().encode('hex')
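# Consistency sketch (hedged): for ordinary string input this pure-Python class
# is expected to agree with hashlib, e.g.
#   import hashlib
#   sha256('abc').hexdigest() == hashlib.sha256('abc').hexdigest()
# The extra constructor argument lets callers resume from a saved
# (state, buf, length) triple, which is useful for reusing a midstate when many
# messages share a fixed 64-byte prefix.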
| gpl-3.0 |
aisipos/django | django/db/backends/base/operations.py | 43 | 23170 | import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
        raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
# RemovedInDjango20Warning
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
def to_unicode(s):
return force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
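    # Example: value=2015 produces ['2015-01-01', '2015-12-31'] with the
    # default adapt_datefield_value() above, i.e. the inclusive BETWEEN bounds
    # for a __year lookup on a DateField.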
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
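    # Example: combine_expression('+', ['"price"', '"tax"']) returns
    # '"price" + "tax"'; backends whose dialects treat operators such as %% or
    # & differently (e.g. Oracle) override this method.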
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return '%s'
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
raise NotImplementedError("This backend does not support %s subtraction." % internal_type)
| bsd-3-clause |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_strftime.py | 132 | 6956 | """
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import test_support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace('\%', '%')
new_text = new_text.replace('\:', ':')
new_text = new_text.replace('\?', '?')
return new_text
class StrftimeTest(unittest.TestCase):
def __init__(self, *k, **kw):
unittest.TestCase.__init__(self, *k, **kw)
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
import locale
locale.setlocale(locale.LC_TIME, 'C')
def test_strftime(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if test_support.verbose:
print "Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0])
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if test_support.verbose:
print "strftime test for", time.ctime(now)
now = self.now
# Make sure any characters that could be taken as regex syntax is
# escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
            # mustn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError, error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(long(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError, result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if test_support.verbose:
print msg
continue
if re.match(escapestr(e[1], self.ampm), result):
if test_support.verbose:
print "Supports nonstandard '%s' format (%s)" % (e[0], e[2])
elif not result or result[0] == '%':
if test_support.verbose:
print "Does not appear to support '%s' format (%s)" % \
(e[0], e[2])
else:
if test_support.verbose:
print "Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2])
print " Expected %s, but got %s" % (e[1], result)
def test_main():
test_support.run_unittest(StrftimeTest)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
Don42/youtube-dl | youtube_dl/extractor/ina.py | 129 | 1064 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class InaIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ina\.fr/video/(?P<id>I?[A-Z0-9]+)'
_TEST = {
'url': 'http://www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
'md5': 'a667021bf2b41f8dc6049479d9bb38a3',
'info_dict': {
'id': 'I12055569',
'ext': 'mp4',
'title': 'François Hollande "Je crois que c\'est clair"',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
info_doc = self._download_xml(mrss_url, video_id)
self.report_extraction(video_id)
video_url = info_doc.find('.//{http://search.yahoo.com/mrss/}player').attrib['url']
return {
'id': video_id,
'url': video_url,
'title': info_doc.find('.//title').text,
}
| unlicense |
xpac1985/ansible | lib/ansible/plugins/action/win_copy.py | 185 | 1153 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.plugins.action.copy import ActionModule as CopyActionModule
# Even though CopyActionModule inherits from ActionBase, we still need to
# directly inherit from ActionBase to appease the plugin loader.
class ActionModule(CopyActionModule, ActionBase):
pass
| gpl-3.0 |
blindroot/django | django/contrib/gis/db/backends/spatialite/base.py | 445 | 3615 | import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (
Database, DatabaseWrapper as SQLiteDatabaseWrapper, SQLiteCursorWrapper,
)
from django.utils import six
from .client import SpatiaLiteClient
from .features import DatabaseFeatures
from .introspection import SpatiaLiteIntrospection
from .operations import SpatiaLiteOperations
from .schema import SpatialiteSchemaEditor
class DatabaseWrapper(SQLiteDatabaseWrapper):
SchemaEditorClass = SpatialiteSchemaEditor
def __init__(self, *args, **kwargs):
# Before we get too far, make sure pysqlite 2.5+ is installed.
if Database.version_info < (2, 5, 0):
raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
'compatible with SpatiaLite and GeoDjango.')
# Trying to find the location of the SpatiaLite library.
# Here we are figuring out the path to the SpatiaLite library
# (`libspatialite`). If it's not in the system library path (e.g., it
# cannot be found by `ctypes.util.find_library`), then it may be set
# manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
find_library('spatialite'))
if not self.spatialite_lib:
raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
'Make sure it is in your library path, or set '
'SPATIALITE_LIBRARY_PATH in your settings.'
)
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = SpatiaLiteOperations(self)
self.client = SpatiaLiteClient(self)
self.introspection = SpatiaLiteIntrospection(self)
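    # Example settings override (hypothetical path) for systems where the
    # library is not on the default search path:
    #   SPATIALITE_LIBRARY_PATH = '/usr/local/lib/mod_spatialite.so'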
def get_new_connection(self, conn_params):
conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
# Enabling extension loading on the SQLite connection.
try:
conn.enable_load_extension(True)
except AttributeError:
raise ImproperlyConfigured(
'The pysqlite library does not support C extension loading. '
'Both SQLite and pysqlite must be configured to allow '
'the loading of extensions to use SpatiaLite.')
# Loading the SpatiaLite library extension on the connection, and returning
# the created cursor.
cur = conn.cursor(factory=SQLiteCursorWrapper)
try:
cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
except Exception as msg:
new_msg = (
'Unable to load the SpatiaLite library extension '
'"%s" because: %s') % (self.spatialite_lib, msg)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
cur.close()
return conn
def prepare_database(self):
super(DatabaseWrapper, self).prepare_database()
# Check if spatial metadata have been initialized in the database
with self.cursor() as cursor:
cursor.execute("PRAGMA table_info(geometry_columns);")
if cursor.fetchall() == []:
arg = "1" if self.features.supports_initspatialmetadata_in_one_transaction else ""
cursor.execute("SELECT InitSpatialMetaData(%s)" % arg)
| bsd-3-clause |
RO-ny9/python-for-android | python3-alpha/python3-src/Lib/idlelib/AutoComplete.py | 67 | 9061 | """AutoComplete.py - An IDLE extension for automatically completing names.
This extension can complete either attribute names or file names. It can pop
a window with all available names, for the user to select from.
"""
import os
import sys
import string
from idlelib.configHandler import idleConf
# This string includes all chars that may be in a file name (without a path
# separator)
FILENAME_CHARS = string.ascii_letters + string.digits + os.curdir + "._~#$:-"
# This string includes all chars that may be in an identifier
ID_CHARS = string.ascii_letters + string.digits + "_"
# These constants represent the two different types of completions
COMPLETE_ATTRIBUTES, COMPLETE_FILES = range(1, 2+1)
from idlelib import AutoCompleteWindow
from idlelib.HyperParser import HyperParser
import __main__
SEPS = os.sep
if os.altsep: # e.g. '/' on Windows...
SEPS += os.altsep
class AutoComplete:
menudefs = [
('edit', [
("Show Completions", "<<force-open-completions>>"),
])
]
popupwait = idleConf.GetOption("extensions", "AutoComplete",
"popupwait", type="int", default=0)
def __init__(self, editwin=None):
self.editwin = editwin
if editwin is None: # subprocess and test
return
self.text = editwin.text
self.autocompletewindow = None
# id of delayed call, and the index of the text insert when the delayed
# call was issued. If _delayed_completion_id is None, there is no
# delayed call.
self._delayed_completion_id = None
self._delayed_completion_index = None
def _make_autocomplete_window(self):
return AutoCompleteWindow.AutoCompleteWindow(self.text)
def _remove_autocomplete_window(self, event=None):
if self.autocompletewindow:
self.autocompletewindow.hide_window()
self.autocompletewindow = None
def force_open_completions_event(self, event):
"""Happens when the user really wants to open a completion list, even
if a function call is needed.
"""
self.open_completions(True, False, True)
def try_open_completions_event(self, event):
"""Happens when it would be nice to open a completion list, but not
        really necessary, for example after a dot, so function
calls won't be made.
"""
lastchar = self.text.get("insert-1c")
if lastchar == ".":
self._open_completions_later(False, False, False,
COMPLETE_ATTRIBUTES)
elif lastchar in SEPS:
self._open_completions_later(False, False, False,
COMPLETE_FILES)
def autocomplete_event(self, event):
"""Happens when the user wants to complete his word, and if necessary,
open a completion list after that (if there is more than one
completion)
"""
if hasattr(event, "mc_state") and event.mc_state:
# A modifier was pressed along with the tab, continue as usual.
return
if self.autocompletewindow and self.autocompletewindow.is_active():
self.autocompletewindow.complete()
return "break"
else:
opened = self.open_completions(False, True, True)
if opened:
return "break"
def _open_completions_later(self, *args):
self._delayed_completion_index = self.text.index("insert")
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = \
self.text.after(self.popupwait, self._delayed_open_completions,
*args)
def _delayed_open_completions(self, *args):
self._delayed_completion_id = None
if self.text.index("insert") != self._delayed_completion_index:
return
self.open_completions(*args)
def open_completions(self, evalfuncs, complete, userWantsWin, mode=None):
"""Find the completions and create the AutoCompleteWindow.
        Return True if successful (no syntax error or similar was found).
        If complete is True and there is nothing to complete and no start
        of completion, completions will not be opened and False is returned.
If mode is given, will open a completion list only in this mode.
"""
# Cancel another delayed call, if it exists.
if self._delayed_completion_id is not None:
self.text.after_cancel(self._delayed_completion_id)
self._delayed_completion_id = None
hp = HyperParser(self.editwin, "insert")
curline = self.text.get("insert linestart", "insert")
i = j = len(curline)
if hp.is_in_string() and (not mode or mode==COMPLETE_FILES):
self._remove_autocomplete_window()
mode = COMPLETE_FILES
while i and curline[i-1] in FILENAME_CHARS:
i -= 1
comp_start = curline[i:j]
j = i
while i and curline[i-1] in FILENAME_CHARS + SEPS:
i -= 1
comp_what = curline[i:j]
elif hp.is_in_code() and (not mode or mode==COMPLETE_ATTRIBUTES):
self._remove_autocomplete_window()
mode = COMPLETE_ATTRIBUTES
while i and curline[i-1] in ID_CHARS:
i -= 1
comp_start = curline[i:j]
if i and curline[i-1] == '.':
hp.set_index("insert-%dc" % (len(curline)-(i-1)))
comp_what = hp.get_expression()
if not comp_what or \
(not evalfuncs and comp_what.find('(') != -1):
return
else:
comp_what = ""
else:
return
if complete and not comp_what and not comp_start:
return
comp_lists = self.fetch_completions(comp_what, mode)
if not comp_lists[0]:
return
self.autocompletewindow = self._make_autocomplete_window()
self.autocompletewindow.show_window(comp_lists,
"insert-%dc" % len(comp_start),
complete,
mode,
userWantsWin)
return True
def fetch_completions(self, what, mode):
"""Return a pair of lists of completions for something. The first list
is a sublist of the second. Both are sorted.
If there is a Python subprocess, get the comp. list there. Otherwise,
either fetch_completions() is running in the subprocess itself or it
was called in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_completion_list",
(what, mode), {})
else:
if mode == COMPLETE_ATTRIBUTES:
if what == "":
namespace = __main__.__dict__.copy()
namespace.update(__main__.__builtins__.__dict__)
bigl = eval("dir()", namespace)
bigl.sort()
if "__all__" in bigl:
smalll = eval("__all__", namespace)
smalll.sort()
else:
smalll = [s for s in bigl if s[:1] != '_']
else:
try:
entity = self.get_entity(what)
bigl = dir(entity)
bigl.sort()
if "__all__" in bigl:
smalll = entity.__all__
smalll.sort()
else:
smalll = [s for s in bigl if s[:1] != '_']
except:
return [], []
elif mode == COMPLETE_FILES:
if what == "":
what = "."
try:
expandedpath = os.path.expanduser(what)
bigl = os.listdir(expandedpath)
bigl.sort()
smalll = [s for s in bigl if s[:1] != '.']
except OSError:
return [], []
if not smalll:
smalll = bigl
return smalll, bigl
def get_entity(self, name):
"""Lookup name in a namespace spanning sys.modules and __main.dict__"""
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
return eval(name, namespace)
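# A rough, hypothetical illustration of the fetch_completions() contract above
# (names and values are made up, not taken from a real run): for attribute
# completion on a list bound to "x" in the shell namespace,
#   smalll, bigl = AutoComplete().fetch_completions("x", COMPLETE_ATTRIBUTES)
# would return something like
#   smalll == ['append', 'count', 'index', ...]             # public names only
#   bigl   == ['__add__', '__class__', ..., 'append', ...]  # superset of smalll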
| apache-2.0 |
collex100/odoo | addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal module allows you to categorise your sales and deliveries (picking lists) between different journals.
========================================================================================================================
This module is very helpful for bigger companies that work by departments.
You can use journal for different purposes, some examples:
----------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible and evolves between different status:
-----------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, to validate or invoice packing.
It also supports batch invoicing methods that can be configured by partners and sales orders, examples:
-------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journals are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
domesticduck/MenuConciergeServer | vendor/bundle/ruby/2.0.0/gems/libv8-3.16.14.3/vendor/v8/tools/testrunner/local/verbose.py | 19 | 3680 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
from . import statusfile
REPORT_TEMPLATE = (
"""Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(timeout)4d tests are expected to timeout sometimes
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix""")
def PrintReport(tests):
total = len(tests)
skipped = timeout = nocrash = passes = fail_ok = fail = 0
for t in tests:
if "outcomes" not in dir(t) or not t.outcomes:
passes += 1
continue
o = t.outcomes
if statusfile.DoSkip(o):
skipped += 1
continue
if statusfile.TIMEOUT in o: timeout += 1
if statusfile.IsFlaky(o): nocrash += 1
if list(o) == [statusfile.PASS]: passes += 1
if statusfile.IsFailOk(o): fail_ok += 1
if list(o) == [statusfile.FAIL]: fail += 1
print REPORT_TEMPLATE % {
"total": total,
"skipped": skipped,
"timeout": timeout,
"nocrash": nocrash,
"pass": passes,
"fail_ok": fail_ok,
"fail": fail
}
def PrintTestSource(tests):
for test in tests:
suite = test.suite
source = suite.GetSourceForTest(test).strip()
if len(source) > 0:
print "--- begin source: %s/%s ---" % (suite.name, test.path)
print source
print "--- end source: %s/%s ---" % (suite.name, test.path)
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
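# Worked example of FormatTime(): a duration of 83.456 seconds becomes
# "01:23.456" -- time.gmtime()/strftime() give the "MM:SS." part and the
# rounded millisecond remainder supplies the trailing "456".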
def PrintTestDurations(suites, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
timed_tests = [ t for s in suites for t in s.tests
if t.duration is not None ]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
| apache-2.0 |
theo-l/django | tests/postgres_tests/test_hstore.py | 11 | 14088 | import json
from django.core import checks, exceptions, serializers
from django.db import connection
from django.db.models import OuterRef, Subquery
from django.db.models.expressions import RawSQL
from django.forms import Form
from django.test.utils import CaptureQueriesContext, isolate_apps
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase
from .models import HStoreModel, PostgreSQLModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.fields.hstore import KeyTransform
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgreSQLTestCase):
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertIsNone(reloaded.field)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_key_val_cast_to_string(self):
value = {'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'}
expected_value = {'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'}
instance = HStoreModel.objects.create(field=value)
instance = HStoreModel.objects.get()
self.assertEqual(instance.field, expected_value)
instance = HStoreModel.objects.get(field__a=1)
self.assertEqual(instance.field, expected_value)
instance = HStoreModel.objects.get(field__has_keys=[2, 'a', 'ï'])
self.assertEqual(instance.field, expected_value)
def test_array_field(self):
value = [
{'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
{'a': 1, 'b': 'B', 2: 'c', 'ï': 'ê'},
]
expected_value = [
{'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
{'a': '1', 'b': 'B', '2': 'c', 'ï': 'ê'},
]
instance = HStoreModel.objects.create(array_field=value)
instance.refresh_from_db()
self.assertEqual(instance.array_field, expected_value)
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = HStoreModel.objects.bulk_create([
HStoreModel(field={'a': 'b'}),
HStoreModel(field={'a': 'b', 'c': 'd'}),
HStoreModel(field={'c': 'd'}),
HStoreModel(field={}),
HStoreModel(field=None),
])
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_in_generator(self):
def search():
yield {'a': 'b'}
self.assertSequenceEqual(
HStoreModel.objects.filter(field__in=search()),
self.objs[:1]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
self.objs[:3]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_key_transform_raw_expression(self):
expr = RawSQL('%s::hstore', ['x => b, y => c'])
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a=KeyTransform('x', expr)),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_order_by_field(self):
more_objs = (
HStoreModel.objects.create(field={'g': '637'}),
HStoreModel.objects.create(field={'g': '002'}),
HStoreModel.objects.create(field={'g': '042'}),
HStoreModel.objects.create(field={'g': '981'}),
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='g').order_by('field__g'),
[more_objs[1], more_objs[2], more_objs[0], more_objs[3]]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
def test_key_isnull(self):
obj = HStoreModel.objects.create(field={'a': None})
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=True),
self.objs[2:5] + [obj]
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=False),
self.objs[:2]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
self.objs[:2]
)
def test_key_sql_injection(self):
with CaptureQueriesContext(connection) as queries:
self.assertFalse(
HStoreModel.objects.filter(**{
"field__test' = 'a') OR 1 = 1 OR ('d": 'x',
}).exists()
)
self.assertIn(
"""."field" -> 'test'' = ''a'') OR 1 = 1 OR (''d') = 'x' """,
queries[0]['sql'],
)
def test_obj_subquery_lookup(self):
qs = HStoreModel.objects.annotate(
value=Subquery(HStoreModel.objects.filter(pk=OuterRef('pk')).values('field')),
).filter(value__a='b')
self.assertSequenceEqual(qs, self.objs[:2])
@isolate_apps('postgres_tests')
class TestChecks(PostgreSQLSimpleTestCase):
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default={})
model = MyModel()
self.assertEqual(model.check(), [
checks.Warning(
msg=(
"HStoreField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint='Use a callable instead, e.g., use `dict` instead of `{}`.',
obj=MyModel._meta.get_field('field'),
id='fields.E010',
)
])
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = HStoreField(default=dict)
self.assertEqual(MyModel().check(), [])
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = json.dumps([{
'model': 'postgres_tests.hstoremodel',
'pk': None,
'fields': {
'field': json.dumps({'a': 'b'}),
'array_field': json.dumps([
json.dumps({'a': 'b'}),
json.dumps({'b': 'a'}),
]),
},
}])
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'}, array_field=[{'a': 'b'}, {'b': 'a'}])
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
self.assertEqual(instance.array_field, [{'a': 'b'}, {'b': 'a'}])
def test_roundtrip_with_null(self):
instance = HStoreModel(field={'a': 'b', 'c': None})
data = serializers.serialize('json', [instance])
new_instance = list(serializers.deserialize('json', data))[0].object
self.assertEqual(instance.field, new_instance.field)
class TestValidation(PostgreSQLSimpleTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of “a” is not a string or null.')
def test_none_allowed_as_value(self):
field = HStoreField()
self.assertEqual(field.clean({'a': None}, None), {'a': None})
class TestFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_non_dict_json(self):
field = forms.HStoreField()
msg = 'Input must be a JSON dictionary.'
with self.assertRaisesMessage(exceptions.ValidationError, msg) as cm:
field.clean('["a", "b", 1]')
self.assertEqual(cm.exception.code, 'invalid_format')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_none_value(self):
field = forms.HStoreField()
value = field.clean('{"a": null}')
self.assertEqual(value, {'a': None})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
def test_field_has_changed(self):
class HStoreFormTest(Form):
f1 = forms.HStoreField()
form_w_hstore = HStoreFormTest()
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
self.assertTrue(form_w_hstore.has_changed())
class TestValidator(PostgreSQLSimpleTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
| bsd-3-clause |
eranchetz/nupic | examples/opf/experiments/anomaly/spatial/10field_many_balanced/description.py | 96 | 16581 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
# os is needed below to build the absolute path to the experiment's data.csv.
import os
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
'f2': dict(fieldname='f2', n=100, name='f2', type='SDRCategoryEncoder', w=21),
'f3': dict(fieldname='f3', n=100, name='f3', type='SDRCategoryEncoder', w=21),
'f4': dict(fieldname='f4', n=100, name='f4', type='SDRCategoryEncoder', w=21),
'f5': dict(fieldname='f5', n=100, name='f5', type='SDRCategoryEncoder', w=21),
'f6': dict(fieldname='f6', n=100, name='f6', type='SDRCategoryEncoder', w=21),
'f7': dict(fieldname='f7', n=100, name='f7', type='SDRCategoryEncoder', w=21),
'f8': dict(fieldname='f8', n=100, name='f8', type='SDRCategoryEncoder', w=21),
'f9': dict(fieldname='f9', n=100, name='f9', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |
bjolivot/ansible | lib/ansible/modules/network/lenovo/cnos_template.py | 19 | 7146 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI templates to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_template
author: "Dave Kasberg (@dkasberg)"
short_description: Manage switch configuration using templates on devices running Lenovo CNOS
description:
- This module allows you to work with the running configuration of a switch. It provides a way
to execute a set of CNOS commands on a switch by evaluating the current running configuration
and executing the commands only if the specific settings have not been already configured.
The configuration source can be a set of commands or a template written in the Jinja2 templating language.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
      For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_template.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
commandfile:
description:
- This specifies the path to the CNOS command file which needs to be applied. This usually
comes from the commands folder. Generally this file is the output of the variables applied
on a template file. So this command is preceded by a template module.
Note The command file must contain the Ansible keyword {{ inventory_hostname }} in its
filename to ensure that the command file is unique for each switch and condition.
If this is omitted, the command file will be overwritten during iteration. For example,
commandfile=./commands/clos_leaf_bgp_{{ inventory_hostname }}_commands.txt
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_template. These are written in the main.yml file of the tasks directory.
---
- name: Replace Config CLI command template with values
template:
src: demo_template.j2
dest: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
vlanid1: 13
slot_chassis_number1: "1/2"
portchannel_interface_number1: 100
portchannel_mode1: "active"
- name: Applying CLI commands on Switches
cnos_template:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
commandfile: "./commands/demo_template_{{ inventory_hostname }}_commands.txt"
outputfile: "./results/demo_template_command_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
return value: |
On successful execution, the method returns a message in JSON format
[Template Applied.]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
commandfile=dict(required=True),
outputfile=dict(required=True),
host=dict(required=True),
deviceType=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
commandfile = module.params['commandfile']
outputfile = module.params['outputfile']
deviceType = module.params['deviceType']
hostIP = module.params['host']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
# Send commands one by one
#with open(commandfile, "r") as f:
f = open(commandfile, "r")
for line in f:
# Omit the comment lines in template file
if not line.startswith("#"):
command = line
if not line.endswith("\n"):
command = command+"\n"
response = cnos.waitForDeviceResponse(command, "#", 2, remote_conn)
errorMsg = cnos.checkOutputForError(response)
output = output + response
if(errorMsg is not None):
break # To cater to Mufti case
# Write to memory
output = output + cnos.waitForDeviceResponse("save\n", "#", 3, remote_conn)
# Write output to file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Template Applied")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
Amechi101/indieapp | account/auth_backends.py | 5 | 1360 | from __future__ import unicode_literals
from django.db.models import Q
from django.contrib.auth.backends import ModelBackend
from account.compat import get_user_model, get_user_lookup_kwargs
from account.models import EmailAddress
class UsernameAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": credentials["username"]
})
try:
user = User.objects.get(**lookup_kwargs)
except (User.DoesNotExist, KeyError):
return None
else:
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
class EmailAuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
qs = EmailAddress.objects.filter(Q(primary=True) | Q(verified=True))
try:
email_address = qs.get(email__iexact=credentials["username"])
except (EmailAddress.DoesNotExist, KeyError):
return None
else:
user = email_address.user
try:
if user.check_password(credentials["password"]):
return user
except KeyError:
return None
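# A project enabling these backends would typically point Django's
# AUTHENTICATION_BACKENDS setting at them; the dotted paths below assume this
# module is importable as account.auth_backends:
#   AUTHENTICATION_BACKENDS = [
#       "account.auth_backends.UsernameAuthenticationBackend",
#       "account.auth_backends.EmailAuthenticationBackend",
#   ]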
| mit |
nelsonw2014/watershed | setup.py | 1 | 1598 | import setuptools
from os import path
if __name__ == "__main__":
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setuptools.setup(
name="watershed",
version="0.0.1",
description="Data streams drain into a data reservoir so they can be used later or combined together",
author="CommerceHub Open Source",
url="https://github.com/commercehub-oss/watershed",
long_description=long_description,
packages=[
"watershed",
"pump_client"
],
install_requires=[
"Boto3",
"sshtunnel",
"requests"
],
        tests_require=[
'nose',
'requests'
],
include_package_data=True,
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Ruby',
'Programming Language :: Unix Shell',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Archiving',
'Topic :: System :: Clustering',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities'
],
zip_safe=False,
) | apache-2.0 |
dennissergeev/faamtools | faamtools/avaps.py | 2 | 2620 | # -*- coding: utf-8 -*-
"""
Functions to process AVAPS dropsondes data
"""
import datetime
import netCDF4 as nc
import numpy as np
from . import DsFld, ObsData
from . import utils
def read_avaps_nc(fname, flds=None, time2datetime=False):
"""Read AVAPS dropsonde data from a NetCDF file.
Open a NetCDF file and write data into `ObsData` instance of `DsFld` objects.
Perform filtering of raw dropsonde data using `var_utils.filt_miss_row`
Args:
-----
fname: str, file name
Kwargs:
-------
flds: dict, names of variables to read from a dropsonde data file
The default value is
dict(time='time',hgt='alt',lon='lon',lat='lat',
u='u_wind',v='v_wind',wspd='wspd',wdir='wdir',
pres='pres',tdry='tdry',thta='theta',dhgt='dz',
tdew='dp',relh='rh',mixr='mr',thte='theta_e',thtv='theta_v')
time2datetime: boolean, optional.
If True and `flds` dictionary contains 'time' key, convert array of
time values to `datetime.datetime` objects.
Requires `var_utils.timestr2datetime()` to parse time units.
Defaults to False.
Returns:
--------
data: `ObsData` instance
"""
    if flds is None:
flds = dict(time='time',hgt='alt',lon='lon',lat='lat',\
u='u_wind',v='v_wind',wspd='wspd',wdir='wdir',\
pres='pres',tdry='tdry',thta='theta',dhgt='dz',\
tdew='dp',relh='rh',mixr='mr',thte='theta_e',thtv='theta_v')
with nc.Dataset(fname) as f:
dum = ObsData()
for i in flds:
ncfld = f.variables[flds[i]]
dum(**{i:DsFld(raw=ncfld[:],units=ncfld.units,long_name=ncfld.long_name)})
flds_list = [ii for ii in flds] # to keep the order
fil_list = utils.filt_miss_row(*[getattr(dum,ii).raw for ii in flds_list])
data = ObsData()
for i, j in enumerate(fil_list):
data(**{flds_list[i]:DsFld(raw=getattr(dum,flds_list[i]).raw,\
fil=j,\
units=getattr(dum,flds_list[i]).units,\
long_name=getattr(dum,flds_list[i]).long_name)})
if time2datetime and 'time' in flds:
if hasattr(data.time, 'units'):
tbase, tstep_sec = utils.timestr2datetime(data.time.units)
arr_sec2datetime = np.vectorize(lambda x: tbase + datetime.timedelta(seconds=x*tstep_sec))
data.time.fil = arr_sec2datetime(data.time.fil)
return data
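# Minimal usage sketch; the file name and field choices below are hypothetical
# and only illustrate the intended call pattern:
#   ds = read_avaps_nc('faam-dropsonde-example.nc', time2datetime=True)
#   heights = ds.hgt.fil   # quality-filtered altitudes
#   temps = ds.tdry.fil    # matching dry-bulb temperatures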
| mit |
CSGreater-Developers/HMC-Grader | app/userViews/instructor/testSettings.py | 2 | 4117 | # coding=utf-8
'''This module handles all functions responsible for modifying the course
settings page.
View Function: instructorEditTestFile (instructor/testSettings.html)
Redirect Functions: TODO
AJAX Functions: TODO
'''
#Import the app
from app import app
#Import needed flask functions
from flask import g, render_template, redirect, url_for, flash, jsonify, abort
from flask import request
from flask.ext.login import current_user, login_required
#Import the models we need on these pages
from app.structures.models.user import *
from app.structures.models.gradebook import *
from app.structures.models.course import *
from app.structures.forms import ReuploadTestForm
#import plugins
from app.plugins.autograder import getTestFileParsers
#Generic python imports
import json
from werkzeug import secure_filename
@app.route('/editproblem/<pid>/editTestFile/<filename>')
@login_required
def instructorEditTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
    #For security purposes we send anyone who isn't an instructor or
#admin away
if not c in current_user.courseInstructor:
abort(403)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename)
return render_template('instructor/testSettings.html', course=c, assignment=a,\
problem=p, filename=filename, \
data=getTestData(filepath), form=ReuploadTestForm())
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
@app.route('/editproblem/<pid>/saveTestFile/<filename>', methods=['POST'])
@login_required
def instructorSaveTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
    #For security purposes we send anyone who isn't an instructor or
#admin away
if not (g.user.isAdmin or c in current_user.courseInstructor):
abort(403)
#Try to get the contents
content = request.get_json()
#make sure we got the contents
if content == None:
return jsonify(res=False)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename+".json")
with open(filepath, 'w') as f:
json.dump(content, f, sort_keys=True,indent=4, separators=(',', ': '))
return jsonify(res=True)
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
@app.route('/editproblem/<pid>/reupTestFile/<filename>', methods=['POST'])
@login_required
def instructorReuploadTestFile(pid, filename):
try:
p = Problem.objects.get(id=pid)
c,a = p.getParents()
    #For security purposes we send anyone who isn't an instructor or
#admin away
if not c in current_user.courseInstructor:
abort(403)
filepath = getTestPath(c, a, p)
filepath = os.path.join(filepath, filename)
gradeSpec = getTestData(filepath)
parser = getTestFileParsers()[gradeSpec['type']]
if request.method == "POST":
form = ReuploadTestForm(request.form)
if form.validate():
filename = secure_filename(request.files[form.testFile.name].filename)
if filename != gradeSpec['file']:
flash("Uploaded file does not have the same name as the existing file. Reupload failed.", "warning")
return redirect(url_for('instructorEditTestFile', pid=pid, filename=gradeSpec['file']))
request.files[form.testFile.name].save(filepath)
tests = parser(filepath)
#Filter out removed tests
for sec in gradeSpec['sections']:
sec['tests'] = [x for x in sec['tests'] if x in tests]
gradeSpec['tests'] = tests
with open(filepath+".json", 'w') as f:
json.dump(gradeSpec, f)
flash("File successfully reuploaded", "success")
return redirect(url_for('instructorEditTestFile', pid=pid, filename=filename))
except (Course.DoesNotExist, Problem.DoesNotExist, AssignmentGroup.DoesNotExist):
abort(404)
#
# Helper function for test data
#
def getTestData(fn):
with open(fn+".json") as f:
data = json.load(f)
return data
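# Judging from the keys consumed in this module ('type', 'file', 'tests',
# 'sections'), the side-car JSON loaded above looks roughly like the sketch
# below; the concrete values are hypothetical and depend on the autograder
# plugin that produced the file:
#   {
#     "type": "<parser name>",
#     "file": "tests.py",
#     "tests": ["testOne", "testTwo"],
#     "sections": [{"tests": ["testOne"]}]
#   }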
| mit |
hugovk/pylast | tests/test_pylast.py | 1 | 4174 | #!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import os
import time
import unittest
import pytest
from flaky import flaky
import pylast
def load_secrets():
secrets_file = "test_pylast.yaml"
if os.path.isfile(secrets_file):
import yaml # pip install pyyaml
with open(secrets_file, "r") as f: # see example_test_pylast.yaml
doc = yaml.load(f)
else:
doc = {}
try:
doc["username"] = os.environ["PYLAST_USERNAME"].strip()
doc["password_hash"] = os.environ["PYLAST_PASSWORD_HASH"].strip()
doc["api_key"] = os.environ["PYLAST_API_KEY"].strip()
doc["api_secret"] = os.environ["PYLAST_API_SECRET"].strip()
except KeyError:
pytest.skip("Missing environment variables: PYLAST_USERNAME etc.")
return doc
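# For reference, the YAML file read above is assumed to mirror
# example_test_pylast.yaml, i.e. the same four top-level keys (values here are
# fake):
#   username: some_lastfm_user
#   password_hash: 0123456789abcdef0123456789abcdef
#   api_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   api_secret: yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy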
class PyLastTestCase(unittest.TestCase):
def assert_startswith(self, str, prefix, start=None, end=None):
self.assertTrue(str.startswith(prefix, start, end))
def assert_endswith(self, str, suffix, start=None, end=None):
self.assertTrue(str.endswith(suffix, start, end))
@flaky(max_runs=3, min_passes=1)
class TestPyLastWithLastFm(PyLastTestCase):
secrets = None
def unix_timestamp(self):
return int(time.time())
def setUp(self):
if self.__class__.secrets is None:
self.__class__.secrets = load_secrets()
self.username = self.__class__.secrets["username"]
password_hash = self.__class__.secrets["password_hash"]
api_key = self.__class__.secrets["api_key"]
api_secret = self.__class__.secrets["api_secret"]
self.network = pylast.LastFMNetwork(
api_key=api_key,
api_secret=api_secret,
username=self.username,
password_hash=password_hash,
)
def helper_is_thing_hashable(self, thing):
# Arrange
things = set()
# Act
things.add(thing)
# Assert
self.assertIsNotNone(thing)
self.assertEqual(len(things), 1)
def helper_validate_results(self, a, b, c):
# Assert
self.assertIsNotNone(a)
self.assertIsNotNone(b)
self.assertIsNotNone(c)
self.assertGreaterEqual(len(a), 0)
self.assertGreaterEqual(len(b), 0)
self.assertGreaterEqual(len(c), 0)
self.assertEqual(a, b)
self.assertEqual(b, c)
def helper_validate_cacheable(self, thing, function_name):
# Arrange
# get thing.function_name()
func = getattr(thing, function_name, None)
# Act
result1 = func(limit=1, cacheable=False)
result2 = func(limit=1, cacheable=True)
result3 = func(limit=1)
# Assert
self.helper_validate_results(result1, result2, result3)
def helper_at_least_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertGreater(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], pylast.TopItem)
self.assertIsInstance(things[0].item, expected_type)
def helper_only_one_thing_in_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 1)
self.assertIsInstance(things, list)
self.assertIsInstance(things[0], expected_type)
def helper_two_different_things_in_top_list(self, things, expected_type):
# Assert
self.assertEqual(len(things), 2)
thing1 = things[0]
thing2 = things[1]
self.assertIsInstance(thing1, pylast.TopItem)
self.assertIsInstance(thing2, pylast.TopItem)
self.assertIsInstance(thing1.item, expected_type)
self.assertIsInstance(thing2.item, expected_type)
self.assertNotEqual(thing1, thing2)
if __name__ == "__main__":
unittest.main(failfast=True)
| apache-2.0 |
weiting-chen/manila | manila/tests/fake_volume.py | 4 | 1978 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class FakeVolume(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_vol_id')
self.status = kwargs.pop('status', 'available')
self.device = kwargs.pop('device', '')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
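# Typical construction in a test (all values hypothetical):
#   vol = FakeVolume(id='vol-1', size=1, status='in-use', device='/dev/vdb')
#   vol['status']   # dict-style access maps onto attributes -> 'in-use'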
class FakeVolumeSnapshot(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', 'fake_volsnap_id')
self.status = kwargs.pop('status', 'available')
for key, value in kwargs.items():
setattr(self, key, value)
def __getitem__(self, attr):
return getattr(self, attr)
class API(object):
"""Fake Volume API."""
def get(self, *args, **kwargs):
pass
def create_snapshot_force(self, *args, **kwargs):
pass
def get_snapshot(self, *args, **kwargs):
pass
def delete_snapshot(self, *args, **kwargs):
pass
def create(self, *args, **kwargs):
pass
def extend(self, *args, **kwargs):
pass
def get_all(self, search_opts):
pass
def delete(self, volume_id):
pass
def get_all_snapshots(self, search_opts):
pass
| apache-2.0 |
napkindrawing/ansible | lib/ansible/utils/module_docs_fragments/vyos.py | 224 | 2754 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- A dict object containing connection details.
default: null
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
default: null
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
"""
| gpl-3.0 |
waxmanr/moose | framework/contrib/nsiqcppstyle/rules/RULE_4_1_B_indent_each_enum_item_in_enum_block.py | 43 | 3015 | """
Indent each enum item in the enum block.
== Violation ==
enum A {
A_A, <== Violation
A_B <== Violation
}
== Good ==
enum A {
A_A, <== Good
A_B
}
"""
from nsiqcppstyle_rulehelper import *
from nsiqcppstyle_reporter import *
from nsiqcppstyle_rulemanager import *
def RunRule(lexer, typeName, typeFullName, decl, contextStack, typeContext) :
if not decl and typeName == "ENUM" and typeContext != None:
column = GetIndentation(lexer.GetCurToken())
lexer._MoveToToken(typeContext.startToken)
t2 = typeContext.endToken
while(True) :
t = lexer.GetNextTokenSkipWhiteSpaceAndCommentAndPreprocess()
if t == None or t == t2 :
break
# if typeContext != t.contextStack.Peek() : continue
if GetRealColumn(t) <= (column + 1):
nsiqcppstyle_reporter.Error(t, __name__, "Enum block should be indented. But the token(%s) seems to be unindented" % t.value);
ruleManager.AddTypeNameRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddTypeNameRule(RunRule)
def test1(self):
self.Analyze("test/thisFile.c",
"""
enum A {
}
""")
assert not CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA, BB
}
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4,
BB
}
""")
assert CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
,BB
}
""")
assert CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/thisFile.c",
"""
enum C {
AA = 4
/** HELLO */
,BB
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/thisFile.c",
"""
typedef enum {
AA = 4
/** HELLO */
,BB
} DD
""")
assert not CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/thisFile.c",
"""
typedef enum
{
SERVICE,
SERVER,
BROKER,
MANAGER,
REPL_SERVER,
REPL_AGENT,
UTIL_HELP,
UTIL_VERSION,
ADMIN
} UTIL_SERVICE_INDEX_E;
""")
assert not CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/thisFile.c",
"""
enum COLOR
{
COLOR_TRANSPARENT = RGB(0, 0, 255),
COLOR_ROOM_IN_OUT = 0xffff00,
COLOR_CHAT_ITEM = 0xff9419,
COLOR_CHAT_MY = 0x00b4ff,
COLOR_CHAT_YOUR = 0xa3d5ff,
COLOR_ROOM_INFO = 0x00ffff,
COLOR_RESULT_SCORE = 0xffcc00,
COLOR_RESULT_RATING = 0x00fcff,
COLOR_RESULT_POINT = 0x33ff00
}; """)
assert not CheckErrorContent(__name__)
| lgpl-2.1 |
CryToCry96/android_kernel_mt6572 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
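	# Illustrative note (not part of the original script): with the default
	# zoom of 0.5, us_to_px(10000) = 10000 / 1000 * 0.5 = 5 pixels, and
	# px_to_us(5) = 5 / 0.5 * 1000 = 10000, so the two helpers are inverses.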
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
petrus-v/odoo | addons/l10n_ro/__openerp__.py | 186 | 2241 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Fekete Mihai <[email protected]>, Tatár Attila <[email protected]>
# Copyright (C) 2011-2014 TOTAL PC SYSTEMS (http://www.erpsystems.ro).
# Copyright (C) 2014 Fekete Mihai
# Copyright (C) 2014 Tatár Attila
# Based on precedent versions developed by Fil System, Fekete Mihai
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "ERPsystems Solutions",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the Accounting Chart, VAT structure, Fiscal Position and Tax Mapping.
It also adds the Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo" : [],
"data" : ['partner_view.xml',
'account_chart.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'fiscal_position_template.xml',
'l10n_chart_ro_wizard.xml',
'res.country.state.csv',
'res.bank.csv',
],
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Eureka22/ASM_xf | PythonD/lib/python2.4/site-packages/display/PIL/Image.py | 2 | 54748 | #
# The Python Imaging Library.
# $Id: //modules/pil/PIL/Image.py#47 $
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1996-05-27 fl PIL release 0.1b2
# 1996-11-04 fl PIL release 0.2b1
# 1996-12-08 fl PIL release 0.2b2
# 1996-12-16 fl PIL release 0.2b3
# 1997-01-14 fl PIL release 0.2b4
# 1998-07-02 fl PIL release 0.3b1
# 1998-07-17 fl PIL release 0.3b2
# 1999-01-01 fl PIL release 1.0b1
# 1999-02-08 fl PIL release 1.0b2
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-01-14 fl PIL release 1.2b1 (imToolkit)
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
#
# Copyright (c) 1997-2003 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
VERSION = "1.1.4"
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk
import FixTk
except ImportError:
pass
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
import _imaging
core = _imaging
del _imaging
except ImportError, v:
import string
core = _imaging_not_installed()
if str(v)[:20] == "Module use of python":
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
try:
import warnings
warnings.warn(
"The _imaging extension was built for another version "
"of Python; most PIL functions will be disabled",
RuntimeWarning
)
except (ImportError, NameError, AttributeError):
pass # sorry
import ImagePalette
import os, string, sys
# type stuff
from types import IntType, StringType, TupleType
try:
UnicodeStringType = type(unicode(""))
def isStringType(t):
return isinstance(t, StringType) or isinstance(t, UnicodeStringType)
except NameError:
def isStringType(t):
return isinstance(t, StringType)
def isTupleType(t):
return isinstance(t, TupleType)
def isImageType(t):
return hasattr(t, "im")
def isDirectory(f):
return isStringType(f) and os.path.isdir(f)
from operator import isNumberType, isSequenceType
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2 # Not yet implemented
QUAD = 3
MESH = 4
# resampling filters
NONE = 0
NEAREST = 0
ANTIALIAS = 1 # 3-lobed lanczos
LINEAR = BILINEAR = 2
CUBIC = BICUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
# Experimental modes include I;16, I;16B, RGBa, BGR;15,
# and BGR;24. Use these modes only if you know exactly
# what you're doing...
}
MODES = _MODEINFO.keys()
MODES.sort()
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16B")
##
# Get "base" mode. Given a mode, this function returns "L" for
# images that contain grayscale data, and "RGB" for images that
# contain color data.
#
# @param mode Input mode.
# @return "L" or "RGB".
# @exception KeyError The input mode was not a standard mode.
def getmodebase(mode):
# corresponding "base" mode (grayscale or colour)
return _MODEINFO[mode][0]
##
# Get storage type mode. Given a mode, this function returns a
# single-layer mode suitable for storing individual bands.
#
# @param mode Input mode.
# @return "L", "I", or "F".
# @exception KeyError The input mode was not a standard mode.
def getmodetype(mode):
# storage type (per band)
return _MODEINFO[mode][1]
##
# Get list of individual band names. Given a mode, this function
# returns a tuple containing the names of individual bands (use
# <b>getmodetype</b> to get the mode used to store each individual
# band).
#
# @param mode Input mode.
# @return A tuple containing band names. The length of the tuple
# gives the number of bands in an image of the given mode.
# @exception KeyError The input mode was not a standard mode.
def getmodebands(mode):
# return list of subcomponents
return len(_MODEINFO[mode][2])
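##
# Illustrative sketch (not part of the original module): expected results
# from the three mode helpers above, given the _MODEINFO table.
def _example_mode_helpers():
    assert getmodebase("CMYK") == "RGB"   # colour data
    assert getmodetype("I") == "I"        # 32-bit integer storage
    assert getmodebands("RGB") == 3       # three band names: R, G, B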
# --------------------------------------------------------------------
# Helpers
_initialized = 0
##
# Explicitly load standard file format drivers.
def preinit():
"Load standard file format drivers."
global _initialized
if _initialized >= 1:
return
for m in ("Bmp", "Gif", "Jpeg", "Ppm", "Png", "Tiff"):
try:
__import__("%sImagePlugin" % m, globals(), locals(), [])
except ImportError:
pass # ignore missing driver for now
_initialized = 1
##
# Explicitly load all available file format drivers.
def init():
"Load all file format drivers."
global _initialized
if _initialized >= 2:
return
visited = {}
directories = sys.path
try:
directories = directories + [os.path.dirname(__file__)]
except NameError:
pass
# only check directories (including current, if present in the path)
for directory in filter(isDirectory, directories):
fullpath = os.path.abspath(directory)
if visited.has_key(fullpath):
continue
for file in os.listdir(directory):
if file[-14:] == "ImagePlugin.py":
f, e = os.path.splitext(file)
try:
sys.path.insert(0, directory)
try:
__import__(f, globals(), locals(), [])
finally:
del sys.path[0]
except ImportError:
if DEBUG:
print "Image: failed to import",
print f, ":", sys.exc_value
visited[fullpath] = None
if OPEN or SAVE:
_initialized = 2
# --------------------------------------------------------------------
# Codec factories (used by tostring/fromstring and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print decoder, (mode,) + args + extra
return apply(decoder, (mode,) + args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isTupleType(args):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print encoder, (mode,) + args + extra
return apply(encoder, (mode,) + args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
class _E:
def __init__(self, data): self.data = data
def __coerce__(self, other): return self, _E(other)
def __add__(self, other): return _E((self.data, "__add__", other.data))
def __mul__(self, other): return _E((self.data, "__mul__", other.data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isNumberType(c)):
return c, 0.0
if (a is stub and b == "__add__" and isNumberType(c)):
return 1.0, c
except TypeError: pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isNumberType(c) and
d == "__add__" and isNumberType(e)):
return c, e
except TypeError: pass
raise ValueError("illegal expression")
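##
# Illustrative sketch (not part of the original module): the expression
# analyzer above reduces a scale-then-offset lambda to a (scale, offset)
# pair; anything more complex raises ValueError.
def _example_scaleoffset():
    assert _getscaleoffset(lambda x: x * 2 + 10) == (2, 10)
    assert _getscaleoffset(lambda x: x + 5) == (1.0, 5)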
# --------------------------------------------------------------------
# Implementation wrapper
##
# This class represents an image object. To create Image objects, use
# the appropriate factory functions. There's hardly ever any reason
# to call the Image constructor directly.
#
# @see #open
# @see #new
# @see #fromstring
class Image:
format = None
format_description = None
def __init__(self):
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
new.palette = self.palette
if im.mode == "P":
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
def _copy(self):
self.load()
self.im = self.im.copy()
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile
if not file:
file = tempfile.mktemp()
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
file = file + "." + format
self.save(file, format)
return file
##
# Returns a string containing pixel data.
#
# @param encoder_name What encoder to use. The default is to
# use the standard "raw" encoder.
# @param *args Extra arguments to the encoder.
# @return An 8-bit string.
def tostring(self, encoder_name="raw", *args):
"Return image as a binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
data = []
while 1:
l, s, d = e.encode(65536)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tostring" % s)
return string.join(data, "")
##
# Returns the image converted to an X11 bitmap. This method
# only works for mode "1" images.
#
# @param name The name prefix to use for the bitmap variables.
# @return A string containing an X11 bitmap.
# @exception ValueError If the mode is not "1"
def tobitmap(self, name="image"):
"Return image as an XBM bitmap"
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tostring("xbm")
return string.join(["#define %s_width %d\n" % (name, self.size[0]),
"#define %s_height %d\n"% (name, self.size[1]),
"static char %s_bits[] = {\n" % name, data, "};"], "")
##
# Same as the <b>fromstring</b> function, but loads data
# into the current image.
def fromstring(self, data, decoder_name="raw", *args):
"Load data to image from binary string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
##
# Allocates storage for the image and loads the pixel data. In
# normal cases, you don't need to call this method, since the
# Image class automatically loads an opened image when it is
# accessed for the first time.
def load(self):
"Explicitly load pixel data."
if self.im and self.palette and self.palette.dirty:
# realize palette
apply(self.im.putpalette, self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if self.info.has_key("transparency"):
self.im.putpalettealpha(self.info["transparency"], 0)
self.palette.mode = "RGBA"
##
# Verify file contents. For data read from a file, this method
# attempts to determine if the file is broken, without actually
# decoding the image data. If this method finds any problems, it
# raises suitable exceptions. If you need to load the image after
# using this method, you must reopen the image file.
def verify(self):
"Verify file contents."
pass
##
# Returns a converted copy of an image. For the "P" mode, this
# translates pixels through the palette. If mode is omitted, a
# mode is chosen so that all information in the image and the
# palette can be represented without a palette.
# <p>
# The current version supports all possible conversions between
# "L", "RGB" and "CMYK."
# <p>
# When translating a colour image to black and white (mode "L"),
# the library uses the ITU-R 601-2 luma transform:
# <p>
# <b>L = R * 299/1000 + G * 587/1000 + B * 114/1000</b>
# <p>
# When translating a greyscale image into a bilevel image (mode
# "1"), all non-zero values are set to 255 (white). To use other
# thresholds, use the <b>point</b> method.
#
# @def convert(mode, matrix=None)
# @param mode The requested mode.
# @param matrix An optional conversion matrix. If given, this
# should be 4- or 16-tuple containing floating point values.
# @return An Image object.
def convert(self, mode=None, data=None, dither=None,
palette=WEB, colors=256):
"Convert to other pixel format"
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if data:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, data)
return self._new(im)
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
return self._new(im)
# colourspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
return self._new(im)
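##
# Illustrative sketch (not part of the original module): typical uses of
# convert(), assuming an "RGB" source image named im.
#
#     grey = im.convert("L")                    # ITU-R 601-2 luma
#     pal  = im.convert("P", palette=ADAPTIVE)  # adaptive 256-colour palette
#     bw   = im.convert("1")                    # dithered bilevel copy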
def quantize(self, colors=256, method=0, kmeans=0, palette=None):
# methods:
# 0 = median cut
# 1 = maximum coverage
# NOTE: this functionality will be moved to the extended
# quantizer interface in a later version of PIL.
self.load()
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
##
# Copies the image. Use this method if you wish to paste things
# into an image, but still retain the original.
#
# @return An Image object.
def copy(self):
"Copy raster data"
self.load()
im = self.im.copy()
return self._new(im)
##
# Returns a rectangular region from the current image. The box is
# a 4-tuple defining the left, upper, right, and lower pixel
# coordinate.
# <p>
# This is a lazy operation. Changes to the source image may or
# may not be reflected in the cropped image. To break the
# connection, call the <b>load</b> method on the cropped copy.
#
# @return An Image object.
def crop(self, box=None):
"Crop region from image"
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
##
# Configures the image file loader so it returns a version of the
# image that as closely as possible matches the given mode and
# size. For example, you can use this method to convert a colour
# JPEG to greyscale while loading it, or to extract a 128x192
# version from a PCD file.
# <p>
# Note that this method modifies the Image object in place. If
# the image has already been loaded, this method has no effect.
#
# @param mode The requested mode.
# @param size The requested size.
def draft(self, mode, size):
"Configure image decoder"
pass
##
# Filter image by the given filter. For a list of available
# filters, see the <b>ImageFilter</b> module.
#
# @param filter Filter kernel.
# @return An Image object.
# @see ImageFilter
def filter(self, filter):
"Apply environment filter to image"
self.load()
from ImageFilter import Filter
if not isinstance(filter, Filter):
filter = filter()
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
##
# Returns a tuple containing the name of each band. For example,
# <b>getbands</b> on an RGB image returns ("R", "G", "B").
#
# @return A tuple containing band names.
def getbands(self):
"Get band names"
return _MODEINFO[self.mode][2]
##
# Calculates the bounding box of the non-zero regions in the
# image.
# @return The bounding box is returned as a 4-tuple defining the
# left, upper, right, and lower pixel coordinate. If the image
# is completely empty, this method returns None.
def getbbox(self):
"Get bounding box of actual data (non-zero pixels) in image"
self.load()
return self.im.getbbox()
##
# Returns the contents of an image as a sequence object containing
# pixel values. The sequence object is flattened, so that values
# for line one follow directly after the values of line zero, and
# so on.
# <p>
# Note that the sequence object returned by this method is an
# internal PIL data type, which only supports certain sequence
# operations. To convert it to an ordinary sequence (e.g. for
# printing), use <b>list(im.getdata())</b>.
#
# @param band What band to return. The default is to return
# all bands. To return a single band, pass in the index
# value (e.g. 0 to get the "R" band from an "RGB" image).
# @return A sequence-like object.
def getdata(self, band = None):
"Get image data as sequence object."
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
##
# Get the minimum and maximum pixel values for each band in
# the image.
#
# @return For a single-band image, a 2-tuple containing the
# minimum and maximum pixel value. For a multi-band image,
# a tuple containing one 2-tuple for each band.
def getextrema(self):
"Get min/max value"
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
##
# Returns the pixel value at a given position.
#
# @param xy The coordinate, given as (x, y).
# @return The pixel value. If the image is a multi-layer image,
# this method returns a tuple.
def getpixel(self, xy):
"Get pixel value"
self.load()
return self.im.getpixel(xy)
def getprojection(self):
"Get projection to x and y axes"
self.load()
x, y = self.im.getprojection()
return map(ord, x), map(ord, y)
##
# Returns a histogram for the image. The histogram is returned as
# a list of pixel counts, one for each pixel value in the source
# image. If the image has more than one band, the histograms for
# all bands are concatenated (for example, the histogram for an
# "RGB" image contains 768 values).
# <p>
# A bilevel image (mode "1") is treated as a greyscale ("L") image
# by this method.
# <p>
# If a mask is provided, the method returns a histogram for those
# parts of the image where the mask image is non-zero. The mask
# image must have the same size as the image, and be either a
# bi-level image (mode "1") or a greyscale image ("L").
#
# @def histogram(mask=None)
# @param mask An optional mask.
# @return A list containing pixel counts.
def histogram(self, mask=None, extrema=None):
"Take histogram of image"
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
##
# (Deprecated) Returns a copy of the image where the data has been
# offset by the given distances. Data wraps around the edges. If
# yoffset is omitted, it is assumed to be equal to xoffset.
# <p>
# This method is deprecated. New code should use the <b>offset</b>
# function in the <b>ImageChops</b> module.
#
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(self, xoffset, yoffset=None):
"(deprecated) Offset image in horizontal and/or vertical direction"
import ImageChops
return ImageChops.offset(self, xoffset, yoffset)
##
# Pastes another image into this image. The box argument is either
# a 2-tuple giving the upper left corner, a 4-tuple defining the
# left, upper, right, and lower pixel coordinate, or None (same as
# (0, 0)). If a 4-tuple is given, the size of the pasted image
# must match the size of the region.
# <p>
# If the modes don't match, the pasted image is converted to the
# mode of this image (see the <b>convert</b> method for details).
# <p>
# Instead of an image, the source can be an integer or tuple
# containing pixel values. The method then fills the region
# with the given colour. When creating RGB images, you can
# also use colour strings as supported by the ImageColor module.
# <p>
# If a mask is given, this method updates only the regions
# indicated by the mask. You can use either "1", "L" or "RGBA"
# images (in the latter case, the alpha band is used as mask).
# Where the mask is 255, the given image is copied as is. Where
# the mask is 0, the current value is preserved. Intermediate
# values can be used for transparency effects.
# <p>
# Note that if you paste an "RGBA" image, the alpha band is
# ignored. You can work around this by using the same image as
# both source image and mask.
#
# @param im Source image or pixel value (integer or tuple).
# @param box A 4-tuple giving the region to paste into. If a
# 2-tuple is used instead, it's treated as the upper left
# corner. If None is used instead, the source is pasted
# into the upper left corner.
# @param mask An optional mask image.
# @return An Image object.
def paste(self, im, box=None, mask=None):
"Paste other image into region"
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
# upper left corner given; get size from image or mask
if isImageType(im):
box = box + (box[0]+im.size[0], box[1]+im.size[1])
else:
box = box + (box[0]+mask.size[0], box[1]+mask.size[1])
if isStringType(im):
import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
##
# Map image through lookup table or function.
#
# @param lut A lookup table, containing 256 values per band in the
# image. A function can be used instead, it should take a single
# argument. The function is called once for each possible pixel
# value, and the resulting table is applied to all bands of the
# image.
# @param mode Output mode (default is same as input). In the
# current version, this can only be used if the source image
# has mode "L" or "P", and the output has mode "1".
# @return An Image object.
def point(self, lut, mode=None):
"Map image through lookup table"
if self.mode in ("I", "I;16", "F"):
# floating point; lut must be a valid expression
scale, offset = _getscaleoffset(lut)
self.load()
im = self.im.point_transform(scale, offset);
else:
# integer image; use lut and mode
self.load()
if not isSequenceType(lut):
# if it isn't a list, it should be a function
lut = map(lut, range(256)) * self.im.bands
im = self.im.point(lut, mode)
return self._new(im)
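##
# Illustrative sketch (not part of the original module): point() with a
# per-value function, assuming a greyscale ("L") image named im.
#
#     inverted = im.point(lambda i: 255 - i)
#     binary   = im.point(lambda i: i > 128 and 255)   # each pixel 0 or 255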
##
# Replace the alpha layer in the current image. The image must be
# an "RGBA" image, and the band must be either "L" or "1".
#
# @param im The new alpha layer.
def putalpha(self, im):
"Set alpha layer"
if self.mode != "RGBA" or im.mode not in ("1", "L"):
raise ValueError("illegal image mode")
im.load()
self.load()
if im.mode == "1":
im = im.convert("L")
self.im.putband(im.im, 3)
##
# Copy pixel data to this image. This method copies data from a
# sequence object into the image, starting at the upper left
# corner (0, 0), and continuing until either the image or the
# sequence ends. The scale and offset values are used to adjust
# the sequence values: <b>pixel = value*scale + offset</b>.
#
# @param data A sequence object.
# @param scale An optional scale value. The default is 1.0.
# @param offset An optional offset value. The default is 0.0.
def putdata(self, data, scale=1.0, offset=0.0):
"Put data from a sequence object into an image."
self.load() # hmm...
self.im.putdata(data, scale, offset)
##
# Attach a palette to a "P" or "L" image. The palette sequence
# should contain 768 integer values, where each group of three
# values represent the red, green, and blue values for the
# corresponding pixel index. Instead of an integer sequence, you
# can use an 8-bit string.
#
# @def putpalette(data)
# @param data A palette sequence.
def putpalette(self, data, rawmode="RGB"):
"Put palette data into an image."
self.load()
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
if not isStringType(data):
data = string.join(map(chr, data), "")
self.mode = "P"
self.palette = ImagePalette.raw(rawmode, data)
self.palette.mode = "RGB"
self.load() # install new palette
##
# Modifies the pixel at the given position. The colour is given as
# a single numerical value for single-band images, and a tuple for
# multi-band images.
# <p>
# Note that this method is relatively slow. For more extensive
# changes, use <b>paste</b> or the <b>ImageDraw</b> module
# instead.
#
# @param xy The pixel coordinate, given as (x, y).
# @param value The pixel value.
# @see #Image.paste
# @see #Image.putdata
# @see ImageDraw
def putpixel(self, xy, value):
"Set pixel value"
self.load()
return self.im.putpixel(xy, value)
##
# Returns a resized copy of an image.
#
# @def resize(size, filter=NEAREST)
# @param size The requested size in pixels, as a 2-tuple:
# (width, height).
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment), or
# <b>ANTIALIAS</b> (a high-quality downsampling filter).
# If omitted, or if the image has mode "1" or "P", it is
# set <b>NEAREST</b>.
# @return An Image object.
def resize(self, size, resample=NEAREST):
"Resize image"
if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
if resample == ANTIALIAS:
# requires stretch support (imToolkit & PIL 1.1.3)
try:
im = self.im.stretch(size, resample)
except AttributeError:
raise ValueError("unsupported resampling filter")
else:
im = self.im.resize(size, resample)
return self._new(im)
##
# Returns a rotated image. This method returns a copy of an
# image, rotated the given number of degrees counter clockwise
# around its centre.
#
# @def rotate(angle, filter=NEAREST)
# @param angle In degrees counter clockwise.
# @param filter An optional resampling filter. This can be
# one of <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or <b>BICUBIC</b>
# (cubic spline interpolation in a 4x4 environment).
# If omitted, or if the image has mode "1" or "P", it is
# set <b>NEAREST</b>.
# @return An Image object.
def rotate(self, angle, resample=NEAREST):
"Rotate image. Angle given as degrees counter-clockwise."
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample))
##
# Saves the image under the given filename. If no format is
# specified, the format to use is determined from the filename
# extension, if possible.
# <p>
# Keyword options can be used to provide additional instructions
# to the writer. If a writer doesn't recognise an option, it is
# silently ignored. The available options are described later in
# this handbook.
# <p>
# You can use a file object instead of a filename. In this case,
# you must always specify the format. The file object must
# implement the <b>seek</b>, <b>tell</b>, and <b>write</b>
# methods, and be opened in binary mode.
#
# @def save(file, format=None, **options)
# @param file File name or file object.
# @param format Optional format override. If omitted, the
# format to use is determined from the filename extension.
# If a file object was used instead of a filename, this
# parameter should always be used.
# @param **options Extra parameters to the image writer.
# @return None
def save(self, fp, format=None, **params):
"Save image to file or stream"
if isStringType(fp):
import __builtin__
filename = fp
fp = __builtin__.open(fp, "wb")
close = 1
else:
if hasattr(fp, "name") and isStringType(fp.name):
filename = fp.name
else:
filename = ""
close = 0
self.encoderinfo = params
self.encoderconfig = ()
self.load()
preinit()
ext = string.lower(os.path.splitext(filename)[1])
try:
if not format:
format = EXTENSION[ext]
SAVE[string.upper(format)](self, fp, filename)
except KeyError, v:
init()
if not format:
format = EXTENSION[ext]
SAVE[string.upper(format)](self, fp, filename)
if close:
fp.close()
##
# Seeks to the given frame in a sequence file. If you seek beyond
# the end of the sequence, the method raises an <b>EOFError</b>
# exception. When a sequence file is opened, the library
# automatically seeks to frame 0.
# <p>
# Note that in the current version of the library, most sequence
# formats only allow you to seek to the next frame.
#
# @param frame Frame number, starting at 0.
# @exception EOFError Attempt to seek beyond the end of the sequence.
# @see #Image.tell
def seek(self, frame):
"Seek to given frame in sequence file"
# overridden by file handlers
if frame != 0:
raise EOFError
##
# Displays an image. This method is mainly intended for
# debugging purposes.
# <p>
# On Unix platforms, this method saves the image to a temporary
# PPM file, and calls the <b>xv</b> utility.
# <p>
# On Windows, it saves the image to a temporary BMP file, and uses
# the standard BMP display utility to show it (usually Paint).
#
# @def show(title=None)
# @param title Optional title to use for the image window,
# where possible.
def show(self, title=None, command=None):
"Display image (for debug purposes only)"
try:
import ImageTk
ImageTk._show(self, title)
# note: caller must enter mainloop!
except:
_showxv(self, title, command)
##
# Split image into individual bands. This methods returns a tuple
# of individual image bands from an image. For example, splitting
# an "RGB" image creates three new images each containing a copy
# of one of the original bands (red, green, blue).
#
# @return A tuple containing bands.
def split(self):
"Split image into bands"
ims = []
self.load()
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
##
# Returns the current frame number.
#
# @return Frame number, starting with 0.
# @see #Image.seek
def tell(self):
"Return current frame number"
return 0
##
# Make thumbnail. This method modifies the image to contain a
# thumbnail version of itself, no larger than the given size.
# This method calculates an appropriate thumbnail size to preserve
# the aspect of the image, calls the <b>draft</b> method to
# configure the file reader (where applicable), and finally
# resizes the image.
# <p>
# Note that the bilinear and bicubic filters in the current
# version of PIL are not well-suited for thumbnail generation.
# You should use <b>ANTIALIAS</b> unless speed is much more
# important than quality.
# <p>
# Also note that this function modifies the Image object in place.
# If you need to use the full resolution image as well, apply this
# method to a <b>copy</b> of the original image.
#
# @param size Requested size.
# @param resample Optional resampling filter. This can be one
# of <b>NEAREST</b>, <b>BILINEAR</b>, <b>BICUBIC</b>, or
# <b>ANTIALIAS</b> (best quality). If omitted, it defaults
# to <b>NEAREST</b> (this will be changed to ANTIALIAS in
# future versions).
# @return None
def thumbnail(self, size, resample=NEAREST):
"Create thumbnail representation (modifies image in place)"
# FIXME: the default resampling filter will be changed
# to ANTIALIAS in future versions
# preserve aspect ratio
x, y = self.size
if x > size[0]: y = y * size[0] / x; x = size[0]
if y > size[1]: x = x * size[1] / y; y = size[1]
size = x, y
if size == self.size:
return
self.draft(None, size)
self.load()
try:
im = self.resize(size, resample)
except ValueError:
if resample != ANTIALIAS:
raise
im = self.resize(size, NEAREST) # fallback
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
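##
# Illustrative sketch (not part of the original module): since thumbnail()
# modifies the image in place, work on a copy to keep the original; the
# output file name is a placeholder.
#
#     thumb = im.copy()
#     thumb.thumbnail((128, 128), ANTIALIAS)
#     thumb.save("thumb.jpg", "JPEG")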
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
##
# Transform image. This method creates a new image with the
# given size, and the same mode as the original, and copies
# data to the new image using the given transform.
# <p>
# @def transform(size, method, data, resample=NEAREST)
# @param size The output size.
# @param method The transformation method. This is one of
# <b>EXTENT</b> (cut out a rectangular subregion), <b>AFFINE</b>
# (affine transform), <b>QUAD</b> (map a quadrilateral to a
# rectangle), or <b>MESH</b> (map a number of source quadrilaterals
# in one operation).
# @param data Extra data to the transformation method.
# @param resample Optional resampling filter. It can be one of
# <b>NEAREST</b> (use nearest neighbour), <b>BILINEAR</b>
# (linear interpolation in a 2x2 environment), or
# <b>BICUBIC</b> (cubic spline interpolation in a 4x4
# environment). If omitted, or if the image has mode
# "1" or "P", it is set to <b>NEAREST</b>.
# @return An Image object.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"Transform image"
import ImageTransform
if isinstance(method, ImageTransform.Transform):
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
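##
# Illustrative sketch (not part of the original module): EXTENT behaves
# like a combined crop and resize; this maps the top-left 256x256 region
# of im onto a 256x256 output image.
#
#     out = im.transform((256, 256), EXTENT, (0, 0, 256, 256))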
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
"Transform into current image"
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8]
x0, y0 = nw; As = 1.0 / w; At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
##
# Returns a flipped or rotated copy of an image.
#
# @param method One of <b>FLIP_LEFT_RIGHT</b>, <b>FLIP_TOP_BOTTOM</b>,
# <b>ROTATE_90</b>, <b>ROTATE_180</b>, or <b>ROTATE_270</b>.
def transpose(self, method):
"Transpose image (flip or rotate in 90 degree steps)"
self.load()
im = self.im.transpose(method)
return self._new(im)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
self.mode = im.mode
self.size = box[2]-box[0], box[3]-box[1]
self.__crop = box
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
##
# Creates a new image with the given mode and size.
#
# @param mode The mode to use for the new image.
# @param size A 2-tuple, containing (width, height)
# @param color What colour to use for the image. Default is black.
# If given, this should be a single integer or floating point value
# for single-band modes, and a tuple for multi-band modes (one value
# per band). When creating RGB images, you can also use colour
# strings as supported by the ImageColor module. If the colour is
# None, the image is not initialised.
# @return An Image object.
def new(mode, size, color=0):
"Create a new image"
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
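##
# Illustrative sketch (not part of the original module): three ways to
# call the factory above; the colour-string form goes through ImageColor.
def _example_new():
    black = new("RGB", (64, 64))               # default fill is black
    red = new("RGB", (64, 64), (255, 0, 0))    # per-band tuple
    green = new("RGB", (64, 64), "#00ff00")    # css3-style hex specifier
    return black, red, green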
##
# Creates an image memory from pixel data in a string.
# <p>
# In its simplest form, this function takes three arguments
# (mode, size, and unpacked pixel data).
# <p>
# You can also use any pixel decoder supported by PIL. For more
# information on available decoders, see the section <a
# href="decoder"><i>Writing Your Own File Decoder</i></a>.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a <b>StringIO</b>
# object, and use <b>open</b> to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string containing raw data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def fromstring(mode, size, data, decoder_name="raw", *args):
"Load image from string"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.fromstring(data, decoder_name, args)
return im
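##
# Illustrative sketch (not part of the original module): tostring() and
# fromstring() round-trip raw pixel data for any loaded image.
def _example_fromstring_roundtrip(im):
    return fromstring(im.mode, im.size, im.tostring())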
##
# Creates an image memory from pixel data in a string or byte buffer.
# <p>
# This function is similar to <b>fromstring</b>, but uses data in
# the byte buffer, where possible. Images created by this function
# are usually marked as readonly.
# <p>
# Note that this function decodes pixel data only, not entire images.
# If you have an entire image in a string, wrap it in a <b>StringIO</b>
# object, and use <b>open</b> to load it.
#
# @param mode The image mode.
# @param size The image size.
# @param data An 8-bit string or other buffer object containing raw
# data for the given mode.
# @param decoder_name What decoder to use.
# @param *args Additional parameters for the given decoder.
# @return An Image object.
def frombuffer(mode, size, data, decoder_name="raw", *args):
"Load image from string or buffer"
# may pass tuple instead of argument list
if len(args) == 1 and isTupleType(args[0]):
args = args[0]
if decoder_name == "raw":
if args == ():
args = mode, 0, -1
if args[0] in _MAPMODES:
im = new(mode, (1,1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return apply(fromstring, (mode, size, data, decoder_name, args))
##
# Opens and identifies the given image file.
# <p>
# This is a lazy operation; this function identifies the file, but the
# actual image data is not read from the file until you try to process
# the data (or call the <b>load</b> method).
#
# @def open(file, mode="r")
# @param file A filename (string) or a file object. The file object
# must implement <b>read</b>, <b>seek</b>, and <b>tell</b> methods,
# and be opened in binary mode.
# @param mode The mode. If given, this argument must be "r".
# @return An Image object.
# @exception IOError If the file cannot be found, or the image cannot be
# opened and identified.
# @see #new
def open(fp, mode="r"):
"Open an image file, without loading the raster data"
if mode != "r":
raise ValueError("bad mode")
if isStringType(fp):
import __builtin__
filename = fp
fp = __builtin__.open(fp, "rb")
else:
filename = ""
prefix = fp.read(16)
preinit()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
init()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
pass
raise IOError("cannot identify image file")
#
# Image processing.
##
# Creates a new image by interpolating between the given images, using
# a constant alpha.
#
# <pre>
# out = image1 * (1.0 - alpha) + image2 * alpha
# </pre>
#
# @param im1 The first image.
# @param im2 The second image. Must have the same mode and size as
# the first image.
# @param alpha The interpolation alpha factor. If alpha is 0.0, a
# copy of the first image is returned. If alpha is 1.0, a copy of
# the second image is returned. There are no restrictions on the
# alpha value. If necessary, the result is clipped to fit into
# the allowed output range.
# @return An Image object.
def blend(im1, im2, alpha):
"Interpolate between images."
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
##
# Creates a new image by interpolating between the given images,
# using the mask as alpha.
#
# @param image1 The first image.
# @param image2 The second image. Must have the same mode and
# size as the first image.
# @param mask A mask image. This image can have mode
# "1", "L", or "RGBA", and must have the same size as the
# other two images.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
image = image2.copy()
image.paste(image1, None, mask)
return image
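##
# Illustrative sketch (not part of the original module): a 50/50 cross-fade
# followed by a masked paste; all three inputs must share the same size,
# and the mask mode must be "1", "L" or "RGBA".
def _example_blend_composite(im1, im2, mask):
    half = blend(im1, im2, 0.5)
    return composite(half, im2, mask)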
##
# Applies the function (which should take one argument) to each pixel
# in the given image. If the image has more than one band, the same
# function is applied to each band. Note that the function is
# evaluated once for each possible pixel value, so you cannot use
# random components or other generators.
#
# @def eval(image, function)
# @param image The input image.
# @param function A function object, taking one integer argument.
# @return An Image object.
def eval(image, *args):
"Evaluate image expression"
return image.point(args[0])
##
# Creates a new image from a number of single-band images.
#
# @param mode The mode to use for the output image.
# @param bands A sequence containing one single-band image for
# each band in the output image. All bands must have the
# same size.
# @return An Image object.
def merge(mode, bands):
"Merge a set of single band images into a new multiband image."
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
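##
# Illustrative sketch (not part of the original module): split() and
# merge() together allow band shuffling, here swapping red and blue.
def _example_swap_bands(im):
    r, g, b = im.split()
    return merge("RGB", (b, g, r))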
# --------------------------------------------------------------------
# Plugin registry
##
# Register an image file plugin. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param factory An image file factory method.
# @param accept An optional function that can be used to quickly
# reject images having another format.
def register_open(id, factory, accept=None):
id = string.upper(id)
ID.append(id)
OPEN[id] = factory, accept
##
# Register an image MIME type. This function should not be used
# in application code.
#
# @param id An image format identifier.
# @param mimetype The image MIME type for this format.
def register_mime(id, mimetype):
MIME[string.upper(id)] = mimetype
##
# Register an image save function. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param driver A function to save images in this format.
def register_save(id, driver):
SAVE[string.upper(id)] = driver
##
# Register an image extension. This function should not be
# used in application code.
#
# @param id An image format identifier.
# @param extension An extension used for this format.
def register_extension(id, extension):
EXTENSION[string.lower(extension)] = string.upper(id)
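##
# Illustrative sketch (not part of the original module): the registration
# calls a file-format plugin typically makes at import time. The "SPAM"
# format, SpamImageFile factory, and _accept/_save helpers are made up.
#
#     register_open("SPAM", SpamImageFile, _accept)
#     register_save("SPAM", _save)
#     register_extension("SPAM", ".spam")
#     register_mime("SPAM", "image/x-spam")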
# --------------------------------------------------------------------
# Simple display support
def _showxv(self, title=None, command=None):
if os.name == "nt":
format = "BMP"
if not command:
command = "start"
elif os.environ.get("OSTYPE") == "darwin":
format = "JPEG"
if not command:
command = "open -a /Applications/Preview.app"
else:
format = None
if not command:
command = "xv"
if title:
command = command + " -name \"%s\"" % title
if self.mode == "I;16":
# @PIL88 @PIL101
# "I;16" isn't an 'official' mode, but we still want to
# provide a simple way to show 16-bit images.
base = "L"
else:
base = getmodebase(self.mode)
if base != self.mode and self.mode != "1":
file = self.convert(base)._dump(format=format)
else:
file = self._dump(format=format)
if os.name == "nt":
os.system("%s %s" % (command, file))
# FIXME: this leaves temporary files around...
elif os.environ.get("OSTYPE") == "darwin":
# on darwin, open returns immediately, so without a delay the temp
# file would be removed while the app is still opening it
os.system("(%s %s; sleep 20; rm -f %s)&" % (command, file, file))
else:
os.system("(%s %s; rm -f %s)&" % (command, file, file))
| gpl-2.0 |
mhnatiuk/phd_sociology_of_religion | scrapper/build/scrapy/build/lib.linux-x86_64-2.7/scrapy/commands/deploy.py | 15 | 8793 | from __future__ import print_function
import sys
import os
import glob
import tempfile
import shutil
import time
import urllib2
import netrc
import json
from urlparse import urlparse, urljoin
from subprocess import Popen, PIPE, check_call
from w3lib.form import encode_multipart
from scrapy.command import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.utils.http import basic_auth_header
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.conf import get_config, closest_scrapy_cfg
_SETUP_PY_TEMPLATE = \
"""# Automatically created by: scrapy deploy
from setuptools import setup, find_packages
setup(
name = 'project',
version = '1.0',
packages = find_packages(),
entry_points = {'scrapy': ['settings = %(settings)s']},
)
"""
class Command(ScrapyCommand):
requires_project = True
def syntax(self):
return "[options] [ [target] | -l | -L <target> ]"
def short_desc(self):
return "Deploy project in Scrapyd target"
def long_desc(self):
return "Deploy the current project into the given Scrapyd server " \
"(known as target)"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("-p", "--project",
help="the project name in the target")
parser.add_option("-v", "--version",
help="the version to deploy. Defaults to current timestamp")
parser.add_option("-l", "--list-targets", action="store_true", \
help="list available targets")
parser.add_option("-d", "--debug", action="store_true",
help="debug mode (do not remove build dir)")
parser.add_option("-L", "--list-projects", metavar="TARGET", \
help="list available projects on TARGET")
parser.add_option("--egg", metavar="FILE",
help="use the given egg, instead of building it")
parser.add_option("--build-egg", metavar="FILE",
help="only build the egg, don't deploy it")
def run(self, args, opts):
try:
import setuptools
except ImportError:
raise UsageError("setuptools not installed")
urllib2.install_opener(urllib2.build_opener(HTTPRedirectHandler))
if opts.list_targets:
for name, target in _get_targets().items():
print("%-20s %s" % (name, target['url']))
return
if opts.list_projects:
target = _get_target(opts.list_projects)
req = urllib2.Request(_url(target, 'listprojects.json'))
_add_auth_header(req, target)
f = urllib2.urlopen(req)
projects = json.loads(f.read())['projects']
print(os.linesep.join(projects))
return
tmpdir = None
if opts.build_egg: # build egg only
egg, tmpdir = _build_egg()
_log("Writing egg to %s" % opts.build_egg)
shutil.copyfile(egg, opts.build_egg)
else: # build egg and deploy
target_name = _get_target_name(args)
target = _get_target(target_name)
project = _get_project(target, opts)
version = _get_version(target, opts)
if opts.egg:
_log("Using egg: %s" % opts.egg)
egg = opts.egg
else:
_log("Packing version %s" % version)
egg, tmpdir = _build_egg()
if not _upload_egg(target, egg, project, version):
self.exitcode = 1
if tmpdir:
if opts.debug:
_log("Output dir not removed: %s" % tmpdir)
else:
shutil.rmtree(tmpdir)
def _log(message):
sys.stderr.write(message + os.linesep)
def _get_target_name(args):
if len(args) > 1:
raise UsageError("Too many arguments: %s" % ' '.join(args))
elif args:
return args[0]
elif len(args) < 1:
return 'default'
def _get_project(target, opts):
project = opts.project or target.get('project')
if not project:
raise UsageError("Missing project")
return project
def _get_option(section, option, default=None):
cfg = get_config()
return cfg.get(section, option) if cfg.has_option(section, option) \
else default
def _get_targets():
cfg = get_config()
baset = dict(cfg.items('deploy')) if cfg.has_section('deploy') else {}
targets = {}
if 'url' in baset:
targets['default'] = baset
for x in cfg.sections():
if x.startswith('deploy:'):
t = baset.copy()
t.update(cfg.items(x))
targets[x[7:]] = t
return targets
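# Illustrative scrapy.cfg layout that these helpers read (an assumption,
# added for clarity; not part of the original source):
#
#   [deploy]                        # becomes the "default" target
#   url = http://localhost:6800/
#   project = mybot
#
#   [deploy:prod]                   # named targets use the "deploy:" prefix
#   url = http://scrapyd.example.com:6800/
#   username = scrapy               # picked up by _add_auth_header()
#   version = GIT                   # optional; see _get_version() below
#
# Named targets inherit any values set in the base [deploy] section.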
def _get_target(name):
try:
return _get_targets()[name]
except KeyError:
raise UsageError("Unknown target: %s" % name)
def _url(target, action):
return urljoin(target['url'], action)
def _get_version(target, opts):
version = opts.version or target.get('version')
if version == 'HG':
p = Popen(['hg', 'tip', '--template', '{rev}'], stdout=PIPE)
d = 'r%s' % p.communicate()[0]
p = Popen(['hg', 'branch'], stdout=PIPE)
b = p.communicate()[0].strip('\n')
return '%s-%s' % (d, b)
elif version == 'GIT':
p = Popen(['git', 'describe', '--always'], stdout=PIPE)
d = p.communicate()[0].strip('\n')
p = Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=PIPE)
b = p.communicate()[0].strip('\n')
return '%s-%s' % (d, b)
elif version:
return version
else:
return str(int(time.time()))
def _upload_egg(target, eggpath, project, version):
with open(eggpath, 'rb') as f:
eggdata = f.read()
data = {
'project': project,
'version': version,
'egg': ('project.egg', eggdata),
}
body, boundary = encode_multipart(data)
url = _url(target, 'addversion.json')
headers = {
'Content-Type': 'multipart/form-data; boundary=%s' % boundary,
'Content-Length': str(len(body)),
}
req = urllib2.Request(url, body, headers)
_add_auth_header(req, target)
_log('Deploying to project "%s" in %s' % (project, url))
return _http_post(req)
def _add_auth_header(request, target):
if 'username' in target:
u, p = target.get('username'), target.get('password', '')
request.add_header('Authorization', basic_auth_header(u, p))
else: # try netrc
try:
host = urlparse(target['url']).hostname
a = netrc.netrc().authenticators(host)
request.add_header('Authorization', basic_auth_header(a[0], a[2]))
except (netrc.NetrcParseError, IOError, TypeError):
pass
def _http_post(request):
try:
f = urllib2.urlopen(request)
_log("Server response (%s):" % f.code)
print(f.read())
return True
except urllib2.HTTPError as e:
_log("Deploy failed (%s):" % e.code)
print(e.read())
except urllib2.URLError as e:
_log("Deploy failed: %s" % e)
def _build_egg():
closest = closest_scrapy_cfg()
os.chdir(os.path.dirname(closest))
if not os.path.exists('setup.py'):
settings = get_config().get('settings', 'default')
_create_default_setup_py(settings=settings)
d = tempfile.mkdtemp(prefix="scrapydeploy-")
o = open(os.path.join(d, "stdout"), "wb")
e = open(os.path.join(d, "stderr"), "wb")
retry_on_eintr(check_call, [sys.executable, 'setup.py', 'clean', '-a', 'bdist_egg', '-d', d], stdout=o, stderr=e)
o.close()
e.close()
egg = glob.glob(os.path.join(d, '*.egg'))[0]
return egg, d
def _create_default_setup_py(**kwargs):
with open('setup.py', 'w') as f:
f.write(_SETUP_PY_TEMPLATE % kwargs)
class HTTPRedirectHandler(urllib2.HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
newurl = newurl.replace(' ', '%20')
if code in (301, 307):
return urllib2.Request(newurl,
data=req.get_data(),
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
elif code in (302, 303):
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type"))
return urllib2.Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
| gpl-2.0 |
Alwnikrotikz/cortex-vfx | test/IECore/CubeColorLookupTest.py | 12 | 2884 | ##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import random
import math
import os
from IECore import *
class GammaOp( ColorTransformOp ) :
def __init__( self, gamma = 1.0 ) :
ColorTransformOp.__init__( self, "applies gamma" )
self.gamma = gamma
def begin( self, operands ) :
pass
def transform( self, color ) :
return Color3f(
math.pow( color.r, 1.0 / self.gamma ),
math.pow( color.g, 1.0 / self.gamma ),
math.pow( color.b, 1.0 / self.gamma )
)
def end( self ) :
pass
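# Illustrative arithmetic (added for clarity; not part of the original test):
# GammaOp(2.0).transform applies c ** (1 / 2.0) per channel, so
# Color3f(0.25, 0.25, 0.25) maps to Color3f(0.5, 0.5, 0.5).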
class CubeColorLookupTest( unittest.TestCase ) :
def testOpConstruction( self ) :
gammaOp = GammaOp( 2.0 )
dim = V3i( 48, 66, 101 )
cubeLookup = CubeColorLookupf( dim, gammaOp )
random.seed( 23 )
# Perform 100 random comparisons with the LUT against the original function
for i in range( 0, 100 ) :
c = Color3f( random.random(), random.random(), random.random() )
c1 = cubeLookup( c )
c2 = gammaOp.transform( c )
self.assertAlmostEqual( c1.r, c2.r, 1 )
self.assertAlmostEqual( c1.g, c2.g, 1 )
self.assertAlmostEqual( c1.b, c2.b, 1 )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
devassistant/dapi | wsgi.py | 1 | 40852 | #!/usr/bin/python
import os
virtenv = os.environ['OPENSHIFT_PYTHON_DIR'] + '/virtenv/'
virtualenv = os.path.join(virtenv, 'bin/activate_this.py')
try:
execfile(virtualenv, dict(__file__=virtualenv))
except IOError:
pass
#
# IMPORTANT: Put any additional includes below this line. If placed above this
# line, it's possible required libraries won't be in your searchable path
#
def application(environ, start_response):
ctype = 'text/plain'
if environ['PATH_INFO'] == '/health':
response_body = "1"
elif environ['PATH_INFO'] == '/env':
response_body = ['%s: %s' % (key, value)
for key, value in sorted(environ.items())]
response_body = '\n'.join(response_body)
else:
ctype = 'text/html'
response_body = '''<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Welcome to OpenShift</title>
<style>
/*!
* Bootstrap v3.0.0
*
* Copyright 2013 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
*/
.logo {
background-size: cover;
height: 58px;
width: 180px;
margin-top: 6px;
background-image: url(data:image/svg+xml;base64,<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 14.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 43363)  -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="180px"
	 height="58px" viewBox="-127.391 432.019 180 58" enable-background="new -127.391 432.019 180 58" xml:space="preserve">
<g id="Layer_1" display="none">
	<g display="inline">
		<path d="M-121.385,438.749c-0.416,0.361-1.006,0.541-1.771,0.541h-2.774v-7h2.874c0.612,0,1.099,0.155,1.462,0.464
			c0.362,0.31,0.544,0.76,0.544,1.353c0,0.359-0.084,0.651-0.253,0.874c-0.168,0.223-0.378,0.398-0.629,0.524
			c0.139,0.04,0.278,0.102,0.417,0.185s0.265,0.192,0.377,0.326c0.112,0.133,0.204,0.293,0.273,0.48s0.104,0.401,0.104,0.641
			C-120.761,437.852-120.969,438.389-121.385,438.749z M-122.312,433.514c-0.146-0.176-0.396-0.264-0.75-0.264h-1.88v1.8h1.88
			c0.173,0,0.322-0.024,0.445-0.074c0.123-0.05,0.223-0.116,0.3-0.199c0.077-0.083,0.133-0.177,0.17-0.283s0.055-0.215,0.055-0.328
			C-122.091,433.906-122.165,433.689-122.312,433.514z M-122.121,436.32c-0.214-0.207-0.52-0.31-0.92-0.31h-1.9v2.32h1.87
			c0.466,0,0.795-0.106,0.985-0.32s0.285-0.494,0.285-0.84C-121.801,436.81-121.908,436.527-122.121,436.32z"/>
		<path d="M-116.281,439.29v-0.506c-0.134,0.195-0.318,0.347-0.555,0.455s-0.492,0.162-0.765,0.162c-0.613,0-1.078-0.196-1.395-0.59
			c-0.316-0.393-0.475-0.98-0.475-1.76v-3.01h1.04v2.963c0,0.532,0.095,0.905,0.284,1.117c0.189,0.213,0.453,0.319,0.792,0.319
			c0.345,0,0.61-0.116,0.796-0.349c0.186-0.233,0.279-0.562,0.279-0.988v-3.063h1.04v5.25H-116.281z"/>
		<path d="M-112.697,433.165c-0.13,0.13-0.285,0.195-0.465,0.195c-0.187,0-0.345-0.065-0.475-0.195s-0.195-0.285-0.195-0.465
			c0-0.187,0.065-0.345,0.195-0.475s0.288-0.195,0.475-0.195c0.18,0,0.335,0.065,0.465,0.195s0.195,0.289,0.195,0.475
			C-112.501,432.88-112.567,433.035-112.697,433.165z M-113.682,439.29v-5.25h1.04v5.25H-113.682z"/>
		<path d="M-111.031,439.29v-6.75l1.04-0.54v7.29H-111.031z"/>
		<path d="M-105.921,439.16c-0.127,0.073-0.275,0.131-0.445,0.175c-0.17,0.043-0.358,0.065-0.565,0.065
			c-0.367,0-0.655-0.113-0.865-0.34s-0.315-0.577-0.315-1.05v-3.03h-0.75v-0.94h0.75v-1.5l1.01-0.54v2.04h1.3v0.94h-1.3v2.85
			c0,0.247,0.042,0.414,0.125,0.5c0.083,0.087,0.222,0.13,0.415,0.13c0.133,0,0.27-0.021,0.41-0.065s0.256-0.091,0.35-0.145
			L-105.921,439.16z"/>
		<path d="M-97.452,437.805c-0.12,0.343-0.287,0.633-0.5,0.87c-0.213,0.237-0.463,0.417-0.75,0.54
			c-0.287,0.124-0.6,0.185-0.94,0.185c-0.333,0-0.64-0.065-0.92-0.195c-0.28-0.13-0.523-0.315-0.73-0.555
			c-0.207-0.24-0.368-0.526-0.485-0.86s-0.175-0.707-0.175-1.12c0-0.426,0.06-0.81,0.18-1.15s0.285-0.628,0.495-0.865
			c0.21-0.237,0.457-0.417,0.74-0.54c0.284-0.124,0.592-0.185,0.925-0.185c0.333,0,0.643,0.065,0.93,0.195s0.535,0.312,0.745,0.545
			s0.374,0.519,0.49,0.855c0.116,0.337,0.175,0.708,0.175,1.115C-97.271,437.073-97.332,437.462-97.452,437.805z M-98.667,435.385
			c-0.237-0.317-0.565-0.475-0.985-0.475c-0.394,0-0.702,0.158-0.925,0.475c-0.223,0.316-0.335,0.735-0.335,1.255
			c0,0.58,0.12,1.021,0.36,1.325c0.24,0.304,0.557,0.455,0.95,0.455c0.193,0,0.37-0.046,0.53-0.14
			c0.16-0.094,0.296-0.219,0.41-0.375c0.113-0.157,0.2-0.342,0.26-0.555s0.09-0.44,0.09-0.68
			C-98.312,436.13-98.43,435.702-98.667,435.385z"/>
		<path d="M-92.812,439.29v-2.963c0-0.532-0.095-0.904-0.284-1.117c-0.189-0.213-0.453-0.319-0.791-0.319
			c-0.345,0-0.611,0.116-0.796,0.349c-0.186,0.233-0.279,0.562-0.279,0.988v3.063h-1.04v-5.25h1.04v0.506
			c0.133-0.195,0.318-0.347,0.555-0.455s0.492-0.162,0.765-0.162c0.613,0,1.078,0.197,1.395,0.59c0.316,0.394,0.475,0.98,0.475,1.76
			v3.01H-92.812z"/>
	</g>
</g>
<g id="Layer_6">
	<g>
		<path d="M-122.266,438.984c-0.39,0.344-0.955,0.516-1.695,0.516h-2.51v-7h2.56c0.28,0,0.535,0.035,0.765,0.105
			s0.43,0.176,0.6,0.319c0.17,0.143,0.301,0.324,0.395,0.544c0.093,0.22,0.14,0.479,0.14,0.779c0,0.386-0.093,0.693-0.28,0.923
			c-0.187,0.23-0.43,0.398-0.73,0.504c0.16,0.04,0.32,0.102,0.48,0.185c0.16,0.083,0.303,0.194,0.43,0.331
			c0.127,0.137,0.23,0.307,0.31,0.511s0.12,0.446,0.12,0.726C-121.681,438.121-121.875,438.641-122.266,438.984z M-123.071,433.504
			c-0.187-0.196-0.477-0.294-0.87-0.294h-1.75v2.17h1.69c0.433,0,0.743-0.108,0.93-0.323c0.187-0.216,0.28-0.476,0.28-0.781
			C-122.791,433.957-122.884,433.7-123.071,433.504z M-122.861,436.45c-0.267-0.24-0.63-0.36-1.09-0.36h-1.74v2.7h1.78
			c0.526,0,0.9-0.12,1.12-0.36c0.22-0.24,0.33-0.56,0.33-0.96C-122.46,437.03-122.594,436.69-122.861,436.45z"/>
		<path d="M-117.121,439.5v-0.64c-0.153,0.22-0.35,0.4-0.59,0.54s-0.527,0.21-0.86,0.21c-0.28,0-0.534-0.042-0.76-0.125
			c-0.227-0.083-0.42-0.213-0.58-0.39c-0.16-0.177-0.283-0.4-0.37-0.67c-0.087-0.27-0.13-0.595-0.13-0.975v-3.2h0.76v3.077
			c0,0.568,0.101,0.984,0.304,1.248s0.513,0.396,0.931,0.396c0.365,0,0.672-0.13,0.921-0.391s0.374-0.678,0.374-1.252v-3.077h0.76
			v5.25H-117.121z"/>
		<path d="M-113.906,433.155c-0.103,0.104-0.225,0.155-0.365,0.155c-0.153,0-0.284-0.052-0.39-0.155
			c-0.106-0.103-0.16-0.228-0.16-0.375c0-0.153,0.053-0.281,0.16-0.385s0.237-0.155,0.39-0.155c0.14,0,0.262,0.051,0.365,0.155
			c0.104,0.104,0.155,0.232,0.155,0.385C-113.751,432.927-113.803,433.052-113.906,433.155z M-114.661,439.5v-5.25h0.76v5.25
			H-114.661z"/>
		<path d="M-112.151,439.5v-6.87l0.76-0.42v7.29H-112.151z"/>
		<path d="M-108.721,434.89v3.412c0,0.232,0.039,0.396,0.115,0.489c0.077,0.093,0.215,0.14,0.415,0.14
			c0.153,0,0.285-0.012,0.395-0.035s0.225-0.062,0.345-0.115l-0.05,0.65c-0.147,0.06-0.295,0.105-0.445,0.135
			c-0.15,0.03-0.325,0.045-0.525,0.045c-0.329,0-0.579-0.088-0.751-0.264c-0.172-0.176-0.258-0.484-0.258-0.923v-3.532h-0.65v-0.64
			h0.65v-1.62l0.76-0.42v2.04h1.3v0.64H-108.721z"/>
		<path d="M-99.271,438.025c-0.12,0.344-0.284,0.633-0.49,0.87s-0.45,0.415-0.73,0.535c-0.28,0.12-0.58,0.18-0.9,0.18
			s-0.619-0.058-0.895-0.175c-0.277-0.117-0.515-0.29-0.715-0.52c-0.2-0.23-0.358-0.515-0.475-0.855s-0.175-0.733-0.175-1.18
			c0-0.446,0.06-0.84,0.18-1.18c0.12-0.34,0.283-0.625,0.49-0.855c0.207-0.23,0.45-0.405,0.73-0.525c0.28-0.12,0.58-0.18,0.9-0.18
			c0.32,0,0.618,0.057,0.895,0.17c0.276,0.113,0.515,0.283,0.715,0.51c0.2,0.227,0.358,0.509,0.475,0.845
			c0.117,0.337,0.175,0.729,0.175,1.175C-99.091,437.287-99.151,437.682-99.271,438.025z M-100.27,435.297
			c-0.279-0.345-0.648-0.518-1.106-0.518c-0.458,0-0.826,0.173-1.102,0.518c-0.276,0.345-0.414,0.866-0.414,1.562
			c0,0.697,0.138,1.223,0.414,1.578s0.643,0.533,1.102,0.533c0.458,0,0.827-0.178,1.106-0.533c0.279-0.355,0.418-0.881,0.418-1.578
			C-99.851,436.164-99.991,435.643-100.27,435.297z"/>
		<path d="M-94.421,439.5v-3.077c0-0.568-0.102-0.983-0.304-1.248c-0.202-0.264-0.513-0.396-0.931-0.396
			c-0.365,0-0.672,0.13-0.921,0.391s-0.374,0.678-0.374,1.252v3.077h-0.76v-5.25h0.76v0.64c0.153-0.22,0.35-0.4,0.59-0.54
			c0.24-0.14,0.526-0.21,0.86-0.21c0.28,0,0.533,0.042,0.76,0.125s0.42,0.213,0.58,0.39c0.16,0.177,0.283,0.4,0.37,0.67
			c0.086,0.27,0.13,0.595,0.13,0.975v3.2H-94.421z"/>
	</g>
</g>
<g id="Layer_5">
	<g>
		<path fill="#DB212F" d="M-119.063,465.698l-4.604,1.678c0.059,0.738,0.185,1.466,0.364,2.181l4.376-1.592
			C-119.068,467.224-119.12,466.462-119.063,465.698"/>
		<g>
			<g>
				<path fill="#DB212F" d="M-98.71,460.606c-0.321-0.663-0.693-1.303-1.122-1.905l-4.606,1.675
					c0.538,0.547,0.986,1.164,1.354,1.823L-98.71,460.606z"/>
			</g>
			<g>
				<path fill="#DB212F" d="M-108.841,459.301c0.959,0.449,1.787,1.057,2.488,1.773l4.604-1.677
					c-1.276-1.79-3.012-3.286-5.141-4.277c-6.583-3.071-14.434-0.213-17.505,6.369c-0.992,2.129-1.362,4.392-1.188,6.582
					l4.606-1.675c0.075-0.998,0.318-1.998,0.766-2.957C-118.218,459.164-113.116,457.309-108.841,459.301"/>
			</g>
		</g>
		<path fill="#EA2227" d="M-123.015,469.452l-4.376,1.594c0.401,1.594,1.101,3.11,2.057,4.458l4.596-1.67
			C-121.919,472.621-122.702,471.09-123.015,469.452"/>
		<path fill="#DB212F" d="M-103.93,467.715c-0.073,0.999-0.325,1.998-0.774,2.957c-1.994,4.277-7.094,6.134-11.371,4.14
			c-0.958-0.449-1.795-1.053-2.492-1.77l-4.594,1.673c1.271,1.789,3.007,3.285,5.137,4.279c6.582,3.069,14.434,0.211,17.502-6.372
			c0.994-2.129,1.362-4.391,1.185-6.578L-103.93,467.715z"/>
		<path fill="#EA2227" d="M-102.798,462.094l-4.374,1.592c0.811,1.457,1.195,3.134,1.071,4.819l4.594-1.672
			C-101.639,465.185-102.078,463.575-102.798,462.094"/>
		<path fill="#231F20" d="M-72.271,467.031c0-1.331-0.18-2.512-0.54-3.543c-0.344-1.049-0.837-1.931-1.478-2.651
			c-0.624-0.734-1.384-1.29-2.275-1.666c-0.876-0.392-1.845-0.586-2.909-0.586c-1.079,0-2.063,0.195-2.955,0.586
			c-0.892,0.39-1.659,0.955-2.299,1.689c-0.642,0.718-1.142,1.602-1.502,2.651c-0.345,1.047-0.516,2.236-0.516,3.565
			c0,1.33,0.171,2.52,0.516,3.566c0.36,1.031,0.853,1.915,1.479,2.651c0.64,0.718,1.399,1.273,2.275,1.665
			c0.892,0.376,1.875,0.563,2.956,0.563c1.062,0,2.039-0.195,2.931-0.586c0.892-0.391,1.659-0.947,2.3-1.665
			c0.642-0.736,1.134-1.626,1.478-2.675C-72.451,469.548-72.271,468.359-72.271,467.031L-72.271,467.031z M-75.649,467.076
			c0,1.675-0.353,2.956-1.055,3.848c-0.689,0.892-1.612,1.337-2.77,1.337c-1.158,0-2.095-0.453-2.815-1.36
			c-0.718-0.907-1.078-2.197-1.078-3.87c0-1.675,0.345-2.957,1.031-3.848c0.704-0.892,1.636-1.336,2.793-1.336
			s2.094,0.453,2.814,1.36C-76.009,464.114-75.649,465.403-75.649,467.076L-75.649,467.076z"/>
		<path fill="#231F20" d="M-55.075,464.051c0-0.876-0.149-1.634-0.446-2.275c-0.298-0.658-0.703-1.205-1.219-1.644
			c-0.518-0.437-1.12-0.758-1.807-0.96c-0.689-0.218-1.415-0.329-2.183-0.329h-7.179v16.422h3.285v-5.818h3.611
			c0.845,0,1.628-0.1,2.347-0.305c0.736-0.203,1.368-0.523,1.901-0.96c0.531-0.439,0.944-0.994,1.242-1.667
			C-55.224,465.826-55.075,465.005-55.075,464.051L-55.075,464.051z M-58.454,464.121c0,1.424-0.782,2.134-2.345,2.134h-3.824
			v-4.222h3.777c0.733,0,1.312,0.171,1.735,0.516C-58.672,462.877-58.454,463.401-58.454,464.121L-58.454,464.121z"/>
		<polygon fill="#231F20" points="-39.147,475.264 -39.147,472.05 -47.615,472.05 -47.615,468.086 -42.9,468.086 -42.9,464.896 
			-47.615,464.896 -47.615,462.057 -39.497,462.057 -39.497,458.842 -50.9,458.842 -50.9,475.264 		"/>
		<path fill="#231F20" d="M-21.292,475.264v-16.422h-3.238v7.812c0.016,0.344,0.023,0.695,0.023,1.055v0.986
			c0.016,0.297,0.023,0.524,0.023,0.679c-0.109-0.218-0.281-0.5-0.517-0.845c-0.219-0.358-0.43-0.695-0.633-1.008l-5.818-8.68
			h-3.144v16.422h3.236v-7.226c0-0.234-0.008-0.523-0.021-0.868v-1.032c0-0.36-0.008-0.688-0.023-0.986v-0.703
			c0.107,0.218,0.273,0.508,0.492,0.866c0.233,0.345,0.452,0.673,0.657,0.986l6.028,8.962H-21.292z"/>
		<path fill="#231F20" d="M-5.879,470.947c0-0.61-0.079-1.149-0.234-1.618c-0.157-0.47-0.424-0.899-0.798-1.291
			c-0.359-0.392-0.844-0.75-1.454-1.079c-0.61-0.328-1.37-0.657-2.275-0.986c-0.831-0.297-1.502-0.571-2.018-0.821
			c-0.502-0.25-0.892-0.5-1.173-0.75c-0.282-0.266-0.471-0.532-0.563-0.799c-0.095-0.282-0.142-0.593-0.142-0.937
			c0-0.329,0.056-0.634,0.163-0.916c0.126-0.297,0.313-0.555,0.565-0.773c0.266-0.22,0.601-0.392,1.008-0.518
			c0.407-0.14,0.892-0.21,1.454-0.21c0.829,0,1.541,0.133,2.136,0.399c0.608,0.25,1.211,0.626,1.805,1.126l1.174-1.431
			c-0.688-0.547-1.423-0.978-2.205-1.291c-0.766-0.313-1.696-0.469-2.791-0.469c-0.768,0-1.47,0.095-2.111,0.282
			c-0.626,0.187-1.166,0.468-1.618,0.844c-0.439,0.36-0.783,0.797-1.033,1.313c-0.25,0.518-0.376,1.104-0.376,1.76
			c0,0.594,0.078,1.118,0.235,1.572c0.172,0.453,0.438,0.868,0.798,1.244c0.376,0.358,0.86,0.703,1.454,1.032
			c0.61,0.313,1.36,0.626,2.252,0.938c0.75,0.266,1.376,0.532,1.877,0.797c0.502,0.25,0.899,0.508,1.196,0.773
			c0.313,0.266,0.532,0.555,0.658,0.868s0.187,0.657,0.187,1.033c0,0.876-0.32,1.563-0.961,2.063
			c-0.625,0.502-1.485,0.752-2.58,0.752c-0.845,0-1.628-0.181-2.346-0.54c-0.721-0.36-1.393-0.836-2.018-1.43l-1.221,1.36
			c0.657,0.657,1.454,1.205,2.394,1.642c0.952,0.422,1.994,0.634,3.12,0.634c0.859,0,1.625-0.118,2.299-0.352
			c0.672-0.234,1.244-0.555,1.711-0.96c0.469-0.408,0.821-0.892,1.056-1.455C-6.005,472.192-5.879,471.589-5.879,470.947
			L-5.879,470.947z"/>
		<polygon fill="#231F20" points="10.801,475.264 10.801,458.842 8.971,458.842 8.971,465.857 0.806,465.857 0.806,458.842 
			-1.024,458.842 -1.024,475.264 0.806,475.264 0.806,467.522 8.971,467.522 8.971,475.264 		"/>
		<rect x="16.289" y="458.842" fill="#231F20" width="1.832" height="16.422"/>
		<polygon fill="#231F20" points="33.25,460.507 33.25,458.842 23.609,458.842 23.609,475.264 25.438,475.264 25.438,467.617 
			29.943,467.617 29.943,465.95 25.438,465.95 25.438,460.507 		"/>
		<polygon fill="#231F20" points="48.008,460.507 48.008,458.842 36.512,458.842 36.512,460.507 41.344,460.507 41.344,475.264 
			43.176,475.264 43.176,460.507 		"/>
		<path fill="#231F20" d="M-41.526,488.261c-0.223,0.124-0.534,0.212-0.896,0.212c-0.649,0-1.049-0.399-1.049-1.234v-2.691h-0.665
			v-0.836h0.665v-1.331l0.896-0.479v1.809h1.155v0.836h-1.155v2.531c0,0.435,0.144,0.559,0.48,0.559
			c0.238,0,0.506-0.089,0.675-0.187L-41.526,488.261z M-45.843,486.387c-0.248-0.124-0.566-0.205-1.064-0.205
			c-0.587,0-0.959,0.268-0.959,0.693c0,0.462,0.294,0.773,0.896,0.773c0.49,0,0.916-0.303,1.128-0.596V486.387z M-45.843,488.375
			v-0.461c-0.318,0.319-0.773,0.558-1.279,0.558c-0.754,0-1.614-0.427-1.614-1.573c0-1.037,0.8-1.507,1.856-1.507
			c0.436,0,0.779,0.061,1.037,0.177v-0.346c0-0.506-0.311-0.792-0.878-0.792c-0.479,0-0.852,0.091-1.216,0.295l-0.354-0.693
			c0.443-0.275,0.94-0.419,1.597-0.419c1.039,0,1.749,0.508,1.749,1.565v3.195H-45.843z M-50.807,488.375v-2.787h-2.857v2.787
			h-0.932v-6.216h0.932v2.515h2.857v-2.515h0.934v6.216H-50.807z M-59.127,485.072c-0.204-0.275-0.63-0.61-1.092-0.61
			c-0.658,0-1.012,0.496-1.012,1.48c0,1.173,0.372,1.687,1.047,1.687c0.435,0,0.818-0.291,1.057-0.595V485.072L-59.127,485.072z
			 M-59.137,488.375v-0.443c-0.336,0.309-0.727,0.54-1.214,0.54c-1.006,0-1.796-0.727-1.796-2.503c0-1.599,0.872-2.354,1.841-2.354
			c0.471,0,0.913,0.25,1.169,0.533v-1.774l0.907-0.472v6.473H-59.137z M-64.979,484.442c-0.611,0-0.984,0.428-1.064,1.171h2.165
			C-63.921,484.976-64.223,484.442-64.979,484.442 M-62.981,486.37h-3.08c0.098,0.896,0.602,1.279,1.171,1.279
			c0.392,0,0.703-0.142,1.012-0.374l0.543,0.587c-0.409,0.39-0.897,0.612-1.607,0.612c-1.093,0-2.016-0.88-2.016-2.425
			c0-1.581,0.836-2.433,2.042-2.433c1.323,0,1.961,1.075,1.961,2.336C-62.956,486.122-62.971,486.271-62.981,486.37
			 M-69.695,483.039h-1.812v1.998h1.812c0.622,0,1.058-0.319,1.058-0.994C-68.637,483.396-69.063,483.039-69.695,483.039
			 M-69.063,485.836l1.27,2.541h-1.072l-1.237-2.46h-1.403v2.46h-0.913v-6.218h2.725c1.084,0,1.998,0.578,1.998,1.858
			C-67.697,485.011-68.22,485.624-69.063,485.836 M-78.013,490.019h-0.969l0.676-1.732l-1.715-4.572h1.004l0.762,2.281
			c0.146,0.409,0.356,1.102,0.411,1.36c0.079-0.278,0.274-0.94,0.418-1.343l0.789-2.298h0.969L-78.013,490.019z M-82.446,484.46
			c-0.435,0-0.814,0.293-1.057,0.594v1.963c0.204,0.276,0.632,0.614,1.095,0.614c0.654,0,1.011-0.498,1.011-1.482
			C-81.397,484.974-81.771,484.46-82.446,484.46 M-82.32,488.474c-0.473,0-0.915-0.248-1.173-0.533v0.435h-0.906v-6.001l0.906-0.472
			v2.255c0.338-0.309,0.728-0.54,1.216-0.54c1.004,0,1.796,0.729,1.796,2.504C-80.481,487.72-81.351,488.474-82.32,488.474"/>
		<path fill="#231F20" d="M-39.347,482.736c-0.029-0.023-0.069-0.035-0.124-0.035h-0.227v0.287h0.213
			c0.12,0,0.179-0.047,0.179-0.144C-39.306,482.797-39.32,482.762-39.347,482.736 M-39.247,483.004
			c-0.034,0.041-0.083,0.069-0.143,0.083l0.191,0.364h-0.134l-0.184-0.354h-0.183v0.354h-0.112V482.6h0.345
			c0.076,0,0.142,0.02,0.194,0.061c0.054,0.038,0.079,0.101,0.079,0.183C-39.192,482.909-39.209,482.962-39.247,483.004
			 M-38.92,482.768c-0.033-0.083-0.08-0.154-0.14-0.213c-0.059-0.058-0.13-0.104-0.211-0.136c-0.08-0.035-0.169-0.051-0.264-0.051
			c-0.092,0-0.179,0.016-0.262,0.051c-0.08,0.031-0.149,0.077-0.21,0.136c-0.06,0.06-0.106,0.131-0.143,0.213
			c-0.033,0.08-0.049,0.173-0.049,0.273c0,0.099,0.016,0.189,0.049,0.272c0.036,0.083,0.083,0.153,0.143,0.21
			c0.061,0.058,0.13,0.106,0.21,0.139c0.083,0.032,0.17,0.048,0.262,0.048c0.095,0,0.184-0.016,0.264-0.048
			c0.081-0.033,0.152-0.081,0.211-0.139c0.06-0.057,0.106-0.128,0.14-0.21c0.035-0.083,0.052-0.173,0.052-0.272
			C-38.869,482.941-38.885,482.848-38.92,482.768 M-38.822,483.354c-0.041,0.093-0.095,0.175-0.163,0.244
			c-0.069,0.065-0.15,0.118-0.244,0.156c-0.095,0.035-0.195,0.054-0.306,0.054c-0.108,0-0.208-0.02-0.303-0.054
			c-0.095-0.038-0.177-0.091-0.244-0.156c-0.069-0.069-0.124-0.151-0.163-0.244c-0.038-0.095-0.058-0.201-0.058-0.313
			c0-0.118,0.02-0.221,0.058-0.315c0.039-0.096,0.094-0.178,0.163-0.244c0.067-0.069,0.149-0.12,0.244-0.157
			c0.095-0.037,0.194-0.055,0.303-0.055c0.11,0,0.211,0.018,0.306,0.055c0.094,0.038,0.175,0.089,0.244,0.157
			c0.068,0.067,0.122,0.148,0.163,0.244c0.037,0.095,0.057,0.197,0.057,0.315C-38.765,483.153-38.785,483.26-38.822,483.354"/>
		<path fill="#221D1D" d="M51.717,459.262c-0.043-0.038-0.104-0.057-0.186-0.057h-0.346v0.441h0.326
			c0.182,0,0.271-0.075,0.271-0.221C51.783,459.353,51.764,459.297,51.717,459.262 M51.875,459.667
			c-0.055,0.061-0.129,0.104-0.219,0.127l0.289,0.553h-0.201l-0.279-0.541h-0.279v0.541h-0.17v-1.295h0.523
			c0.117,0,0.217,0.029,0.295,0.09c0.082,0.062,0.121,0.156,0.121,0.282C51.955,459.523,51.926,459.604,51.875,459.667
			 M52.371,459.307c-0.051-0.126-0.123-0.234-0.215-0.323c-0.088-0.091-0.197-0.162-0.322-0.211c-0.123-0.051-0.256-0.075-0.4-0.075
			c-0.141,0-0.273,0.024-0.396,0.075c-0.125,0.049-0.23,0.12-0.322,0.211c-0.092,0.088-0.162,0.197-0.213,0.323
			c-0.055,0.124-0.08,0.264-0.08,0.415c0,0.152,0.025,0.29,0.08,0.416c0.051,0.126,0.121,0.234,0.213,0.323
			c0.092,0.09,0.197,0.159,0.322,0.208c0.123,0.051,0.256,0.075,0.396,0.075c0.145,0,0.277-0.023,0.4-0.075
			c0.125-0.049,0.234-0.118,0.322-0.208c0.092-0.088,0.164-0.197,0.215-0.323s0.078-0.264,0.078-0.416
			C52.449,459.571,52.422,459.431,52.371,459.307 M52.52,460.203c-0.061,0.142-0.143,0.266-0.246,0.368
			c-0.107,0.105-0.229,0.184-0.373,0.238c-0.141,0.057-0.297,0.085-0.467,0.085c-0.166,0-0.32-0.028-0.465-0.085
			c-0.141-0.055-0.262-0.133-0.371-0.238c-0.102-0.102-0.186-0.226-0.244-0.368c-0.061-0.146-0.092-0.305-0.092-0.48
			c0-0.175,0.031-0.334,0.092-0.48c0.059-0.144,0.143-0.266,0.244-0.369c0.109-0.104,0.23-0.183,0.371-0.24
			c0.145-0.055,0.299-0.084,0.465-0.084c0.17,0,0.326,0.029,0.467,0.084c0.145,0.057,0.266,0.136,0.373,0.24
			c0.104,0.103,0.186,0.225,0.246,0.369c0.059,0.146,0.09,0.305,0.09,0.48C52.609,459.898,52.578,460.057,52.52,460.203"/>
	</g>
</g>
<g id="Layer_2">
</g>
<g id="Layer_4" display="none">
	<g display="inline">
		<path d="M-85.193,513.353c-3.295,0-5.483,2.655-5.483,7.425c0,4.771,2.288,7.492,5.588,7.492c3.295,0,5.478-2.654,5.478-7.426
			C-79.61,516.075-81.899,513.353-85.193,513.353 M-85.16,532.938c-6.154,0-10.359-4.5-10.359-12.094
			c0-7.587,4.272-12.16,10.432-12.16c6.116,0,10.324,4.501,10.324,12.093S-79.039,532.938-85.16,532.938"/>
		<path d="M-60.14,513.621h-5.415v6.049h5.485c2.184,0,3.362-1.009,3.362-3.061C-56.709,514.561-58.056,513.621-60.14,513.621
			 M-60.374,524.241h-5.182v8.328h-4.708v-23.516h10.291c4.439,0,8.107,2.454,8.107,7.459
			C-51.867,521.958-55.498,524.241-60.374,524.241"/>
		<polygon points="-46.994,532.567 -46.994,509.053 -30.65,509.053 -30.65,513.657 -42.289,513.657 -42.289,517.721 
			-35.529,517.721 -35.529,522.288 -42.289,522.288 -42.289,527.963 -30.145,527.963 -30.145,532.567 		"/>
		<path d="M-9.871,532.567l-8.647-12.83c-0.573-0.871-1.343-2.049-1.646-2.653c0,0.873,0.064,3.829,0.064,5.142v10.341h-4.637
			v-23.514h4.502l8.343,12.432c0.573,0.871,1.345,2.051,1.647,2.653c0-0.879-0.065-3.829-0.065-5.14v-9.947h4.638v23.514h-4.199
			V532.567z"/>
		<path d="M8.021,532.938c-3.193,0-6.053-1.381-7.9-3.258l1.746-1.949c1.783,1.713,3.836,2.823,6.258,2.823
			c3.129,0,5.08-1.544,5.08-4.031c0-2.187-1.312-3.426-5.617-4.971c-5.077-1.815-6.798-3.461-6.798-6.854
			c0-3.767,2.96-6.014,7.367-6.014c3.166,0,5.184,0.938,7.168,2.522l-1.682,2.049c-1.715-1.413-3.299-2.187-5.654-2.187
			c-3.226,0-4.574,1.612-4.574,3.46c0,1.953,0.878,3.057,5.585,4.738c5.215,1.881,6.829,3.629,6.829,7.121
			C15.828,530.085,12.934,532.938,8.021,532.938"/>
		<polygon points="35.999,532.567 35.999,521.485 24.295,521.485 24.295,532.567 21.672,532.567 21.672,509.053 24.295,509.053 
			24.295,519.098 35.999,519.098 35.999,509.053 38.623,509.053 38.623,532.567 		"/>
		<rect x="45.371" y="509.055" width="2.623" height="23.514"/>
		<polygon points="57.375,511.438 57.375,519.233 63.83,519.233 63.83,521.62 57.375,521.62 57.375,532.567 54.75,532.567 
			54.75,509.053 68.576,509.053 68.576,511.438 		"/>
		<polygon points="82.834,511.438 82.834,532.567 80.211,532.567 80.211,511.438 73.285,511.438 73.285,509.053 89.764,509.053 
			89.764,511.438 		"/>
		<path fill="#BC1C29" d="M-142.341,518.498l-7.872,2.861c0.103,1.26,0.318,2.504,0.623,3.725l7.473-2.723
			C-142.357,521.103-142.442,519.803-142.341,518.498"/>
		<path fill="#BC1C29" d="M-107.571,509.81c-0.548-1.129-1.181-2.224-1.919-3.256l-7.868,2.861c0.916,0.938,1.685,1.987,2.312,3.113
			L-107.571,509.81z"/>
		<path fill="#E22434" d="M-124.882,507.586c1.636,0.763,3.057,1.801,4.25,3.023l7.869-2.864c-2.182-3.052-5.148-5.604-8.782-7.297
			c-11.246-5.24-24.667-0.364-29.905,10.87c-1.701,3.631-2.332,7.494-2.038,11.231l7.871-2.86c0.128-1.7,0.547-3.407,1.311-5.044
			C-140.903,507.35-132.184,504.181-124.882,507.586"/>
		<path fill="#E22434" d="M-149.099,524.909l-7.475,2.717c0.688,2.719,1.88,5.309,3.516,7.607l7.853-2.851
			C-147.221,530.311-148.564,527.7-149.099,524.909"/>
		<path fill="#E22434" d="M-116.491,521.944c-0.126,1.698-0.551,3.408-1.319,5.045c-3.406,7.299-12.123,10.467-19.431,7.062
			c-1.636-0.766-3.067-1.799-4.258-3.02l-7.849,2.854c2.175,3.053,5.141,5.604,8.776,7.302c11.246,5.237,24.664,0.36,29.91-10.873
			c1.696-3.632,2.322-7.492,2.024-11.228L-116.491,521.944z"/>
		<path fill="#E22434" d="M-114.555,512.346l-7.475,2.724c1.39,2.481,2.043,5.344,1.833,8.221l7.85-2.854
			C-112.574,517.622-113.325,514.876-114.555,512.346"/>
		<path fill="#97101B" d="M-142.373,520.078c-0.019-0.524-0.012-1.051,0.032-1.58l-7.872,2.861c0.038,0.504,0.103,1.002,0.178,1.5
			L-142.373,520.078z"/>
		<path fill="#97101B" d="M-108.707,507.741c-0.25-0.4-0.507-0.8-0.781-1.187l-7.866,2.861c0.345,0.354,0.666,0.732,0.969,1.114
			L-108.707,507.741z"/>
		<path fill="#BC1C29" d="M-149.347,533.886c0.604,0.849,1.274,1.663,2,2.426l8.545-3.112c-1-0.627-1.902-1.353-2.699-2.166
			L-149.347,533.886z M-108.637,519.089l-7.854,2.856c-0.083,1.129-0.303,2.26-0.664,3.371l8.542-3.113
			C-108.547,521.159-108.559,520.119-108.637,519.089"/>
		<path d="M96.124,511.01c-0.082,0.198-0.194,0.368-0.339,0.511c-0.147,0.139-0.316,0.25-0.512,0.328
			c-0.197,0.078-0.41,0.115-0.646,0.115c-0.227,0-0.439-0.038-0.637-0.115c-0.196-0.079-0.366-0.188-0.516-0.328
			c-0.141-0.143-0.256-0.313-0.334-0.511c-0.087-0.197-0.128-0.417-0.128-0.659c0-0.241,0.041-0.461,0.128-0.657
			c0.078-0.2,0.193-0.37,0.334-0.511c0.148-0.144,0.318-0.25,0.516-0.329c0.197-0.077,0.412-0.116,0.637-0.116
			c0.236,0,0.449,0.039,0.646,0.116c0.194,0.079,0.363,0.186,0.512,0.329c0.145,0.141,0.257,0.311,0.339,0.511
			c0.081,0.196,0.122,0.417,0.122,0.657C96.246,510.593,96.205,510.813,96.124,511.01 M95.92,509.78
			c-0.073-0.175-0.17-0.323-0.296-0.444c-0.122-0.126-0.271-0.222-0.442-0.292c-0.169-0.067-0.354-0.104-0.554-0.104
			c-0.192,0-0.375,0.037-0.548,0.104c-0.168,0.07-0.315,0.166-0.438,0.292c-0.127,0.121-0.228,0.269-0.298,0.444
			c-0.072,0.173-0.109,0.361-0.109,0.571c0,0.207,0.037,0.4,0.109,0.573c0.07,0.173,0.171,0.321,0.298,0.445
			c0.124,0.123,0.272,0.217,0.438,0.286c0.174,0.072,0.354,0.104,0.548,0.104c0.198,0,0.385-0.033,0.554-0.104
			c0.172-0.069,0.321-0.164,0.442-0.286c0.126-0.124,0.224-0.272,0.296-0.445c0.074-0.173,0.107-0.364,0.107-0.573
			C96.029,510.141,95.994,509.95,95.92,509.78 M95.234,510.275c-0.072,0.086-0.172,0.143-0.297,0.174l0.399,0.763h-0.278
			l-0.384-0.746h-0.386v0.746h-0.235v-1.783h0.724c0.164,0,0.297,0.043,0.406,0.125c0.112,0.085,0.168,0.214,0.168,0.388
			C95.348,510.076,95.309,510.188,95.234,510.275 M95.02,509.717c-0.058-0.051-0.145-0.077-0.258-0.077h-0.477v0.604h0.447
			c0.252,0,0.377-0.101,0.377-0.301C95.111,509.842,95.078,509.764,95.02,509.717"/>
	</g>
</g>
<g id="Layer_3" display="none">
	
		<image display="inline" overflow="visible" width="217" height="96" xlink:href="../Desktop/Screen Shot 2013-11-19 at 4.51.37 PM.png"  transform="matrix(1 0 0 1 -145.2275 405.29)">
	</image>
</g>
</svg>
);
}
.logo a {
display: block;
width: 100%;
height: 100%;
}
*, *:before, *:after {
-moz-box-sizing: border-box;
box-sizing: border-box;
}
aside,
footer,
header,
hgroup,
section{
display: block;
}
body {
color: #404040;
font-family: "Helvetica Neue",Helvetica,"Liberation Sans",Arial,sans-serif;
font-size: 14px;
line-height: 1.4;
}
html {
font-family: sans-serif;
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
ul {
margin-top: 0;
}
.container {
margin-right: auto;
margin-left: auto;
padding-left: 15px;
padding-right: 15px;
}
.container:before,
.container:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.container:after {
clear: both;
}
.row {
margin-left: -15px;
margin-right: -15px;
}
.row:before,
.row:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.row:after {
clear: both;
}
.col-sm-6, .col-md-6, .col-xs-12 {
position: relative;
min-height: 1px;
padding-left: 15px;
padding-right: 15px;
}
.col-xs-12 {
width: 100%;
}
@media (min-width: 768px) {
.container {
width: 750px;
}
.col-sm-6 {
float: left;
}
.col-sm-6 {
width: 50%;
}
}
@media (min-width: 992px) {
.container {
width: 970px;
}
.col-md-6 {
float: left;
}
.col-md-6 {
width: 50%;
}
}
@media (min-width: 1200px) {
.container {
width: 1170px;
}
}
a {
color: #069;
text-decoration: none;
}
a:hover {
color: #EA0011;
text-decoration: underline;
}
hgroup {
margin-top: 50px;
}
footer {
margin: 50px 0 25px;
font-size: 11px
}
h1, h2, h3 {
color: #000;
line-height: 1.38em;
margin: 1.5em 0 .3em;
}
h1 {
font-size: 25px;
font-weight: 300;
border-bottom: 1px solid #fff;
margin-bottom: .5em;
}
h1:after {
content: "";
display: block;
width: 100%;
height: 1px;
background-color: #ddd;
}
h2 {
font-size: 19px;
font-weight: 400;
}
h3 {
font-size: 15px;
font-weight: 400;
margin: 0 0 .3em;
}
p {
margin: 0 0 2em;
}
p + h2 {
margin-top: 2em;
}
html {
background: #f5f5f5;
height: 100%;
}
code {
background-color: white;
border: 1px solid #ccc;
padding: 1px 5px;
color: #888;
}
pre {
display: block;
padding: 13.333px 20px;
margin: 0 0 20px;
font-size: 13px;
line-height: 1.4;
background-color: #fff;
border-left: 2px solid rgba(120,120,120,0.35);
white-space: pre;
white-space: pre-wrap;
word-break: normal;
word-wrap: break-word;
overflow: auto;
font-family: Menlo,Monaco,"Liberation Mono",Consolas,monospace !important;
}
</style>
</head>
<body>
<section class='container'>
<hgroup>
<h1>Welcome to your Python application on OpenShift</h1>
</hgroup>
<div class="row">
<section class='col-xs-12 col-sm-6 col-md-6'>
<section>
<h2>Deploying code changes</h2>
<p>OpenShift uses the <a href="http://git-scm.com/">Git version control system</a> for your source code, and grants you access to it via the Secure Shell (SSH) protocol. In order to upload and download code to your application you need to give us your <a href="https://www.openshift.com/developers/remote-access">public SSH key</a>. You can upload it within the web console or install the <a href="https://www.openshift.com/developers/rhc-client-tools-install">RHC command line tool</a> and run <code>rhc setup</code> to generate and upload your key automatically.</p>
<h3>Working in your local Git repository</h3>
<p>If you created your application from the command line and uploaded your SSH key, rhc will automatically download a copy of that source code repository (Git calls this 'cloning') to your local system.</p>
<p>If you created the application from the web console, you'll need to manually clone the repository to your local system. Copy the application's source code Git URL and then run:</p>
<pre>$ git clone <git_url> <directory_to_create>
# Within your project directory
# Commit your changes and push to OpenShift
$ git commit -a -m 'Some commit message'
$ git push</pre>
<ul>
<li><a href="https://www.openshift.com/developers/deploying-and-building-applications">Learn more about deploying and building your application</a></li>
<li>See the README file in your local application Git repository for more information on the options for deploying applications.</li>
</ul>
</section>
</section>
<section class="col-xs-12 col-sm-6 col-md-6">
<h2>Managing your application</h2>
<h3>Web Console</h3>
<p>You can use the OpenShift web console to enable additional capabilities via cartridges, add collaborator access authorizations, designate custom domain aliases, and manage domain memberships.</p>
<h3>Command Line Tools</h3>
<p>Installing the <a href="https://www.openshift.com/developers/rhc-client-tools-install">OpenShift RHC client tools</a> allows you complete control of your cloud environment. Read more on how to manage your application from the command line in our <a href="https://www.openshift.com/user-guide">User Guide</a>.
</p>
<h2>Development Resources</h2>
<ul>
<li><a href="https://www.openshift.com/developers">Developer Center</a></li>
<li><a href="https://www.openshift.com/user-guide">User Guide</a></li>
<li><a href="https://www.openshift.com/support">OpenShift Support</a></li>
<li><a href="http://stackoverflow.com/questions/tagged/openshift">Stack Overflow questions for OpenShift</a></li>
<li><a href="http://webchat.freenode.net/?randomnick=1&channels=openshift&uio=d4">IRC channel at #openshift on freenode.net</a></li>
<li><a href="http://git-scm.com/documentation">Git documentation</a></li>
</ul>
</section>
</div>
<footer>
<div class="logo"><a href="https://www.openshift.com/"></a></div>
</footer>
</section>
</body>
</html>'''
status = '200 OK'
response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
#
start_response(status, response_headers)
return [response_body]
#
# Below for testing only
#
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
# Wait for a single request, serve it and quit.
httpd.handle_request()
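# Illustrative check (an assumption, added for clarity; not part of the
# original file). With the single-request test server above running:
#
#   curl http://localhost:8051/health   # -> "1"
#   curl http://localhost:8051/env      # -> sorted "key: value" dump of the WSGI environ
#
# Any other path returns the HTML welcome page built in application().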
| agpl-3.0 |
haad/ansible | lib/ansible/modules/identity/ipa/ipa_hbacrule.py | 134 | 13226 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_hbacrule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA HBAC rule
description:
- Add, modify or delete an IPA HBAC rule using IPA API.
options:
cn:
description:
- Canonical name.
- Cannot be changed as it is the unique identifier.
required: true
aliases: ["name"]
description:
description: Description
host:
description:
- List of host names to assign.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
required: false
hostcategory:
description: Host category
choices: ['all']
hostgroup:
description:
- List of hostgroup names to assign.
- If an empty list is passed all hostgroups will be removed from the rule.
- If option is omitted hostgroups will not be checked or changed.
service:
description:
- List of service names to assign.
- If an empty list is passed all services will be removed from the rule.
- If option is omitted services will not be checked or changed.
servicecategory:
description: Service category
choices: ['all']
servicegroup:
description:
- List of service group names to assign.
- If an empty list is passed all assigned service groups will be removed from the rule.
- If option is omitted service groups will not be checked or changed.
sourcehost:
description:
- List of source host names to assign.
- If an empty list is passed all assigned source hosts will be removed from the rule.
- If option is omitted source hosts will not be checked or changed.
sourcehostcategory:
description: Source host category
choices: ['all']
sourcehostgroup:
description:
- List of source host group names to assign.
- If an empty list is passed all assigned source host groups will be removed from the rule.
- If option is omitted source host groups will not be checked or changed.
state:
description: State to ensure
default: "present"
choices: ["present", "absent", "enabled", "disabled"]
user:
description:
- List of user names to assign.
- If an empty list is passed all assigned users will be removed from the rule.
- If option is omitted users will not be checked or changed.
usercategory:
description: User category
choices: ['all']
usergroup:
description:
- List of user group names to assign.
- If an empty list is passed all assigned user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
extends_documentation_fragment: ipa.documentation
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure rule to allow all users to access any host from any host
- ipa_hbacrule:
name: allow_all
description: Allow all users to access any host from any host
hostcategory: all
servicecategory: all
usercategory: all
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule with certain limitations
- ipa_hbacrule:
name: allow_all_developers_access_to_db
description: Allow all developers to access any database from any host
hostgroup:
- db-server
usergroup:
- developers
state: present
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure rule is absent
- ipa_hbacrule:
name: rule_to_be_deleted
state: absent
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
hbacrule:
description: HBAC rule as returned by IPA API.
returned: always
type: dict
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ipa import IPAClient, ipa_argument_spec
from ansible.module_utils._text import to_native
class HBACRuleIPAClient(IPAClient):
def __init__(self, module, host, port, protocol):
super(HBACRuleIPAClient, self).__init__(module, host, port, protocol)
def hbacrule_find(self, name):
return self._post_json(method='hbacrule_find', name=None, item={'all': True, 'cn': name})
def hbacrule_add(self, name, item):
return self._post_json(method='hbacrule_add', name=name, item=item)
def hbacrule_mod(self, name, item):
return self._post_json(method='hbacrule_mod', name=name, item=item)
def hbacrule_del(self, name):
return self._post_json(method='hbacrule_del', name=name)
def hbacrule_add_host(self, name, item):
return self._post_json(method='hbacrule_add_host', name=name, item=item)
def hbacrule_remove_host(self, name, item):
return self._post_json(method='hbacrule_remove_host', name=name, item=item)
def hbacrule_add_service(self, name, item):
return self._post_json(method='hbacrule_add_service', name=name, item=item)
def hbacrule_remove_service(self, name, item):
return self._post_json(method='hbacrule_remove_service', name=name, item=item)
def hbacrule_add_user(self, name, item):
return self._post_json(method='hbacrule_add_user', name=name, item=item)
def hbacrule_remove_user(self, name, item):
return self._post_json(method='hbacrule_remove_user', name=name, item=item)
def hbacrule_add_sourcehost(self, name, item):
return self._post_json(method='hbacrule_add_sourcehost', name=name, item=item)
def hbacrule_remove_sourcehost(self, name, item):
return self._post_json(method='hbacrule_remove_sourcehost', name=name, item=item)
def get_hbacrule_dict(description=None, hostcategory=None, ipaenabledflag=None, servicecategory=None,
sourcehostcategory=None,
usercategory=None):
data = {}
if description is not None:
data['description'] = description
if hostcategory is not None:
data['hostcategory'] = hostcategory
if ipaenabledflag is not None:
data['ipaenabledflag'] = ipaenabledflag
if servicecategory is not None:
data['servicecategory'] = servicecategory
if sourcehostcategory is not None:
data['sourcehostcategory'] = sourcehostcategory
if usercategory is not None:
data['usercategory'] = usercategory
return data
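# Illustrative result (added for clarity; not part of the original module),
# matching the "allow_all" rule in EXAMPLES above:
#
#   get_hbacrule_dict(description='Allow all users to access any host from any host',
#                     hostcategory='all', ipaenabledflag='TRUE',
#                     servicecategory='all', usercategory='all')
#   => {'description': '...', 'hostcategory': 'all', 'ipaenabledflag': 'TRUE',
#       'servicecategory': 'all', 'usercategory': 'all'}
#
# Keys left as None are omitted, so only explicitly set attributes are sent.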
def get_hbcarule_diff(client, ipa_hbcarule, module_hbcarule):
return client.get_diff(ipa_data=ipa_hbcarule, module_data=module_hbcarule)
def ensure(module, client):
name = module.params['cn']
state = module.params['state']
if state in ['present', 'enabled']:
ipaenabledflag = 'TRUE'
else:
ipaenabledflag = 'FALSE'
host = module.params['host']
hostcategory = module.params['hostcategory']
hostgroup = module.params['hostgroup']
service = module.params['service']
servicecategory = module.params['servicecategory']
servicegroup = module.params['servicegroup']
sourcehost = module.params['sourcehost']
sourcehostcategory = module.params['sourcehostcategory']
sourcehostgroup = module.params['sourcehostgroup']
user = module.params['user']
usercategory = module.params['usercategory']
usergroup = module.params['usergroup']
module_hbacrule = get_hbacrule_dict(description=module.params['description'],
hostcategory=hostcategory,
ipaenabledflag=ipaenabledflag,
servicecategory=servicecategory,
sourcehostcategory=sourcehostcategory,
usercategory=usercategory)
ipa_hbacrule = client.hbacrule_find(name=name)
changed = False
if state in ['present', 'enabled', 'disabled']:
if not ipa_hbacrule:
changed = True
if not module.check_mode:
ipa_hbacrule = client.hbacrule_add(name=name, item=module_hbacrule)
else:
diff = get_hbcarule_diff(client, ipa_hbacrule, module_hbacrule)
if len(diff) > 0:
changed = True
if not module.check_mode:
data = {}
for key in diff:
data[key] = module_hbacrule.get(key)
client.hbacrule_mod(name=name, item=data)
if host is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_host', []), host,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'host') or changed
if hostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberhost_hostgroup', []), hostgroup,
client.hbacrule_add_host,
client.hbacrule_remove_host, 'hostgroup') or changed
if service is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvc', []), service,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvc') or changed
if servicegroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberservice_hbacsvcgroup', []),
servicegroup,
client.hbacrule_add_service,
client.hbacrule_remove_service, 'hbacsvcgroup') or changed
if sourcehost is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_host', []), sourcehost,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'host') or changed
if sourcehostgroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('sourcehost_group', []), sourcehostgroup,
client.hbacrule_add_sourcehost,
client.hbacrule_remove_sourcehost, 'hostgroup') or changed
if user is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_user', []), user,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'user') or changed
if usergroup is not None:
changed = client.modify_if_diff(name, ipa_hbacrule.get('memberuser_group', []), usergroup,
client.hbacrule_add_user,
client.hbacrule_remove_user, 'group') or changed
else:
if ipa_hbacrule:
changed = True
if not module.check_mode:
client.hbacrule_del(name=name)
return changed, client.hbacrule_find(name=name)
def main():
argument_spec = ipa_argument_spec()
argument_spec.update(cn=dict(type='str', required=True, aliases=['name']),
description=dict(type='str'),
host=dict(type='list'),
hostcategory=dict(type='str', choices=['all']),
hostgroup=dict(type='list'),
service=dict(type='list'),
servicecategory=dict(type='str', choices=['all']),
servicegroup=dict(type='list'),
sourcehost=dict(type='list'),
sourcehostcategory=dict(type='str', choices=['all']),
sourcehostgroup=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent', 'enabled', 'disabled']),
user=dict(type='list'),
usercategory=dict(type='str', choices=['all']),
usergroup=dict(type='list'))
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True
)
client = HBACRuleIPAClient(module=module,
host=module.params['ipa_host'],
port=module.params['ipa_port'],
protocol=module.params['ipa_prot'])
try:
client.login(username=module.params['ipa_user'],
password=module.params['ipa_pass'])
changed, hbacrule = ensure(module, client)
module.exit_json(changed=changed, hbacrule=hbacrule)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 |
jswope00/griffinx | common/test/acceptance/tests/lms/test_lms_acid_xblock.py | 122 | 5837 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from unittest import expectedFailure
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.xblock.acid import AcidView
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
class XBlockAcidBase(UniqueCourseTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
self.setup_fixtures()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def validate_acid_block_view(self, acid_block):
"""
Verify that the LMS view for the Acid Block is correct
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
def validate_acid_parent_block_view(self, acid_parent_block):
super(XBlockAcidChildTest, self).validate_acid_block_view(acid_parent_block)
self.assertTrue(acid_parent_block.child_tests_passed)
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_parent_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid_parent]')
self.validate_acid_parent_block_view(acid_parent_block)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidAsideTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
@expectedFailure
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_aside = AcidView(self.browser, '.xblock_asides-v1-student_view[data-block-type=acid_aside]')
self.validate_acid_aside_view(acid_aside)
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
def validate_acid_aside_view(self, acid_aside):
self.validate_acid_block_view(acid_aside)
| agpl-3.0 |
jelugbo/hebs_repo | lms/envs/common.py | 2 | 62020 | # -*- coding: utf-8 -*-
"""
This is the common settings file, intended to set sane defaults. If you have a
piece of configuration that's dependent on a set of feature flags being set,
then create a function that returns the calculated value based on the value of
FEATURES[...]. Modules that extend this one can change the feature
configuration in an environment specific config file and re-calculate those
values.
We should make a method that calls all these config methods so that you just
make one call at the end of your site-specific dev file to reset all the
dependent variables (like INSTALLED_APPS) for you.
Longer TODO:
1. Right now our treatment of static content in general and in particular
course-specific static content is haphazard.
2. We should have a more disciplined approach to feature flagging, even if it
just means that we stick them in a dict called FEATURES.
3. We need to handle configuration for multiple courses. This could be as
multiple sites, but we do need a way to map their data assets.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0611, W0614, C0103
import sys
import os
import imp
from path import path
from warnings import simplefilter
from django.utils.translation import ugettext_lazy as _
from .discussionsettings import *
from xmodule.modulestore.modulestore_settings import update_module_store_settings
from lms.lib.xblock.mixin import LmsBlockMixin
################################### FEATURES ###################################
# The display name of the platform to be used in templates/emails/etc.
PLATFORM_NAME = "Your Platform Name Here"
CC_MERCHANT_NAME = PLATFORM_NAME
PLATFORM_FACEBOOK_ACCOUNT = "http://www.facebook.com/YourPlatformFacebookAccount"
PLATFORM_TWITTER_ACCOUNT = "@YourPlatformTwitterAccount"
PLATFORM_TWITTER_URL = "https://twitter.com/YourPlatformTwitterAccount"
PLATFORM_MEETUP_URL = "http://www.meetup.com/YourMeetup"
PLATFORM_LINKEDIN_URL = "http://www.linkedin.com/company/YourPlatform"
PLATFORM_GOOGLE_PLUS_URL = "https://plus.google.com/YourGooglePlusAccount/"
COURSEWARE_ENABLED = True
ENABLE_JASMINE = False
DISCUSSION_SETTINGS = {
'MAX_COMMENT_DEPTH': 2,
}
# Features
FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_DEBUG_INFO_TO_STAFF': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': False, # For large courses this slows down courseware access for staff.
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
# for consistency in user-experience, keep the value of the following 3 settings
# in sync with the corresponding ones in cms/envs/common.py
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_TEXTBOOK': True,
'ENABLE_STUDENT_NOTES': True, # enables the student notes API and UI.
# discussion home panel, which includes a subscription on/off setting for discussion digest emails.
# this should remain off in production until digest notifications are online.
'ENABLE_DISCUSSION_HOME_PANEL': False,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_DJANGO_ADMIN_SITE': True, # set true to enable django's admin site, even on prod (e.g. for course ops)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'ENABLE_MASQUERADE': True, # allow course staff to change to student view of courseware
'ENABLE_SYSADMIN_DASHBOARD': False, # sysadmin dashboard, to see what courses are loaded, to delete & load courses
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
# external access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# Even though external_auth is in common, shib assumes the LMS views / urls, so it should only be enabled
# in LMS
'AUTH_USE_SHIB': False,
'AUTH_USE_CAS': False,
# This flag disables the requirement of having to agree to the TOS for users registering
# with Shib. Feature was requested by Stanford's office of general counsel
'SHIB_DISABLE_TOS': False,
# Toggles OAuth2 authentication provider
'ENABLE_OAUTH2_PROVIDER': False,
# Can be turned off if course lists need to be hidden. Affects views and templates.
'COURSES_ARE_BROWSABLE': True,
# Enables ability to restrict enrollment in specific courses by the user account login method
'RESTRICT_ENROLL_BY_REG_METHOD': False,
# Enables the LMS bulk email feature for course staff
'ENABLE_INSTRUCTOR_EMAIL': True,
# If True and ENABLE_INSTRUCTOR_EMAIL: Forces email to be explicitly turned on
# for each course via django-admin interface.
# If False and ENABLE_INSTRUCTOR_EMAIL: Email will be turned on by default
# for all Mongo-backed courses.
'REQUIRE_COURSE_EMAIL_AUTH': True,
# Analytics experiments - shows instructor analytics tab in LMS instructor dashboard.
# Enabling this feature depends on installation of a separate analytics server.
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# enable analytics server.
# WARNING: THIS SHOULD ALWAYS BE SET TO FALSE UNDER NORMAL
# LMS OPERATION. See analytics.py for details about what
# this does.
'RUN_AS_ANALYTICS_SERVER_ENABLED': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True,
# Segment.io for LMS--need to explicitly turn it on for production.
'SEGMENT_IO_LMS': False,
# Provide a UI to allow users to submit feedback from the LMS (left-hand help modal)
'ENABLE_FEEDBACK_SUBMISSION': False,
# Turn on a page that lets staff enter Python code to be run in the
# sandbox, for testing whether it's enabled properly.
'ENABLE_DEBUG_RUN_PYTHON': False,
# Enable URL that shows information about the status of various services
'ENABLE_SERVICE_STATUS': False,
# Toggle to indicate use of a custom theme
'USE_CUSTOM_THEME': False,
# Don't autoplay videos for students
'AUTOPLAY_VIDEOS': False,
# Enable instructor dash to submit background tasks
'ENABLE_INSTRUCTOR_BACKGROUND_TASKS': True,
# Enable instructor to assign individual due dates
'INDIVIDUAL_DUE_DATES': False,
# Enable legacy instructor dashboard
'ENABLE_INSTRUCTOR_LEGACY_DASHBOARD': True,
# Is this an edX-owned domain? (used on instructor dashboard)
'IS_EDX_DOMAIN': False,
# Toggle to enable certificates of courses on dashboard
'ENABLE_VERIFIED_CERTIFICATES': False,
# Allow use of the hint management instructor view.
'ENABLE_HINTER_INSTRUCTOR_VIEW': False,
# for load testing
'AUTOMATIC_AUTH_FOR_TESTING': False,
# Toggle to enable chat availability (configured on a per-course
# basis in Studio)
'ENABLE_CHAT': False,
# Allow users to enroll with methods other than just honor code certificates
'MULTIPLE_ENROLLMENT_ROLES': False,
# Toggle the availability of the shopping cart page
'ENABLE_SHOPPING_CART': False,
# Toggle storing detailed billing information
'STORE_BILLING_INFO': False,
# Enable flow for payments for course registration (DIFFERENT from verified student flow)
'ENABLE_PAID_COURSE_REGISTRATION': False,
# Automatically approve student identity verification attempts
'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': False,
# Disable instructor dash buttons for downloading course data
# when enrollment exceeds this number
'MAX_ENROLLMENT_INSTR_BUTTONS': 200,
# Grade calculation started from the new instructor dashboard will write
# grades CSV files to S3 and give links for downloads.
'ENABLE_S3_GRADE_DOWNLOADS': False,
# whether to use password policy enforcement or not
'ENFORCE_PASSWORD_POLICY': False,
# Give course staff unrestricted access to grade downloads (if set to False,
# only edX superusers can perform the downloads)
'ALLOW_COURSE_STAFF_GRADE_DOWNLOADS': False,
'ENABLED_PAYMENT_REPORTS': ["refund_report", "itemized_purchase_report", "university_revenue_share", "certificate_status"],
# Turn off account locking if failed login attempts exceeds a limit
'ENABLE_MAX_FAILED_LOGIN_ATTEMPTS': False,
# Hide any Personally Identifiable Information from application logs
'SQUELCH_PII_IN_LOGS': False,
# Toggles the embargo functionality, which enables embargoing for particular courses
'EMBARGO': False,
# Toggles the embargo site functionality, which enables embargoing for the whole site
'SITE_EMBARGOED': False,
# Whether the Wiki subsystem should be accessible via the direct /wiki/ paths. Setting this to True means
# that people can submit content and modify the Wiki in any arbitrary manner. We're leaving this as True in the
# defaults, so that we maintain current behavior
'ALLOW_WIKI_ROOT_ACCESS': True,
# Turn on/off Microsites feature
'USE_MICROSITES': False,
# Turn on third-party auth. Disabled for now because full implementations are not yet available. Remember to syncdb
# if you enable this; we don't create tables by default.
'ENABLE_THIRD_PARTY_AUTH': False,
# Toggle to enable alternate urls for marketing links
'ENABLE_MKTG_SITE': False,
# Prevent concurrent logins per user
'PREVENT_CONCURRENT_LOGINS': False,
# Turn off Advanced Security by default
'ADVANCED_SECURITY': False,
# Show a "Download your certificate" on the Progress page if the lowest
# nonzero grade cutoff is met
'SHOW_PROGRESS_SUCCESS_BUTTON': False,
# Analytics Data API (for active student count)
# Default to false here b/c dev environments won't have the api, will override in aws.py
'ENABLE_ANALYTICS_ACTIVE_COUNT': False,
# When a logged in user goes to the homepage ('/') should the user be
# redirected to the dashboard - this is default Open edX behavior. Set to
# False to not redirect the user
'ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER': True,
# Expose Mobile REST API. Note that if you use this, you must also set
# ENABLE_OAUTH2_PROVIDER to True
'ENABLE_MOBILE_REST_API': False,
# Video Abstraction Layer used to allow video teams to manage video assets
# independently of courseware. https://github.com/edx/edx-val
'ENABLE_VIDEO_ABSTRACTION_LAYER_API': False,
# Enable the new dashboard, account, and profile pages
'ENABLE_NEW_DASHBOARD': False,
}
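# Illustrative sketch only (hypothetical helper, not part of the shipped settings):
# the pattern described in the module docstring -- derive a dependent value from
# FEATURES so that an environment-specific settings file can flip a flag and then
# re-calculate the derived value.
def _uses_paid_registration(features=FEATURES):
    """Return True if any paid-registration flow is switched on."""
    return bool(features.get('ENABLE_PAID_COURSE_REGISTRATION') or
                features.get('ENABLE_SHOPPING_CART'))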
# Ignore static asset files on import which match this pattern
ASSET_IGNORE_REGEX = r"(^\._.*$)|(^\.DS_Store$)|(^.*~$)"
# Used for A/B testing
DEFAULT_GROUPS = []
# If this is true, random scores will be generated for the purpose of debugging the profile graphs
GENERATE_PROFILE_SCORES = False
# Used with XQueue
XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds
############################# SET PATH INFORMATION #############################
PROJECT_ROOT = path(__file__).abspath().dirname().dirname() # /edx-platform/lms
REPO_ROOT = PROJECT_ROOT.dirname()
COMMON_ROOT = REPO_ROOT / "common"
ENV_ROOT = REPO_ROOT.dirname() # virtualenv dir /edx-platform is in
COURSES_ROOT = ENV_ROOT / "data"
DATA_DIR = COURSES_ROOT
# TODO: Remove the rest of the sys.path modification here and in cms/envs/common.py
sys.path.append(REPO_ROOT)
sys.path.append(PROJECT_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'djangoapps')
sys.path.append(COMMON_ROOT / 'lib')
# For Node.js
system_node_path = os.environ.get("NODE_PATH", REPO_ROOT / 'node_modules')
node_paths = [
COMMON_ROOT / "static/js/vendor",
COMMON_ROOT / "static/coffee/src",
system_node_path,
]
NODE_PATH = ':'.join(node_paths)
# For geolocation ip database
GEOIP_PATH = REPO_ROOT / "common/static/data/geoip/GeoIP.dat"
GEOIPV6_PATH = REPO_ROOT / "common/static/data/geoip/GeoIPv6.dat"
# Where to look for a status message
STATUS_MESSAGE_PATH = ENV_ROOT / "status_message.json"
############################ OpenID Provider ##################################
OPENID_PROVIDER_TRUSTED_ROOTS = ['cs50.net', '*.cs50.net']
############################ OAUTH2 Provider ###################################
# OpenID Connect issuer ID. Normally the URL of the authentication endpoint.
OAUTH_OIDC_ISSUER = 'https://example.com/oauth2'
# OpenID Connect claim handlers
OAUTH_OIDC_ID_TOKEN_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicIDTokenHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.IDTokenHandler'
)
OAUTH_OIDC_USERINFO_HANDLERS = (
'oauth2_provider.oidc.handlers.BasicUserInfoHandler',
'oauth2_provider.oidc.handlers.ProfileHandler',
'oauth2_provider.oidc.handlers.EmailHandler',
'oauth2_handler.UserInfoHandler'
)
################################## EDX WEB #####################################
# This is where we stick our compiled template files. Most of the app uses Mako
# templates
import tempfile
MAKO_MODULE_DIR = os.path.join(tempfile.gettempdir(), 'mako_lms')
MAKO_TEMPLATES = {}
MAKO_TEMPLATES['main'] = [PROJECT_ROOT / 'templates',
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates']
# This is where Django Template lookup is defined. There are a few of these
# still left lying around.
TEMPLATE_DIRS = [
PROJECT_ROOT / "templates",
COMMON_ROOT / 'templates',
COMMON_ROOT / 'lib' / 'capa' / 'capa' / 'templates',
COMMON_ROOT / 'djangoapps' / 'pipeline_mako' / 'templates',
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.contrib.auth.context_processors.auth', # this is required for admin
'django.core.context_processors.csrf',
# Added for django-wiki
'django.core.context_processors.media',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
# Hack to get required link URLs to password reset templates
'edxmako.shortcuts.marketing_link_context_processor',
# Allows the open edX footer to be leveraged in Django Templates.
'edxmako.shortcuts.open_source_footer_context_processor',
# Shoppingcart processor (detects if request.user has a cart)
'shoppingcart.context_processor.user_has_cart_context_processor',
# Allows the open edX footer to be leveraged in Django Templates.
'edxmako.shortcuts.microsite_footer_context_processor',
)
# use the ratelimit backend to prevent brute force attacks
AUTHENTICATION_BACKENDS = (
'ratelimitbackend.backends.RateLimitModelBackend',
)
STUDENT_FILEUPLOAD_MAX_SIZE = 4 * 1000 * 1000 # 4 MB
MAX_FILEUPLOADS_PER_INPUT = 20
# FIXME:
# We should have separate S3 staged URLs in case we need to make changes to
# these assets and test them.
LIB_URL = '/static/js/'
# Dev machines shouldn't need the book
# BOOK_URL = '/static/book/'
BOOK_URL = 'https://mitxstatic.s3.amazonaws.com/book_images/' # For AWS deploys
RSS_TIMEOUT = 600
# Configuration option for when we want to grab server error pages
STATIC_GRAB = False
DEV_CONTENT = True
EDX_ROOT_URL = ''
LOGIN_REDIRECT_URL = EDX_ROOT_URL + '/accounts/login'
LOGIN_URL = EDX_ROOT_URL + '/accounts/login'
COURSE_NAME = "6.002_Spring_2012"
COURSE_NUMBER = "6.002x"
COURSE_TITLE = "Circuits and Electronics"
### Dark code. Should be enabled in local settings for devel.
ENABLE_MULTICOURSE = False # set to False to disable multicourse display (see lib.util.views.edXhome)
WIKI_ENABLED = False
###
COURSE_DEFAULT = '6.002x_Fall_2012'
COURSE_SETTINGS = {
'6.002x_Fall_2012': {
'number': '6.002x',
'title': 'Circuits and Electronics',
'xmlpath': '6002x/',
'location': 'i4x://edx/6002xs12/course/6.002x_Fall_2012',
}
}
# IP addresses that are allowed to reload the course, etc.
# TODO (vshnayder): Will probably need to change as we get real access control in.
LMS_MIGRATION_ALLOWED_IPS = []
# These are standard regexes for pulling out info like course_ids, usage_ids, etc.
# They are used so that URLs with deprecated-format strings still work.
# Note: these intentionally greedily grab all chars up to the next slash including any pluses
# DHM: I really wanted to ensure the separators were the same (+ or /) but all patterns I tried had
# too many inadvertent side effects :-(
COURSE_KEY_PATTERN = r'(?P<course_key_string>[^/+]+(/|\+)[^/+]+(/|\+)[^/]+)'
COURSE_ID_PATTERN = COURSE_KEY_PATTERN.replace('course_key_string', 'course_id')
COURSE_KEY_REGEX = COURSE_KEY_PATTERN.replace('P<course_key_string>', ':')
USAGE_KEY_PATTERN = r'(?P<usage_key_string>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
ASSET_KEY_PATTERN = r'(?P<asset_key_string>(?:/?c4x(:/)?/[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
USAGE_ID_PATTERN = r'(?P<usage_id>(?:i4x://?[^/]+/[^/]+/[^/]+/[^@]+(?:@[^/]+)?)|(?:[^/]+))'
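# Purely illustrative (hypothetical ids): COURSE_ID_PATTERN is intended to match
# both deprecated slash-separated ids and newer plus-separated ones, e.g.
#   'MITx/6.002x/2012_Fall'   or   'SomeOrgX+CS101+2014_T1'
# and USAGE_ID_PATTERN additionally accepts old i4x-style usage ids such as
#   'i4x://MITx/6.002x/video/Welcome'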
############################## EVENT TRACKING #################################
# FIXME: Should we be doing this truncation?
TRACK_MAX_EVENT = 50000
DEBUG_TRACK_LOG = False
TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'track.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking'
}
}
}
# We're already logging events, and we don't want to capture user
# names/passwords. Heartbeat events are likely not interesting.
TRACKING_IGNORE_URL_PATTERNS = [r'^/event', r'^/login', r'^/heartbeat', r'^/segmentio/event']
EVENT_TRACKING_ENABLED = True
EVENT_TRACKING_BACKENDS = {
'logger': {
'ENGINE': 'eventtracking.backends.logger.LoggerBackend',
'OPTIONS': {
'name': 'tracking',
'max_event_size': TRACK_MAX_EVENT,
}
}
}
EVENT_TRACKING_PROCESSORS = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
# Backwards compatibility with ENABLE_SQL_TRACKING_LOGS feature flag.
# In the future, adding the backend to TRACKING_BACKENDS should be enough.
if FEATURES.get('ENABLE_SQL_TRACKING_LOGS'):
TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
EVENT_TRACKING_BACKENDS.update({
'sql': {
'ENGINE': 'track.backends.django.DjangoBackend'
}
})
TRACKING_SEGMENTIO_WEBHOOK_SECRET = None
TRACKING_SEGMENTIO_ALLOWED_ACTIONS = ['Track', 'Screen']
TRACKING_SEGMENTIO_ALLOWED_CHANNELS = ['mobile']
######################## GOOGLE ANALYTICS ###########################
GOOGLE_ANALYTICS_ACCOUNT = None
GOOGLE_ANALYTICS_LINKEDIN = 'GOOGLE_ANALYTICS_LINKEDIN_DUMMY'
######################## OPTIMIZELY ###########################
OPTIMIZELY_PROJECT_ID = None
######################## subdomain specific settings ###########################
COURSE_LISTINGS = {}
SUBDOMAIN_BRANDING = {}
VIRTUAL_UNIVERSITIES = []
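# Purely illustrative (hypothetical subdomains and course ids): when
# FEATURES['SUBDOMAIN_COURSE_LISTINGS'] or FEATURES['SUBDOMAIN_BRANDING'] are
# enabled, environment-specific settings are expected to populate these along
# the lines of:
#
#   COURSE_LISTINGS = {
#       'default': ['MITx/6.002x/2012_Fall'],
#       'openuni': ['OpenUniX/Demo101/2014_T1'],
#   }
#   SUBDOMAIN_BRANDING = {
#       'openuni': 'OpenUniX',
#   }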
############# XBlock Configuration ##########
# Import after sys.path fixup
from xmodule.modulestore.inheritance import InheritanceMixin
from xmodule.modulestore import prefer_xmodules
from xmodule.x_module import XModuleMixin
# This should be moved into an XBlock Runtime/Application object
# once the responsibility of XBlock creation is moved out of modulestore - cpennington
XBLOCK_MIXINS = (LmsBlockMixin, InheritanceMixin, XModuleMixin)
# Allow any XBlock in the LMS
XBLOCK_SELECT_FUNCTION = prefer_xmodules
############# ModuleStore Configuration ##########
MODULESTORE_BRANCH = 'published-only'
CONTENTSTORE = None
DOC_STORE_CONFIG = {
'host': 'localhost',
'db': 'xmodule',
'collection': 'modulestore',
}
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
},
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': DATA_DIR,
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
]
}
}
}
#################### Python sandbox ############################################
CODE_JAIL = {
# Path to a sandboxed Python executable. None means don't bother.
'python_bin': None,
# User to run as in the sandbox.
'user': 'sandbox',
# Configurable limits.
'limits': {
# How many CPU seconds can jailed code use?
'CPU': 1,
},
}
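# A hypothetical deployment that actually sandboxes student code might configure
# this roughly as follows (path, user and limits are illustrative only):
#
#   CODE_JAIL = {
#       'python_bin': '/path/to/sandbox-venv/bin/python',
#       'user': 'sandbox',
#       'limits': {'CPU': 1},
#   }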
# Some courses are allowed to run unsafe code. This is a list of regexes, one
# of them must match the course id for that course to run unsafe code.
#
# For example:
#
# COURSES_WITH_UNSAFE_CODE = [
# r"Harvard/XY123.1/.*"
# ]
COURSES_WITH_UNSAFE_CODE = []
############################### DJANGO BUILT-INS ###############################
# Change DEBUG/TEMPLATE_DEBUG in your environment settings files, not here
DEBUG = False
TEMPLATE_DEBUG = False
USE_TZ = True
SESSION_COOKIE_SECURE = False
# CMS base
CMS_BASE = 'localhost:8001'
# Site info
SITE_ID = 1
SITE_NAME = "example.com"
HTTPS = 'on'
ROOT_URLCONF = 'lms.urls'
# NOTE: Please set ALLOWED_HOSTS to some sane value, as we do not allow the default '*'
# Platform Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = '[email protected]'
DEFAULT_FEEDBACK_EMAIL = '[email protected]'
SERVER_EMAIL = '[email protected]'
TECH_SUPPORT_EMAIL = '[email protected]'
CONTACT_EMAIL = '[email protected]'
BUGS_EMAIL = '[email protected]'
UNIVERSITY_EMAIL = '[email protected]'
PRESS_EMAIL = '[email protected]'
ADMINS = ()
MANAGERS = ADMINS
# Static content
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ENV_ROOT / "staticfiles"
STATICFILES_DIRS = [
COMMON_ROOT / "static",
PROJECT_ROOT / "static",
]
FAVICON_PATH = 'images/favicon.ico'
# Locale/Internationalization
TIME_ZONE = 'America/New_York' # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
LANGUAGE_CODE = 'en' # http://www.i18nguy.com/unicode/language-identifiers.html
# these languages display right to left
LANGUAGES_BIDI = ("en@rtl", "he", "ar", "fa", "ur", "fa-ir")
# Sourced from http://www.localeplanet.com/icu/ and wikipedia
LANGUAGES = (
('en', u'English'),
('en@rtl', u'English (right-to-left)'),
('eo', u'Dummy Language (Esperanto)'), # Dummy language used for testing
('fake2', u'Fake translations'), # Another dummy language for testing (not pushed to prod)
('am', u'አማርኛ'), # Amharic
('ar', u'العربية'), # Arabic
('az', u'azərbaycanca'), # Azerbaijani
('bg-bg', u'български (България)'), # Bulgarian (Bulgaria)
('bn-bd', u'বাংলা (বাংলাদেশ)'), # Bengali (Bangladesh)
('bn-in', u'বাংলা (ভারত)'), # Bengali (India)
('bs', u'bosanski'), # Bosnian
('ca', u'Català'), # Catalan
('ca@valencia', u'Català (València)'), # Catalan (Valencia)
('cs', u'Čeština'), # Czech
('cy', u'Cymraeg'), # Welsh
('da', u'dansk'), # Danish
('de-de', u'Deutsch (Deutschland)'), # German (Germany)
('el', u'Ελληνικά'), # Greek
('en-uk', u'English (United Kingdom)'), # English (United Kingdom)
('en@lolcat', u'LOLCAT English'), # LOLCAT English
('en@pirate', u'Pirate English'), # Pirate English
('es-419', u'Español (Latinoamérica)'), # Spanish (Latin America)
('es-ar', u'Español (Argentina)'), # Spanish (Argentina)
('es-ec', u'Español (Ecuador)'), # Spanish (Ecuador)
('es-es', u'Español (España)'), # Spanish (Spain)
('es-mx', u'Español (México)'), # Spanish (Mexico)
('es-pe', u'Español (Perú)'), # Spanish (Peru)
('et-ee', u'Eesti (Eesti)'), # Estonian (Estonia)
('eu-es', u'euskara (Espainia)'), # Basque (Spain)
('fa', u'فارسی'), # Persian
('fa-ir', u'فارسی (ایران)'), # Persian (Iran)
('fi-fi', u'Suomi (Suomi)'), # Finnish (Finland)
('fil', u'Filipino'), # Filipino
('fr', u'Français'), # French
('gl', u'Galego'), # Galician
('gu', u'ગુજરાતી'), # Gujarati
('he', u'עברית'), # Hebrew
('hi', u'हिन्दी'), # Hindi
('hr', u'hrvatski'), # Croatian
('hu', u'magyar'), # Hungarian
('hy-am', u'Հայերեն (Հայաստան)'), # Armenian (Armenia)
('id', u'Bahasa Indonesia'), # Indonesian
('it-it', u'Italiano (Italia)'), # Italian (Italy)
('ja-jp', u'日本語 (日本)'), # Japanese (Japan)
('kk-kz', u'қазақ тілі (Қазақстан)'), # Kazakh (Kazakhstan)
('km-kh', u'ភាសាខ្មែរ (កម្ពុជា)'), # Khmer (Cambodia)
('kn', u'ಕನ್ನಡ'), # Kannada
('ko-kr', u'한국어 (대한민국)'), # Korean (Korea)
('lt-lt', u'Lietuvių (Lietuva)'), # Lithuanian (Lithuania)
('ml', u'മലയാളം'), # Malayalam
('mn', u'Монгол хэл'), # Mongolian
('mr', u'मराठी'), # Marathi
('ms', u'Bahasa Melayu'), # Malay
('nb', u'Norsk bokmål'), # Norwegian Bokmål
('ne', u'नेपाली'), # Nepali
('nl-nl', u'Nederlands (Nederland)'), # Dutch (Netherlands)
('or', u'ଓଡ଼ିଆ'), # Oriya
('pl', u'Polski'), # Polish
('pt-br', u'Português (Brasil)'), # Portuguese (Brazil)
('pt-pt', u'Português (Portugal)'), # Portuguese (Portugal)
('ro', u'română'), # Romanian
('ru', u'Русский'), # Russian
('si', u'සිංහල'), # Sinhala
('sk', u'Slovenčina'), # Slovak
('sl', u'Slovenščina'), # Slovenian
('sq', u'shqip'), # Albanian
('sr', u'Српски'), # Serbian
('sv', u'svenska'), # Swedish
('sw', u'Kiswahili'), # Swahili
('ta', u'தமிழ்'), # Tamil
('te', u'తెలుగు'), # Telugu
('th', u'ไทย'), # Thai
('tr-tr', u'Türkçe (Türkiye)'), # Turkish (Turkey)
('uk', u'Українська'), # Ukrainian
('ur', u'اردو'), # Urdu
('vi', u'Tiếng Việt'), # Vietnamese
('uz', u'Ўзбек'), # Uzbek
('zh-cn', u'中文 (简体)'), # Chinese (China)
('zh-hk', u'中文 (香港)'), # Chinese (Hong Kong)
('zh-tw', u'中文 (台灣)'), # Chinese (Taiwan)
)
LANGUAGE_DICT = dict(LANGUAGES)
USE_I18N = True
USE_L10N = True
# Localization strings (e.g. django.po) are under this directory
LOCALE_PATHS = (REPO_ROOT + '/conf/locale',) # edx-platform/conf/locale/
# Messages
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Guidelines for translators
TRANSLATORS_GUIDE = 'https://github.com/edx/edx-platform/blob/master/docs/en_us/developers/source/i18n_translators_guide.rst'
#################################### GITHUB #######################################
# gitreload is used in LMS-workflow to pull content from github
# gitreload requests are only allowed from these IP addresses, which are
# the advertised public IPs of the github WebHook servers.
# These are listed, eg at https://github.com/edx/edx-platform/admin/hooks
ALLOWED_GITRELOAD_IPS = ['207.97.227.253', '50.57.128.197', '108.171.174.178']
#################################### AWS #######################################
# S3BotoStorage insists on a timeout for uploaded assets. We should make it
# permanent instead, but rather than trying to figure out exactly where that
# setting is, I'm just bumping the expiration time to something absurd (10
# years). This is only used if DEFAULT_FILE_STORAGE is overridden to use S3
# in the global settings.py
AWS_QUERYSTRING_EXPIRE = 10 * 365 * 24 * 60 * 60 # 10 years
################################# SIMPLEWIKI ###################################
SIMPLE_WIKI_REQUIRE_LOGIN_EDIT = True
SIMPLE_WIKI_REQUIRE_LOGIN_VIEW = False
################################# WIKI ###################################
from course_wiki import settings as course_wiki_settings
WIKI_ACCOUNT_HANDLING = False
WIKI_EDITOR = 'course_wiki.editors.CodeMirror'
WIKI_SHOW_MAX_CHILDREN = 0 # We don't use the little menu that shows children of an article in the breadcrumb
WIKI_ANONYMOUS = False # Don't allow anonymous access until the styling is figured out
WIKI_CAN_DELETE = course_wiki_settings.CAN_DELETE
WIKI_CAN_MODERATE = course_wiki_settings.CAN_MODERATE
WIKI_CAN_CHANGE_PERMISSIONS = course_wiki_settings.CAN_CHANGE_PERMISSIONS
WIKI_CAN_ASSIGN = course_wiki_settings.CAN_ASSIGN
WIKI_USE_BOOTSTRAP_SELECT_WIDGET = False
WIKI_LINK_LIVE_LOOKUPS = False
WIKI_LINK_DEFAULT_LEVEL = 2
##### Feedback submission mechanism #####
FEEDBACK_SUBMISSION_EMAIL = None
##### Zendesk #####
ZENDESK_URL = None
ZENDESK_USER = None
ZENDESK_API_KEY = None
##### EMBARGO #####
EMBARGO_SITE_REDIRECT_URL = None
##### shoppingcart Payment #####
PAYMENT_SUPPORT_EMAIL = '[email protected]'
##### Using cybersource by default #####
CC_PROCESSOR_NAME = 'CyberSource'
CC_PROCESSOR = {
'CyberSource': {
'SHARED_SECRET': '',
'MERCHANT_ID': '',
'SERIAL_NUMBER': '',
'ORDERPAGE_VERSION': '7',
'PURCHASE_ENDPOINT': '',
},
'CyberSource2': {
"PURCHASE_ENDPOINT": '',
"SECRET_KEY": '',
"ACCESS_KEY": '',
"PROFILE_ID": '',
}
}
# Setting for PAID_COURSE_REGISTRATION, DOES NOT AFFECT VERIFIED STUDENTS
PAID_COURSE_REGISTRATION_CURRENCY = ['usd', '$']
# Members of this group are allowed to generate payment reports
PAYMENT_REPORT_GENERATOR_GROUP = 'shoppingcart_report_access'
################################# open ended grading config #####################
# By setting up the default settings with an incorrect user name and password,
# we will get an error when attempting to connect.
OPEN_ENDED_GRADING_INTERFACE = {
'url': 'http://example.com/peer_grading',
'username': 'incorrect_user',
'password': 'incorrect_pass',
'staff_grading': 'staff_grading',
'peer_grading': 'peer_grading',
'grading_controller': 'grading_controller'
}
# Used for testing, debugging peer grading
MOCK_PEER_GRADING = False
# Used for testing, debugging staff grading
MOCK_STAFF_GRADING = False
################################# Jasmine ##################################
JASMINE_TEST_DIRECTORY = PROJECT_ROOT + '/static/coffee'
################################# Deprecation warnings #####################
# Ignore deprecation warnings (so we don't clutter Jenkins builds/production)
simplefilter('ignore')
################################# Middleware ###################################
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'staticfiles.finders.FileSystemFinder',
'staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'edxmako.makoloader.MakoFilesystemLoader',
'edxmako.makoloader.MakoAppDirectoriesLoader',
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'request_cache.middleware.RequestCache',
'microsite_configuration.middleware.MicrositeMiddleware',
'django_comment_client.middleware.AjaxExceptionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# Instead of AuthenticationMiddleware, we use a cached backed version
#'django.contrib.auth.middleware.AuthenticationMiddleware',
'cache_toolbox.middleware.CacheBackedAuthenticationMiddleware',
'student.middleware.UserStandingMiddleware',
'contentserver.middleware.StaticContentServer',
'crum.CurrentRequestUserMiddleware',
# Adds user tags to tracking events
# Must go before TrackMiddleware, to get the context set up
'user_api.middleware.UserTagsEventContextMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'track.middleware.TrackMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'splash.middleware.SplashMiddleware',
# Allows us to dark-launch particular languages
'dark_lang.middleware.DarkLangMiddleware',
'geoinfo.middleware.CountryMiddleware',
'embargo.middleware.EmbargoMiddleware',
# Allows us to set user preferences
# should be after DarkLangMiddleware
'lang_pref.middleware.LanguagePreferenceMiddleware',
# Detects user-requested locale from 'accept-language' header in http request
'django.middleware.locale.LocaleMiddleware',
'django.middleware.transaction.TransactionMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_comment_client.utils.ViewNameMiddleware',
'codejail.django_integration.ConfigureCodeJailMiddleware',
# catches any uncaught RateLimitExceptions and returns a 403 instead of a 500
'ratelimitbackend.middleware.RateLimitMiddleware',
# needs to run after locale middleware (or anything that modifies the request context)
'edxmako.middleware.MakoMiddleware',
# for expiring inactive sessions
'session_inactivity_timeout.middleware.SessionInactivityTimeout',
# use Django built in clickjacking protection
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# to redirect unenrolled students to the course info page
'courseware.middleware.RedirectUnenrolledMiddleware',
'course_wiki.middleware.WikiAccessMiddleware',
)
# Clickjacking protection can be enabled by setting this to 'DENY'
X_FRAME_OPTIONS = 'ALLOW'
############################### Pipeline #######################################
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
from rooted_paths import rooted_glob
courseware_js = (
[
'coffee/src/' + pth + '.js'
for pth in ['courseware', 'histogram', 'navigation', 'time']
] +
['js/' + pth + '.js' for pth in ['ajax-error']] +
sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/modules/**/*.js'))
)
# Before a student accesses courseware, we do not
# need many of the JS dependencies. This includes
# only the dependencies used everywhere in the LMS
# (including the dashboard/account/profile pages)
# Currently, this partially duplicates the "main vendor"
# JavaScript file, so only one of the two should be included
# on a page at any time.
# In the future, we will likely refactor this to use
# RequireJS and an optimizer.
base_vendor_js = [
'js/vendor/jquery.min.js',
'js/vendor/jquery.cookie.js',
'js/vendor/underscore-min.js'
]
main_vendor_js = base_vendor_js + [
'js/vendor/require.js',
'js/RequireJS-namespace-undefine.js',
'js/vendor/json2.js',
'js/vendor/jquery-ui.min.js',
'js/vendor/jquery.qtip.min.js',
'js/vendor/swfobject/swfobject.js',
'js/vendor/jquery.ba-bbq.min.js',
'js/vendor/ova/annotator-full.js',
'js/vendor/ova/annotator-full-firebase-auth.js',
'js/vendor/ova/video.dev.js',
'js/vendor/ova/vjs.youtube.js',
'js/vendor/ova/rangeslider.js',
'js/vendor/ova/share-annotator.js',
'js/vendor/ova/richText-annotator.js',
'js/vendor/ova/reply-annotator.js',
'js/vendor/ova/tags-annotator.js',
'js/vendor/ova/flagging-annotator.js',
'js/vendor/ova/diacritic-annotator.js',
'js/vendor/ova/grouping-annotator.js',
'js/vendor/ova/jquery-Watch.js',
'js/vendor/ova/openseadragon.js',
'js/vendor/ova/OpenSeaDragonAnnotation.js',
'js/vendor/ova/ova.js',
'js/vendor/ova/catch/js/catch.js',
'js/vendor/ova/catch/js/handlebars-1.1.2.js',
'js/vendor/URI.min.js',
]
dashboard_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/dashboard/**/*.js'))
discussion_js = sorted(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/discussion/**/*.js'))
staff_grading_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/staff_grading/**/*.js'))
open_ended_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/open_ended/**/*.js'))
notes_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/notes/**/*.js'))
instructor_dash_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/instructor_dashboard/**/*.js'))
# JavaScript used by the student account and profile pages
# These are not courseware, so they do not need many of the courseware-specific
# JavaScript modules.
student_account_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_account/**/*.js'))
student_profile_js = sorted(rooted_glob(PROJECT_ROOT / 'static', 'js/student_profile/**/*.js'))
PIPELINE_CSS = {
'style-vendor': {
'source_filenames': [
'css/vendor/font-awesome.css',
'css/vendor/jquery.qtip.min.css',
'css/vendor/responsive-carousel/responsive-carousel.css',
'css/vendor/responsive-carousel/responsive-carousel.slide.css',
],
'output_filename': 'css/lms-style-vendor.css',
},
'style-vendor-tinymce-content': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/content.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-content.css',
},
'style-vendor-tinymce-skin': {
'source_filenames': [
'js/vendor/tinymce/js/tinymce/skins/studio-tmce4/skin.min.css'
],
'output_filename': 'css/lms-style-vendor-tinymce-skin.css',
},
'style-app': {
'source_filenames': [
'sass/application.css',
'sass/ie.css'
],
'output_filename': 'css/lms-style-app.css',
},
'style-app-extend1': {
'source_filenames': [
'sass/application-extend1.css',
],
'output_filename': 'css/lms-style-app-extend1.css',
},
'style-app-extend2': {
'source_filenames': [
'sass/application-extend2.css',
],
'output_filename': 'css/lms-style-app-extend2.css',
},
'style-app-rtl': {
'source_filenames': [
'sass/application-rtl.css',
'sass/ie-rtl.css'
],
'output_filename': 'css/lms-style-app-rtl.css',
},
'style-app-extend1-rtl': {
'source_filenames': [
'sass/application-extend1-rtl.css',
],
'output_filename': 'css/lms-style-app-extend1-rtl.css',
},
'style-app-extend2-rtl': {
'source_filenames': [
'sass/application-extend2-rtl.css',
],
'output_filename': 'css/lms-style-app-extend2-rtl.css',
},
'style-course-vendor': {
'source_filenames': [
'js/vendor/CodeMirror/codemirror.css',
'css/vendor/jquery.treeview.css',
'css/vendor/ui-lightness/jquery-ui-1.8.22.custom.css',
],
'output_filename': 'css/lms-style-course-vendor.css',
},
'style-course': {
'source_filenames': [
'sass/course.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course.css',
},
'style-course-rtl': {
'source_filenames': [
'sass/course-rtl.css',
'xmodule/modules.css',
],
'output_filename': 'css/lms-style-course-rtl.css',
},
'style-xmodule-annotations': {
'source_filenames': [
'css/vendor/ova/annotator.css',
'css/vendor/ova/edx-annotator.css',
'css/vendor/ova/video-js.min.css',
'css/vendor/ova/rangeslider.css',
'css/vendor/ova/share-annotator.css',
'css/vendor/ova/richText-annotator.css',
'css/vendor/ova/tags-annotator.css',
'css/vendor/ova/flagging-annotator.css',
'css/vendor/ova/diacritic-annotator.css',
'css/vendor/ova/grouping-annotator.css',
'css/vendor/ova/ova.css',
'js/vendor/ova/catch/css/main.css'
],
'output_filename': 'css/lms-style-xmodule-annotations.css',
},
}
common_js = set(rooted_glob(COMMON_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
project_js = set(rooted_glob(PROJECT_ROOT / 'static', 'coffee/src/**/*.js')) - set(courseware_js + discussion_js + staff_grading_js + open_ended_js + notes_js + instructor_dash_js)
PIPELINE_JS = {
'application': {
# Application will contain all paths not in courseware_only_js
'source_filenames': sorted(common_js) + sorted(project_js) + [
'js/form.ext.js',
'js/my_courses_dropdown.js',
'js/toggle_login_modal.js',
'js/sticky_filter.js',
'js/query-params.js',
'js/src/utility.js',
'js/src/accessibility_tools.js',
'js/src/ie_shim.js',
'js/src/string_utils.js',
],
'output_filename': 'js/lms-application.js',
},
'courseware': {
'source_filenames': courseware_js,
'output_filename': 'js/lms-courseware.js',
},
'base_vendor': {
'source_filenames': base_vendor_js,
'output_filename': 'js/lms-base-vendor.js',
},
'main_vendor': {
'source_filenames': main_vendor_js,
'output_filename': 'js/lms-main_vendor.js',
},
'module-descriptor-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static/', 'xmodule/descriptors/js/*.js'),
'output_filename': 'js/lms-module-descriptors.js',
},
'module-js': {
'source_filenames': rooted_glob(COMMON_ROOT / 'static', 'xmodule/modules/js/*.js'),
'output_filename': 'js/lms-modules.js',
},
'discussion': {
'source_filenames': discussion_js,
'output_filename': 'js/discussion.js',
},
'staff_grading': {
'source_filenames': staff_grading_js,
'output_filename': 'js/staff_grading.js',
},
'open_ended': {
'source_filenames': open_ended_js,
'output_filename': 'js/open_ended.js',
},
'notes': {
'source_filenames': notes_js,
'output_filename': 'js/notes.js',
},
'instructor_dash': {
'source_filenames': instructor_dash_js,
'output_filename': 'js/instructor_dash.js',
},
'dashboard': {
'source_filenames': dashboard_js,
'output_filename': 'js/dashboard.js'
},
'student_account': {
'source_filenames': student_account_js,
'output_filename': 'js/student_account.js'
},
'student_profile': {
'source_filenames': student_profile_js,
'output_filename': 'js/student_profile.js'
},
}
PIPELINE_DISABLE_WRAPPER = True
# Compile all coffee files in course data directories if they are out of date.
# TODO: Remove this once we move data into Mongo. This is only temporary while
# course data directories are still in use.
if os.path.isdir(DATA_DIR):
for course_dir in os.listdir(DATA_DIR):
js_dir = DATA_DIR / course_dir / "js"
if not os.path.isdir(js_dir):
continue
for filename in os.listdir(js_dir):
if filename.endswith('coffee'):
new_filename = os.path.splitext(filename)[0] + ".js"
if os.path.exists(js_dir / new_filename):
coffee_timestamp = os.stat(js_dir / filename).st_mtime
js_timestamp = os.stat(js_dir / new_filename).st_mtime
if coffee_timestamp <= js_timestamp:
continue
os.system("rm %s" % (js_dir / new_filename))
os.system("coffee -c %s" % (js_dir / filename))
PIPELINE_CSS_COMPRESSOR = None
PIPELINE_JS_COMPRESSOR = "pipeline.compressors.uglifyjs.UglifyJSCompressor"
STATICFILES_IGNORE_PATTERNS = (
"sass/*",
"coffee/*",
# Symlinks used by js-test-tool
"xmodule_js",
"common_static",
)
PIPELINE_UGLIFYJS_BINARY = 'node_modules/.bin/uglifyjs'
# Setting that will only affect the edX version of django-pipeline until our changes are merged upstream
PIPELINE_COMPILE_INPLACE = True
################################# CELERY ######################################
# Message configuration
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_MESSAGE_COMPRESSION = 'gzip'
# Results configuration
CELERY_IGNORE_RESULT = False
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
# Events configuration
CELERY_TRACK_STARTED = True
CELERY_SEND_EVENTS = True
CELERY_SEND_TASK_SENT_EVENT = True
# Exchange configuration
CELERY_DEFAULT_EXCHANGE = 'edx.core'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
# Queues configuration
HIGH_PRIORITY_QUEUE = 'edx.core.high'
DEFAULT_PRIORITY_QUEUE = 'edx.core.default'
LOW_PRIORITY_QUEUE = 'edx.core.low'
HIGH_MEM_QUEUE = 'edx.core.high_mem'
CELERY_QUEUE_HA_POLICY = 'all'
CELERY_CREATE_MISSING_QUEUES = True
CELERY_DEFAULT_QUEUE = DEFAULT_PRIORITY_QUEUE
CELERY_DEFAULT_ROUTING_KEY = DEFAULT_PRIORITY_QUEUE
CELERY_QUEUES = {
HIGH_PRIORITY_QUEUE: {},
LOW_PRIORITY_QUEUE: {},
DEFAULT_PRIORITY_QUEUE: {},
HIGH_MEM_QUEUE: {},
}
# let logging work as configured:
CELERYD_HIJACK_ROOT_LOGGER = False
################################ Bulk Email ###################################
# Suffix used to construct 'from' email address for bulk emails.
# A course-specific identifier is prepended.
BULK_EMAIL_DEFAULT_FROM_EMAIL = '[email protected]'
# Parameters for breaking down course enrollment into subtasks.
BULK_EMAIL_EMAILS_PER_TASK = 100
# Initial delay used for retrying tasks. Additional retries use
# longer delays. Value is in seconds.
BULK_EMAIL_DEFAULT_RETRY_DELAY = 30
# Maximum number of retries per task for errors that are not related
# to throttling.
BULK_EMAIL_MAX_RETRIES = 5
# Maximum number of retries per task for errors that are related to
# throttling. If this is not set, then there is no cap on such retries.
BULK_EMAIL_INFINITE_RETRY_CAP = 1000
# We want Bulk Email running on the high-priority queue, so we define the
# routing key that points to it. At the moment, the name is the same.
BULK_EMAIL_ROUTING_KEY = HIGH_PRIORITY_QUEUE
# Flag to indicate if individual email addresses should be logged as they are sent
# a bulk email message.
BULK_EMAIL_LOG_SENT_EMAILS = False
# Delay in seconds to sleep between individual mail messages being sent,
# when a bulk email task is retried for rate-related reasons. Choose this
# value depending on the number of workers that might be sending email in
# parallel, and what the SES rate is.
BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS = 0.02
############################## Video ##########################################
YOUTUBE = {
# YouTube JavaScript API
'API': 'www.youtube.com/iframe_api',
# URL to test YouTube availability
'TEST_URL': 'gdata.youtube.com/feeds/api/videos/',
# Current youtube api for requesting transcripts.
# For example: http://video.google.com/timedtext?lang=en&v=j_jEn79vS3g.
'TEXT_API': {
'url': 'video.google.com/timedtext',
'params': {
'lang': 'en',
'v': 'set_youtube_id_of_11_symbols_here',
},
},
}
################################### APPS ######################################
INSTALLED_APPS = (
# Standard ones that are always installed...
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'djcelery',
'south',
# Database-backed configuration
'config_models',
# Monitor the status of services
'service_status',
# For asset pipelining
'edxmako',
'pipeline',
'staticfiles',
'static_replace',
# Our courseware
'circuit',
'courseware',
'student',
'static_template_view',
'staticbook',
'track',
'eventtracking.django',
'util',
'certificates',
'dashboard',
'instructor',
'instructor_task',
'open_ended_grading',
'psychometrics',
'licenses',
'course_groups',
'bulk_email',
# External auth (OpenID, shib)
'external_auth',
'django_openid_auth',
# OAuth2 Provider
'provider',
'provider.oauth2',
'oauth2_provider',
# For the wiki
'wiki', # The new django-wiki from benjaoming
'django_notify',
'course_wiki', # Our customizations
'mptt',
'sekizai',
#'wiki.plugins.attachments',
'wiki.plugins.links',
'wiki.plugins.notifications',
'course_wiki.plugins.markdownedx',
# Foldit integration
'foldit',
# For testing
'django.contrib.admin', # only used in DEBUG mode
'django_nose',
'debug',
# Discussion forums
'django_comment_client',
'django_comment_common',
'notes',
# Splash screen
'splash',
# Monitoring
'datadog',
# User API
'rest_framework',
'user_api',
# Shopping cart
'shoppingcart',
# Notification preferences setting
'notification_prefs',
'notifier_api',
# Different Course Modes
'course_modes',
# Student Identity Verification
'verify_student',
# Dark-launching languages
'dark_lang',
# Microsite configuration
'microsite_configuration',
# Student Identity Reverification
'reverification',
'embargo',
# Monitoring functionality
'monitoring',
# Course action state
'course_action_state',
# Additional problem types
'edx_jsme', # Molecular Structure
# Country list
'django_countries',
# edX Mobile API
'mobile_api',
)
######################### MARKETING SITE ###############################
EDXMKTG_COOKIE_NAME = 'edxloggedin'
MKTG_URLS = {}
MKTG_URL_LINK_MAP = {
'ABOUT': 'about_edx',
'CONTACT': 'contact',
'FAQ': 'help_edx',
'COURSES': 'courses',
'ROOT': 'root',
'TOS': 'tos',
'HONOR': 'honor',
'PRIVACY': 'privacy_edx',
'JOBS': 'jobs',
'NEWS': 'news',
'PRESS': 'press',
'BLOG': 'edx-blog',
'DONATE': 'donate',
# Verified Certificates
'WHAT_IS_VERIFIED_CERT': 'verified-certificate',
}
################# Student Verification #################
VERIFY_STUDENT = {
"DAYS_GOOD_FOR": 365, # How many days is a verficiation good for?
}
### This enables the Metrics tab for the Instructor dashboard ###########
FEATURES['CLASS_DASHBOARD'] = False
if FEATURES.get('CLASS_DASHBOARD'):
INSTALLED_APPS += ('class_dashboard',)
######################## CAS authentication ###########################
if FEATURES.get('AUTH_USE_CAS'):
CAS_SERVER_URL = 'https://provide_your_cas_url_here'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas.backends.CASBackend',
)
INSTALLED_APPS += ('django_cas',)
MIDDLEWARE_CLASSES += ('django_cas.middleware.CASMiddleware',)
###################### Registration ##################################
# For each of the fields, give one of the following values:
# - 'required': to display the field, and make it mandatory
# - 'optional': to display the field, and make it non-mandatory
# - 'hidden': to not display the field
REGISTRATION_EXTRA_FIELDS = {
'level_of_education': 'optional',
'gender': 'optional',
'year_of_birth': 'optional',
'mailing_address': 'optional',
'goals': 'optional',
'honor_code': 'required',
'city': 'hidden',
'country': 'hidden',
}
########################## CERTIFICATE NAME ########################
CERT_NAME_SHORT = "Certificate"
CERT_NAME_LONG = "Certificate of Achievement"
###################### Grade Downloads ######################
GRADES_DOWNLOAD_ROUTING_KEY = HIGH_MEM_QUEUE
GRADES_DOWNLOAD = {
'STORAGE_TYPE': 'localfs',
'BUCKET': 'edx-grades',
'ROOT_PATH': '/tmp/edx-s3/grades',
}
######################## PROGRESS SUCCESS BUTTON ##############################
# The following fields are available in the URL: {course_id} {student_id}
PROGRESS_SUCCESS_BUTTON_URL = 'http://<domain>/<path>/{course_id}'
PROGRESS_SUCCESS_BUTTON_TEXT_OVERRIDE = None
#### PASSWORD POLICY SETTINGS #####
PASSWORD_MIN_LENGTH = None
PASSWORD_MAX_LENGTH = None
PASSWORD_COMPLEXITY = {}
PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD = None
PASSWORD_DICTIONARY = []
##################### LinkedIn #####################
INSTALLED_APPS += ('django_openid_auth',)
############################ LinkedIn Integration #############################
INSTALLED_APPS += ('linkedin',)
LINKEDIN_API = {
'EMAIL_WHITELIST': [],
'COMPANY_ID': '2746406',
}
############################ ORA 2 ############################################
# By default, don't use a file prefix
ORA2_FILE_PREFIX = None
# Default File Upload Storage bucket and prefix. Used by the FileUpload Service.
FILE_UPLOAD_STORAGE_BUCKET_NAME = 'edxuploads'
FILE_UPLOAD_STORAGE_PREFIX = 'submissions_attachments'
##### ACCOUNT LOCKOUT DEFAULT PARAMETERS #####
MAX_FAILED_LOGIN_ATTEMPTS_ALLOWED = 5
MAX_FAILED_LOGIN_ATTEMPTS_LOCKOUT_PERIOD_SECS = 15 * 60
##### LMS DEADLINE DISPLAY TIME_ZONE #######
TIME_ZONE_DISPLAYED_FOR_DEADLINES = 'UTC'
# Source:
# http://loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt according to http://en.wikipedia.org/wiki/ISO_639-1
ALL_LANGUAGES = (
[u"aa", u"Afar"],
[u"ab", u"Abkhazian"],
[u"af", u"Afrikaans"],
[u"ak", u"Akan"],
[u"sq", u"Albanian"],
[u"am", u"Amharic"],
[u"ar", u"Arabic"],
[u"an", u"Aragonese"],
[u"hy", u"Armenian"],
[u"as", u"Assamese"],
[u"av", u"Avaric"],
[u"ae", u"Avestan"],
[u"ay", u"Aymara"],
[u"az", u"Azerbaijani"],
[u"ba", u"Bashkir"],
[u"bm", u"Bambara"],
[u"eu", u"Basque"],
[u"be", u"Belarusian"],
[u"bn", u"Bengali"],
[u"bh", u"Bihari languages"],
[u"bi", u"Bislama"],
[u"bs", u"Bosnian"],
[u"br", u"Breton"],
[u"bg", u"Bulgarian"],
[u"my", u"Burmese"],
[u"ca", u"Catalan"],
[u"ch", u"Chamorro"],
[u"ce", u"Chechen"],
[u"zh", u"Chinese"],
[u"cu", u"Church Slavic"],
[u"cv", u"Chuvash"],
[u"kw", u"Cornish"],
[u"co", u"Corsican"],
[u"cr", u"Cree"],
[u"cs", u"Czech"],
[u"da", u"Danish"],
[u"dv", u"Divehi"],
[u"nl", u"Dutch"],
[u"dz", u"Dzongkha"],
[u"en", u"English"],
[u"eo", u"Esperanto"],
[u"et", u"Estonian"],
[u"ee", u"Ewe"],
[u"fo", u"Faroese"],
[u"fj", u"Fijian"],
[u"fi", u"Finnish"],
[u"fr", u"French"],
[u"fy", u"Western Frisian"],
[u"ff", u"Fulah"],
[u"ka", u"Georgian"],
[u"de", u"German"],
[u"gd", u"Gaelic"],
[u"ga", u"Irish"],
[u"gl", u"Galician"],
[u"gv", u"Manx"],
[u"el", u"Greek"],
[u"gn", u"Guarani"],
[u"gu", u"Gujarati"],
[u"ht", u"Haitian"],
[u"ha", u"Hausa"],
[u"he", u"Hebrew"],
[u"hz", u"Herero"],
[u"hi", u"Hindi"],
[u"ho", u"Hiri Motu"],
[u"hr", u"Croatian"],
[u"hu", u"Hungarian"],
[u"ig", u"Igbo"],
[u"is", u"Icelandic"],
[u"io", u"Ido"],
[u"ii", u"Sichuan Yi"],
[u"iu", u"Inuktitut"],
[u"ie", u"Interlingue"],
[u"ia", u"Interlingua"],
[u"id", u"Indonesian"],
[u"ik", u"Inupiaq"],
[u"it", u"Italian"],
[u"jv", u"Javanese"],
[u"ja", u"Japanese"],
[u"kl", u"Kalaallisut"],
[u"kn", u"Kannada"],
[u"ks", u"Kashmiri"],
[u"kr", u"Kanuri"],
[u"kk", u"Kazakh"],
[u"km", u"Central Khmer"],
[u"ki", u"Kikuyu"],
[u"rw", u"Kinyarwanda"],
[u"ky", u"Kirghiz"],
[u"kv", u"Komi"],
[u"kg", u"Kongo"],
[u"ko", u"Korean"],
[u"kj", u"Kuanyama"],
[u"ku", u"Kurdish"],
[u"lo", u"Lao"],
[u"la", u"Latin"],
[u"lv", u"Latvian"],
[u"li", u"Limburgan"],
[u"ln", u"Lingala"],
[u"lt", u"Lithuanian"],
[u"lb", u"Luxembourgish"],
[u"lu", u"Luba-Katanga"],
[u"lg", u"Ganda"],
[u"mk", u"Macedonian"],
[u"mh", u"Marshallese"],
[u"ml", u"Malayalam"],
[u"mi", u"Maori"],
[u"mr", u"Marathi"],
[u"ms", u"Malay"],
[u"mg", u"Malagasy"],
[u"mt", u"Maltese"],
[u"mn", u"Mongolian"],
[u"na", u"Nauru"],
[u"nv", u"Navajo"],
[u"nr", u"Ndebele, South"],
[u"nd", u"Ndebele, North"],
[u"ng", u"Ndonga"],
[u"ne", u"Nepali"],
[u"nn", u"Norwegian Nynorsk"],
[u"nb", u"Bokmål, Norwegian"],
[u"no", u"Norwegian"],
[u"ny", u"Chichewa"],
[u"oc", u"Occitan"],
[u"oj", u"Ojibwa"],
[u"or", u"Oriya"],
[u"om", u"Oromo"],
[u"os", u"Ossetian"],
[u"pa", u"Panjabi"],
[u"fa", u"Persian"],
[u"pi", u"Pali"],
[u"pl", u"Polish"],
[u"pt", u"Portuguese"],
[u"ps", u"Pushto"],
[u"qu", u"Quechua"],
[u"rm", u"Romansh"],
[u"ro", u"Romanian"],
[u"rn", u"Rundi"],
[u"ru", u"Russian"],
[u"sg", u"Sango"],
[u"sa", u"Sanskrit"],
[u"si", u"Sinhala"],
[u"sk", u"Slovak"],
[u"sl", u"Slovenian"],
[u"se", u"Northern Sami"],
[u"sm", u"Samoan"],
[u"sn", u"Shona"],
[u"sd", u"Sindhi"],
[u"so", u"Somali"],
[u"st", u"Sotho, Southern"],
[u"es", u"Spanish"],
[u"sc", u"Sardinian"],
[u"sr", u"Serbian"],
[u"ss", u"Swati"],
[u"su", u"Sundanese"],
[u"sw", u"Swahili"],
[u"sv", u"Swedish"],
[u"ty", u"Tahitian"],
[u"ta", u"Tamil"],
[u"tt", u"Tatar"],
[u"te", u"Telugu"],
[u"tg", u"Tajik"],
[u"tl", u"Tagalog"],
[u"th", u"Thai"],
[u"bo", u"Tibetan"],
[u"ti", u"Tigrinya"],
[u"to", u"Tonga (Tonga Islands)"],
[u"tn", u"Tswana"],
[u"ts", u"Tsonga"],
[u"tk", u"Turkmen"],
[u"tr", u"Turkish"],
[u"tw", u"Twi"],
[u"ug", u"Uighur"],
[u"uk", u"Ukrainian"],
[u"ur", u"Urdu"],
[u"uz", u"Uzbek"],
[u"ve", u"Venda"],
[u"vi", u"Vietnamese"],
[u"vo", u"Volapük"],
[u"cy", u"Welsh"],
[u"wa", u"Walloon"],
[u"wo", u"Wolof"],
[u"xh", u"Xhosa"],
[u"yi", u"Yiddish"],
[u"yo", u"Yoruba"],
[u"za", u"Zhuang"],
[u"zu", u"Zulu"]
)
### Apps only installed in some instances
OPTIONAL_APPS = (
'mentoring',
# edx-ora2
'submissions',
'openassessment',
'openassessment.assessment',
'openassessment.fileupload',
'openassessment.workflow',
'openassessment.xblock',
# edxval
'edxval'
)
for app_name in OPTIONAL_APPS:
# First attempt to only find the module rather than actually importing it,
# to avoid circular references - only try to import if it can't be found
# by find_module, which doesn't work with import hooks
try:
imp.find_module(app_name)
except ImportError:
try:
__import__(app_name)
except ImportError:
continue
INSTALLED_APPS += (app_name,)
# Stub for third_party_auth options.
# See common/djangoapps/third_party_auth/settings.py for configuration details.
THIRD_PARTY_AUTH = {}
### ADVANCED_SECURITY_CONFIG
# Empty by default
ADVANCED_SECURITY_CONFIG = {}
### External auth usage -- prefixes for ENROLLMENT_DOMAIN
SHIBBOLETH_DOMAIN_PREFIX = 'shib:'
OPENID_DOMAIN_PREFIX = 'openid:'
### Analytics data api settings
ANALYTICS_DATA_URL = ""
ANALYTICS_DATA_TOKEN = ""
ANALYTICS_DASHBOARD_URL = ""
ANALYTICS_DASHBOARD_NAME = PLATFORM_NAME + " Insights"
# REGISTRATION CODES DISPLAY INFORMATION SUBSTITUTIONS IN THE INVOICE ATTACHMENT
INVOICE_CORP_ADDRESS = "Please place your corporate address\nin this configuration"
INVOICE_PAYMENT_INSTRUCTIONS = "This is where you can\nput directions for people\nbuying registration codes"
# Country code overrides
# Used by django-countries
COUNTRIES_OVERRIDE = {
"TW": _("Taiwan"),
}
| agpl-3.0 |
sachintyagi22/spark | examples/src/main/python/ml/generalized_linear_regression_example.py | 76 | 2506 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.regression import GeneralizedLinearRegression
# $example off$
"""
An example demonstrating generalized linear regression.
Run with:
bin/spark-submit examples/src/main/python/ml/generalized_linear_regression_example.py
"""
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("GeneralizedLinearRegressionExample")\
.getOrCreate()
# $example on$
# Load training data
dataset = spark.read.format("libsvm")\
.load("data/mllib/sample_linear_regression_data.txt")
glr = GeneralizedLinearRegression(family="gaussian", link="identity", maxIter=10, regParam=0.3)
# Fit the model
model = glr.fit(dataset)
# Print the coefficients and intercept for generalized linear regression model
print("Coefficients: " + str(model.coefficients))
print("Intercept: " + str(model.intercept))
# Summarize the model over the training set and print out some metrics
summary = model.summary
print("Coefficient Standard Errors: " + str(summary.coefficientStandardErrors))
print("T Values: " + str(summary.tValues))
print("P Values: " + str(summary.pValues))
print("Dispersion: " + str(summary.dispersion))
print("Null Deviance: " + str(summary.nullDeviance))
print("Residual Degree Of Freedom Null: " + str(summary.residualDegreeOfFreedomNull))
print("Deviance: " + str(summary.deviance))
print("Residual Degree Of Freedom: " + str(summary.residualDegreeOfFreedom))
print("AIC: " + str(summary.aic))
print("Deviance Residuals: ")
summary.residuals().show()
# $example off$
spark.stop()
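# --- Illustrative note (not part of the original example) ---
# After fitting, the model could also be applied to a DataFrame with the
# standard pyspark.ml transform() API, assuming `model` and `dataset` as
# defined above, e.g.:
#
#     predictions = model.transform(dataset)
#     predictions.select("label", "prediction").show(5)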
| apache-2.0 |
hitszxp/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 40 | 12814 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
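# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the scikit-learn test suite): the expected
# values above follow the standard RANSAC trial-count formula
#     N = ceil(log(1 - probability) / log(1 - inlier_ratio ** min_samples))
# e.g. 95% inliers with min_samples=2 and probability=0.99 gives
#     ceil(log(0.01) / log(1 - 0.95 ** 2)) = ceil(1.98) = 2,
# matching the value asserted above. Degenerate inputs (no outliers, or a
# probability of exactly 0 or 1) are clipped separately by the real
# implementation in sklearn.linear_model.ransac._dynamic_max_trials.
def _illustrative_max_trials(n_inliers, n_samples, min_samples, probability):
    inlier_ratio = n_inliers / float(n_samples)
    return int(np.ceil(np.log(1 - probability) /
                       np.log(1 - inlier_ratio ** min_samples)))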
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause |
TheWardoctor/Wardoctors-repo | script.module.fantastic/lib/resources/lib/sources/en/to_be_fixed/needsfixing/crazy.py | 1 | 6406 | # -*- coding: utf-8 -*-
'''
fantastic Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,base64
from resources.lib.modules import control
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['crazy4tv.com', 'crazy4ad.in']
self.base_link = 'http://crazy4tv.com'
self.search_link = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.base_link, url)
scraper = cfscrape.create_scraper()
r = scraper.get(url).content
posts = client.parseDOM(r, 'item')
hostDict = hostprDict + hostDict
print posts
items = []
for post in posts:
try:
print post
items += zip(client.parseDOM(post, 'title'), client.parseDOM(post, 'link'))
except:
pass
items = [(i[0], i[1]) for i in items if data['year'] in i[0]]
print items[:1]
for item in items:
try:
name = item[0]
name = client.replaceHTMLCodes(name)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if not y == hdlr: raise Exception()
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = 'HD'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
info = []
if '3d' in fmt: info.append('3D')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)', name)[-1]
div = 1 if size.endswith(' GB') else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
if any(i in ['hevc', 'h265', 'x265'] for i in fmt): info.append('HEVC')
info = ' | '.join(info)
url = item[1]
if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
| apache-2.0 |
vipullakhani/mi-instrument | mi/dataset/parser/dosta_ln_auv.py | 8 | 3170 | """
@package mi.dataset.parser
@file marine-integrations/mi/dataset/parser/dosta_ln_auv.py
@author Jeff Roy
@brief Parser and particle Classes and tools for the dosta_ln_auv data
Release notes:
initial release
"""
__author__ = 'Jeff Roy'
__license__ = 'Apache 2.0'
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.parser.auv_common import \
AuvCommonParticle, \
AuvCommonParser, \
compute_timestamp
# The structure below is a list of tuples
# Each tuple consists of
# parameter name, index into raw data parts list, encoding function
DOSTA_LN_AUV_PARAM_MAP = [
# message ID is typically index 0
('mission_epoch', 1, int),
('auv_latitude', 2, float),
('auv_longitude', 3, float),
('mission_time', 4, int),
('m_depth', 5, float),
('salinity', 6, float),
('product_number', 7, int),
('serial_number', 8, str),
('estimated_oxygen_concentration', 9, float),
('estimated_oxygen_saturation', 10, float),
('optode_temperature', 11, float),
('calibrated_phase', 12, float),
('blue_phase', 13, float),
('red_phase', 14, float),
('blue_amplitude', 15, float),
('b_pot', 16, float),
('red_amplitude', 17, float),
('raw_temperature', 18, float),
('calculated_oxygen_concentration', 19, float),
('calculated_oxygen_saturation', 20, float),
('external_temperature', 21, float)
]
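# Illustrative sketch (not part of the parser): a map of
# (name, index, encoding function) tuples like the one above can be applied
# to an already-split record to build a parameter dictionary. The sample
# field values below are made up purely for illustration.
def _example_apply_param_map():
    sample_fields = ['1109', '1452038400', '40.0', '-70.0']  # hypothetical record
    example_map = [('mission_epoch', 1, int),
                   ('auv_latitude', 2, float),
                   ('auv_longitude', 3, float)]
    return dict((name, encode(sample_fields[index]))
                for name, index, encode in example_map)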
class DostaLnAuvInstrumentParticle(AuvCommonParticle):
_auv_param_map = DOSTA_LN_AUV_PARAM_MAP
# must provide a parameter map for _build_parsed_values
class DostaLnAuvTelemeteredParticle(DostaLnAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "dosta_ln_auv_instrument"
class DostaLnAuvRecoveredParticle(DostaLnAuvInstrumentParticle):
# set the data_particle_type for the DataParticle class
_data_particle_type = "dosta_ln_auv_instrument_recovered"
DOSTA_LN_AUV_ID = '1109'  # message ID of dosta_ln records
DOSTA_LN_AUV_FIELD_COUNT = 22  # number of expected fields in a dosta_ln record
DOSTA_LN_AUV_TELEMETERED_MESSAGE_MAP = [(DOSTA_LN_AUV_ID,
DOSTA_LN_AUV_FIELD_COUNT,
compute_timestamp,
DostaLnAuvTelemeteredParticle)]
DOSTA_LN_AUV_RECOVERED_MESSAGE_MAP = [(DOSTA_LN_AUV_ID,
DOSTA_LN_AUV_FIELD_COUNT,
compute_timestamp,
DostaLnAuvRecoveredParticle)]
class DostaLnAuvParser(AuvCommonParser):
def __init__(self,
stream_handle,
exception_callback,
is_telemetered):
if is_telemetered:
message_map = DOSTA_LN_AUV_TELEMETERED_MESSAGE_MAP
else:
message_map = DOSTA_LN_AUV_RECOVERED_MESSAGE_MAP
# provide message ID and # of fields to parent class
super(DostaLnAuvParser, self).__init__(stream_handle,
exception_callback,
message_map)
| bsd-2-clause |
stelfrich/openmicroscopy | components/tests/ui/library/python/ImageCheckLibrary.py | 14 | 1094 |
import Image
from numpy import asarray
def crop_image(path, cropX, cropY, cropW, cropH):
image = Image.open(path)
x = int(cropX)
y = int(cropY)
x2 = int(cropW) + x
y2 = int(cropH) + y
img = image.crop((x, y, x2, y2))
img.save(path)
def image_should_be_blank(path, expected=True):
image = Image.open(path)
image.save(path) # avoids errors on .split
blank = True
minVals = []
maxVals = []
for channel in image.split():
plane = asarray(channel)
pMin = plane.min()
pMax = plane.max()
minVals.append(pMin)
maxVals.append(pMax)
if pMin != pMax:
blank = False
if expected:
if not blank:
raise AssertionError("Image %s is not blank. min: %s, max: %s"
% (path, minVals, maxVals))
else:
if blank:
raise AssertionError("Image %s is blank. min: %s, max: %s"
% (path, minVals, maxVals))
def image_should_not_be_blank(path):
    image_should_be_blank(path, expected=False)
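# Illustrative note (an assumption about how this keyword library is used,
# not part of the original file): Robot Framework exposes these functions as
# keywords, so a test case might read roughly:
#
#   *** Settings ***
#   Library    ImageCheckLibrary.py
#
#   *** Test Cases ***
#   Cropped Region Should Contain Pixels
#       Crop Image                   ${SCREENSHOT}    10    10    100    100
#       Image Should Not Be Blank    ${SCREENSHOT}
#
# Keyword names are derived from the Python function names.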
| gpl-2.0 |
mrrrgn/AutobahnPython | examples/twisted/wamp/basic/pubsub/complex/backend.py | 8 | 1506 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import random
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import SubscribeOptions
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component that publishes events with no payload
and with complex payloads every second.
"""
@inlineCallbacks
def onJoin(self, details):
counter = 0
while True:
self.publish('com.myapp.heartbeat')
obj = {'counter': counter, 'foo': [1, 2, 3]}
self.publish('com.myapp.topic2', random.randint(0, 100), 23, c = "Hello", d = obj)
counter += 1
yield sleep(1)
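# Illustrative note (not part of this example): a matching subscriber
# component could receive these events with ApplicationSession.subscribe,
# roughly as sketched below (hypothetical handler and class names):
#
#     class Listener(ApplicationSession):
#
#         @inlineCallbacks
#         def onJoin(self, details):
#             def on_topic2(a, b, c=None, d=None):
#                 print("event received: {} {} {} {}".format(a, b, c, d))
#             yield self.subscribe(on_topic2, 'com.myapp.topic2')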
| apache-2.0 |
jettisonjoe/openhtf | examples/phase_groups.py | 2 | 4400 | # Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example OpenHTF Phase Groups.
PhaseGroups are used to control phase shortcutting due to terminal errors to
better guarantee when teardown phases run.
"""
import openhtf as htf
def setup_phase(test):
test.logger.info('Setup in a group.')
def main_phase(test):
test.logger.info('This is a main phase.')
def teardown_phase(test):
test.logger.info('Teardown phase.')
def inner_main_phase(test):
test.logger.info('Inner main phase.')
def inner_teardown_phase(test):
test.logger.info('Inner teardown phase.')
def error_setup_phase(test):
test.logger.info('Error in setup phase.')
return htf.PhaseResult.STOP
def error_main_phase(test):
test.logger.info('Error in main phase.')
return htf.PhaseResult.STOP
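# Illustrative note (not part of the example): all of the runners below build
# a test from the same basic shape, roughly
#     htf.Test(htf.PhaseGroup(setup=[...], main=[...], teardown=[...]))
# Teardown phases run whenever their group was entered (its setup phases
# succeeded), even if a later main phase returns htf.PhaseResult.STOP.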
def run_basic_group():
"""Run the basic phase group example.
In this example, there are no terminal phases; all phases are run.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_setup_error_group():
"""Run the phase group example where an error occurs in a setup phase.
The terminal setup phase shortcuts the test. The main phases are
skipped. The PhaseGroup is not entered, so the teardown phases are also
skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[error_setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_main_error_group():
"""Run the phase group example where an error occurs in a main phase.
The main phase in this example is terminal. The PhaseGroup was entered
because the setup phases ran without error, so the teardown phases are run.
The other main phase is skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[error_main_phase, main_phase],
teardown=[teardown_phase],
))
test.execute()
def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
  are run in the following order:
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute()
def run_nested_error_groups():
"""Run nested groups example where an error occurs in nested main phase.
In this example, the first main phase in the nested PhaseGroup errors out.
The other inner main phase is skipped, as is the outer main phase. Both
PhaseGroups were entered, so both teardown phases are run.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
error_main_phase, main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute()
def run_nested_error_skip_unentered_groups():
"""Run nested groups example where an error occurs in outer main phase.
Lastly, the first main phase in the outer PhaseGroup errors out. This skips
the nested PhaseGroup and the other outer main phase. The outer PhaseGroup
was entered, so its teardown phase runs.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
error_main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute()
if __name__ == '__main__':
run_basic_group()
run_setup_error_group()
run_main_error_group()
run_nested_groups()
run_nested_error_groups()
run_nested_error_skip_unentered_groups()
| apache-2.0 |
LeeMendelowitz/malign_viz | server/cors.py | 8 | 1654 | from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator | gpl-3.0 |
hehongliang/tensorflow | tensorflow/contrib/learn/python/learn/estimators/rnn_common.py | 42 | 12923 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common operations for RNN Estimators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import metrics
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# NOTE(jtbates): As of February 10, 2017, some of the `RNNKeys` have been
# removed and replaced with values from `prediction_key.PredictionKey`. The key
# `RNNKeys.PREDICTIONS_KEY` has been replaced by
# `prediction_key.PredictionKey.SCORES` for regression and
# `prediction_key.PredictionKey.CLASSES` for classification. The key
# `RNNKeys.PROBABILITIES_KEY` has been replaced by
# `prediction_key.PredictionKey.PROBABILITIES`.
class RNNKeys(object):
FINAL_STATE_KEY = 'final_state'
LABELS_KEY = '__labels__'
SEQUENCE_LENGTH_KEY = 'sequence_length'
STATE_PREFIX = 'rnn_cell_state'
class PredictionType(object):
"""Enum-like values for the type of prediction that the model makes.
"""
SINGLE_VALUE = 1
MULTIPLE_VALUE = 2
_CELL_TYPES = {'basic_rnn': contrib_rnn.BasicRNNCell,
'lstm': contrib_rnn.LSTMCell,
'gru': contrib_rnn.GRUCell,}
def _get_single_cell(cell_type, num_units):
"""Constructs and return a single `RNNCell`.
Args:
cell_type: Either a string identifying the `RNNCell` type or a subclass of
`RNNCell`.
num_units: The number of units in the `RNNCell`.
Returns:
An initialized `RNNCell`.
Raises:
ValueError: `cell_type` is an invalid `RNNCell` name.
TypeError: `cell_type` is not a string or a subclass of `RNNCell`.
"""
cell_type = _CELL_TYPES.get(cell_type, cell_type)
if not cell_type or not issubclass(cell_type, contrib_rnn.RNNCell):
raise ValueError('The supported cell types are {}; got {}'.format(
list(_CELL_TYPES.keys()), cell_type))
return cell_type(num_units=num_units)
def construct_rnn_cell(num_units, cell_type='basic_rnn',
dropout_keep_probabilities=None):
"""Constructs cells, applies dropout and assembles a `MultiRNNCell`.
The cell type chosen by DynamicRNNEstimator.__init__() is the same as
returned by this function when called with the same arguments.
Args:
num_units: A single `int` or a list/tuple of `int`s. The size of the
`RNNCell`s.
cell_type: A string identifying the `RNNCell` type or a subclass of
`RNNCell`.
dropout_keep_probabilities: a list of dropout probabilities or `None`. If a
list is given, it must have length `len(cell_type) + 1`.
Returns:
An initialized `RNNCell`.
"""
if not isinstance(num_units, (list, tuple)):
num_units = (num_units,)
cells = [_get_single_cell(cell_type, n) for n in num_units]
if dropout_keep_probabilities:
cells = apply_dropout(cells, dropout_keep_probabilities)
if len(cells) == 1:
return cells[0]
return contrib_rnn.MultiRNNCell(cells)
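# Illustrative usage sketch (assumed, not part of the module): two stacked
# LSTM layers with dropout, where the probability list must be one element
# longer than the list of layer sizes (as enforced by apply_dropout below):
#
#     cell = construct_rnn_cell(num_units=[64, 32],
#                               cell_type='lstm',
#                               dropout_keep_probabilities=[0.9, 0.9, 1.0])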
def apply_dropout(cells, dropout_keep_probabilities, random_seed=None):
"""Applies dropout to the outputs and inputs of `cell`.
Args:
cells: A list of `RNNCell`s.
dropout_keep_probabilities: a list whose elements are either floats in
`[0.0, 1.0]` or `None`. It must have length one greater than `cells`.
random_seed: Seed for random dropout.
Returns:
A list of `RNNCell`s, the result of applying the supplied dropouts.
Raises:
ValueError: If `len(dropout_keep_probabilities) != len(cells) + 1`.
"""
if len(dropout_keep_probabilities) != len(cells) + 1:
raise ValueError(
'The number of dropout probabilities must be one greater than the '
'number of cells. Got {} cells and {} dropout probabilities.'.format(
len(cells), len(dropout_keep_probabilities)))
wrapped_cells = [
contrib_rnn.DropoutWrapper(cell, prob, 1.0, seed=random_seed)
for cell, prob in zip(cells[:-1], dropout_keep_probabilities[:-2])
]
wrapped_cells.append(
contrib_rnn.DropoutWrapper(cells[-1], dropout_keep_probabilities[-2],
dropout_keep_probabilities[-1]))
return wrapped_cells
def get_eval_metric_ops(problem_type, prediction_type, sequence_length,
prediction_dict, labels):
"""Returns eval metric ops for given `problem_type` and `prediction_type`.
Args:
problem_type: `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
prediction_dict: A dict of prediction tensors.
labels: The label `Tensor`.
Returns:
A `dict` mapping strings to the result of calling the metric_fn.
"""
eval_metric_ops = {}
if problem_type == constants.ProblemType.CLASSIFICATION:
# Multi value classification
if prediction_type == PredictionType.MULTIPLE_VALUE:
mask_predictions, mask_labels = mask_activations_and_labels(
prediction_dict[prediction_key.PredictionKey.CLASSES], labels,
sequence_length)
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=mask_predictions, labels=mask_labels)
# Single value classification
elif prediction_type == PredictionType.SINGLE_VALUE:
eval_metric_ops['accuracy'] = metrics.streaming_accuracy(
predictions=prediction_dict[prediction_key.PredictionKey.CLASSES],
labels=labels)
elif problem_type == constants.ProblemType.LINEAR_REGRESSION:
# Multi value regression
if prediction_type == PredictionType.MULTIPLE_VALUE:
pass
# Single value regression
elif prediction_type == PredictionType.SINGLE_VALUE:
pass
return eval_metric_ops
def select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope(
'select_last_activations', values=[activations, sequence_lengths]):
activations_shape = array_ops.shape(activations)
batch_size = activations_shape[0]
padded_length = activations_shape[1]
num_label_columns = activations_shape[2]
if sequence_lengths is None:
sequence_lengths = padded_length
reshaped_activations = array_ops.reshape(activations,
[-1, num_label_columns])
indices = math_ops.range(batch_size) * padded_length + sequence_lengths - 1
last_activations = array_ops.gather(reshaped_activations, indices)
last_activations.set_shape(
[activations.get_shape()[0], activations.get_shape()[2]])
return last_activations
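# Worked sketch (illustrative only): with batch_size=2, padded_length=3, k=1
# and sequence_lengths=[2, 3], the flatten-and-gather above picks indices
# 0*3 + 2 - 1 = 1 and 1*3 + 3 - 1 = 5, i.e. activations[0, 1, :] and
# activations[1, 2, :] -- the last valid step of each sequence.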
def mask_activations_and_labels(activations, labels, sequence_lengths):
"""Remove entries outside `sequence_lengths` and returned flattened results.
Args:
activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
labels: Label values, shape `[batch_size, padded_length]`.
sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
length of each sequence. If `None`, then each sequence is unpadded.
Returns:
activations_masked: `logit` values with those beyond `sequence_lengths`
removed for each batch. Batches are then concatenated. Shape
`[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
shape `[batch_size * padded_length, k]` otherwise.
labels_masked: Label values after removing unneeded entries. Shape
`[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and shape
`[batch_size * padded_length]` otherwise.
"""
with ops.name_scope(
'mask_activations_and_labels',
values=[activations, labels, sequence_lengths]):
labels_shape = array_ops.shape(labels)
batch_size = labels_shape[0]
padded_length = labels_shape[1]
if sequence_lengths is None:
flattened_dimension = padded_length * batch_size
activations_masked = array_ops.reshape(activations,
[flattened_dimension, -1])
labels_masked = array_ops.reshape(labels, [flattened_dimension])
else:
mask = array_ops.sequence_mask(sequence_lengths, padded_length)
activations_masked = array_ops.boolean_mask(activations, mask)
labels_masked = array_ops.boolean_mask(labels, mask)
return activations_masked, labels_masked
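# Worked sketch (illustrative only): with batch_size=2, padded_length=4 and
# sequence_lengths=[3, 2], the boolean mask keeps 3 + 2 = 5 of the 8 padded
# steps, so the returned shapes are [5, k] for activations and [5] for labels.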
def multi_value_predictions(activations, target_column, problem_type,
predict_probabilities):
"""Maps `activations` from the RNN to predictions for multi value models.
If `predict_probabilities` is `False`, this function returns a `dict`
containing single entry with key `prediction_key.PredictionKey.CLASSES` for
`problem_type` `ProblemType.CLASSIFICATION` or
`prediction_key.PredictionKey.SCORE` for `problem_type`
`ProblemType.LINEAR_REGRESSION`.
If `predict_probabilities` is `True`, it will contain a second entry with key
`prediction_key.PredictionKey.PROBABILITIES`. The
value of this entry is a `Tensor` of probabilities with shape
`[batch_size, padded_length, num_classes]`.
Note that variable length inputs will yield some predictions that don't have
meaning. For example, if `sequence_length = [3, 2]`, then prediction `[1, 2]`
has no meaningful interpretation.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
target_column: An initialized `TargetColumn`, calculate predictions.
problem_type: Either `ProblemType.CLASSIFICATION` or
`ProblemType.LINEAR_REGRESSION`.
predict_probabilities: A Python boolean, indicating whether probabilities
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
A `dict` mapping strings to `Tensors`.
"""
with ops.name_scope('MultiValuePrediction'):
activations_shape = array_ops.shape(activations)
flattened_activations = array_ops.reshape(activations,
[-1, activations_shape[2]])
prediction_dict = {}
if predict_probabilities:
flat_probabilities = target_column.logits_to_predictions(
flattened_activations, proba=True)
flat_predictions = math_ops.argmax(flat_probabilities, 1)
if target_column.num_label_columns == 1:
probability_shape = array_ops.concat([activations_shape[:2], [2]], 0)
else:
probability_shape = activations_shape
probabilities = array_ops.reshape(
flat_probabilities,
probability_shape,
name=prediction_key.PredictionKey.PROBABILITIES)
prediction_dict[
prediction_key.PredictionKey.PROBABILITIES] = probabilities
else:
flat_predictions = target_column.logits_to_predictions(
flattened_activations, proba=False)
predictions_name = (prediction_key.PredictionKey.CLASSES
if problem_type == constants.ProblemType.CLASSIFICATION
else prediction_key.PredictionKey.SCORES)
predictions = array_ops.reshape(
flat_predictions, [activations_shape[0], activations_shape[1]],
name=predictions_name)
prediction_dict[predictions_name] = predictions
return prediction_dict
| apache-2.0 |
code-sauce/tensorflow | tensorflow/python/kernel_tests/substr_op_test.py | 55 | 8327 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SubstrOpTest(test.TestCase):
def _testScalarString(self, dtype):
test_string = b"Hello"
position = np.array(1, dtype)
length = np.array(3, dtype)
expected_value = b"ell"
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testVectorStrings(self, dtype):
test_string = [b"Hello", b"World"]
position = np.array(1, dtype)
length = np.array(3, dtype)
expected_value = [b"ell", b"orl"]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testMatrixStrings(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array(1, dtype)
length = np.array(4, dtype)
expected_value = [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testElementWisePosLen(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4], [4, 3, 2], [5, 5, 5]], dtype)
expected_value = [[b"en", b"eve", b"lve"], [b"hirt", b"urt", b"te"],
[b"ixtee", b"vente", b"hteen"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testBroadcast(self, dtype):
# Broadcast pos/len onto input string
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"],
[b"nineteen", b"twenty", b"twentyone"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
[b"i", b"ve", b"hte"], [b"i", b"en", b"nty"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = [b"thirteen", b"fourteen", b"fifteen"]
position = np.array([[1, 2, 3], [3, 2, 1], [5, 5, 5]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
expected_value = [[b"hir", b"ur", b"t"], [b"r", b"ur", b"ift"],
[b"ee", b"ee", b"en"]]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Test 1D broadcast
test_string = b"thirteen"
position = np.array([1, 5, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = [b"hir", b"ee", b"n"]
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
def _testBadBroadcast(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, 3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
expected_value = [[b"e", b"ev", b"lve"], [b"h", b"ur", b"tee"],
[b"i", b"ve", b"hte"]]
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
def _testOutOfRangeError(self, dtype):
# Scalar/Scalar
test_string = b"Hello"
position = np.array(7, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Vector/Scalar
test_string = [b"good", b"good", b"bad", b"good"]
position = np.array(3, dtype)
length = np.array(1, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Negative pos
test_string = b"Hello"
position = np.array(-1, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Matrix/Matrix
test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]]
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
# Broadcast
test_string = [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]]
position = np.array([1, 2, 3], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length)
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr = substr_op.eval()
def _testMismatchPosLenShapes(self, dtype):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([[1, 2, 3]], dtype)
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
    # Should fail: position/length have different dimensionality
with self.assertRaises(ValueError):
substr_op = string_ops.substr(test_string, position, length)
def _testAll(self, dtype):
self._testScalarString(dtype)
self._testVectorStrings(dtype)
self._testMatrixStrings(dtype)
self._testElementWisePosLen(dtype)
self._testBroadcast(dtype)
self._testBadBroadcast(dtype)
self._testOutOfRangeError(dtype)
self._testMismatchPosLenShapes(dtype)
def testInt32(self):
self._testAll(np.int32)
def testInt64(self):
self._testAll(np.int64)
def testWrongDtype(self):
with self.test_session():
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3, 1.0)
if __name__ == "__main__":
test.main()
| apache-2.0 |
marcoscaceres/bedrock | bedrock/base/tests/test_middleware.py | 28 | 2690 | from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from bedrock.base.middleware import LocaleURLMiddleware
@override_settings(DEV=True)
class TestLocaleURLMiddleware(TestCase):
def setUp(self):
self.rf = RequestFactory()
self.middleware = LocaleURLMiddleware()
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_correct_language(self):
"""Should redirect to lang prefixed url."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/de' + path)
@override_settings(DEV_LANGUAGES=('es', 'fr'),
LANGUAGE_CODE='en-US',
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_default_language(self):
"""Should redirect to default lang if not in settings."""
path = '/the/dude/'
req = self.rf.get(path, HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/en-US' + path)
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=('/other/',))
def test_redirects_lang_param(self):
"""Middleware should remove the lang param on redirect."""
path = '/fr/the/dude/'
req = self.rf.get(path, {'lang': 'de'})
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'], '/de/the/dude/')
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=('/dude/',))
def test_no_redirect_lang_param(self):
"""Middleware should not redirect when exempt."""
path = '/fr/the/dude/'
req = self.rf.get(path, {'lang': 'de'})
resp = LocaleURLMiddleware().process_request(req)
self.assertIs(resp, None) # no redirect
@override_settings(DEV_LANGUAGES=('de', 'fr'),
FF_EXEMPT_LANG_PARAM_URLS=())
def test_redirects_to_correct_language_despite_unicode_errors(self):
"""Should redirect to lang prefixed url, stripping invalid chars."""
path = '/the/dude/'
corrupt_querystring = '?a\xa4\x91b\xa4\x91i\xc0de=s'
corrected_querystring = '?abide=s'
req = self.rf.get(path + corrupt_querystring,
HTTP_ACCEPT_LANGUAGE='de')
resp = LocaleURLMiddleware().process_request(req)
self.assertEqual(resp['Location'],
'/de' + path + corrected_querystring)
| mpl-2.0 |
laperry1/android_external_chromium_org | native_client_sdk/src/tools/tests/create_html_test.py | 108 | 1808 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
import shutil
import sys
import tempfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock")
sys.path.append(PARENT_DIR)
sys.path.append(MOCK_DIR)
import create_html
import mock
class TestCreateHtml(unittest.TestCase):
def setUp(self):
self.tempdir = None
def tearDown(self):
if self.tempdir:
shutil.rmtree(self.tempdir)
def testBadInput(self):
# Non-existant file
self.assertRaises(create_html.Error, create_html.main, ['foo.nexe'])
# Existing file with wrong extension
self.assertRaises(create_html.Error, create_html.main, [__file__])
# Existing directory
self.assertRaises(create_html.Error, create_html.main, [PARENT_DIR])
def testCreatesOutput(self):
self.tempdir = tempfile.mkdtemp("_sdktest")
expected_html = os.path.join(self.tempdir, 'foo.html')
nmf_file = os.path.join(self.tempdir, 'foo.nmf')
with mock.patch('sys.stdout'):
with mock.patch('os.path.exists'):
with mock.patch('os.path.isfile'):
options = mock.MagicMock(return_value=False)
options.output = None
create_html.CreateHTML([nmf_file], options)
# Assert that the file was created
self.assertTrue(os.path.exists(expected_html))
# Assert that nothing else was created
self.assertEqual(os.listdir(self.tempdir),
[os.path.basename(expected_html)])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
aplicatii-romanesti/allinclusive-kodi-pi | .kodi/addons/plugin.video.movie4k/plugintools.py | 1 | 19027 | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Plugin Tools v1.0.8
#---------------------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Based on code from youtube, parsedom and pelisalacarta addons
# Author:
# Jesús
# [email protected]
# http://www.mimediacenter.info/plugintools
#---------------------------------------------------------------------------
# Changelog:
# 1.0.0
# - First release
# 1.0.1
# - If find_single_match can't find anything, it returns an empty string
# - Remove addon id from this module, so it remains clean
# 1.0.2
# - Added parameter on "add_item" to say that item is playable
# 1.0.3
# - Added direct play
# - Fixed bug when video isPlayable=True
# 1.0.4
# - Added get_temp_path, get_runtime_path, get_data_path
# - Added get_setting, set_setting, open_settings_dialog and get_localized_string
# - Added keyboard_input
# - Added message
# 1.0.5
# - Added read_body_and_headers for advanced http handling
# - Added show_picture for picture addons support
# - Added optional parameters "title" and "hidden" to keyboard_input
# 1.0.6
# - Added fanart, show, episode and infolabels to add_item
# 1.0.7
# - Added set_view function
# 1.0.8
# - Added selector
#---------------------------------------------------------------------------
import xbmc
import xbmcplugin
import xbmcaddon
import xbmcgui
import urllib
import urllib2
import re
import sys
import os
import time
import socket
from StringIO import StringIO
import gzip
module_log_enabled = False
http_debug_log_enabled = False
LIST = "list"
THUMBNAIL = "thumbnail"
MOVIES = "movies"
TV_SHOWS = "tvshows"
SEASONS = "seasons"
EPISODES = "episodes"
OTHER = "other"
# Suggested view codes for each type from different skins (initial list thanks to xbmcswift2 library)
ALL_VIEW_CODES = {
'list': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 50, # List
'skin.re-touched': 50, # List
},
'thumbnail': {
'skin.confluence': 500, # Thumbnail
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 51, # Big icons
'skin.re-touched': 500, #Thumbnail
},
'movies': {
'skin.confluence': 500, # 500 Thumbnail # 515 Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, #Thumbnail
},
'tvshows': {
'skin.confluence': 500, # Thumbnail 515, # Media Info 3
'skin.aeon.nox': 500, # Wall
'skin.droid': 51, # Big icons
'skin.quartz': 52, # Media info
'skin.re-touched': 500, #Thumbnail
},
'seasons': {
'skin.confluence': 50, # List
'skin.aeon.nox': 50, # List
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 50, # List
},
'episodes': {
'skin.confluence': 504, # Media Info
'skin.aeon.nox': 518, # Infopanel
'skin.droid': 50, # List
'skin.quartz': 52, # Media info
'skin.re-touched': 550, # Wide
},
}
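# Illustrative sketch (an assumption -- the addon's own set_view helper is not
# shown in this excerpt): a view code from ALL_VIEW_CODES could be applied for
# the current skin with Kodi's Container.SetViewMode builtin, roughly:
def _example_apply_view_code(view_mode):
    skin_name = xbmc.getSkinDir()
    view_code = ALL_VIEW_CODES.get(view_mode, {}).get(skin_name)
    if view_code is not None:
        xbmc.executebuiltin("Container.SetViewMode(%d)" % view_code)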
# Write something on XBMC log
def log(message):
xbmc.log(message)
# Write this module messages on XBMC log
def _log(message):
if module_log_enabled:
xbmc.log("plugintools."+message)
# Parse XBMC params - based on script.module.parsedom addon
def get_params():
_log("get_params")
param_string = sys.argv[2]
_log("get_params "+str(param_string))
commands = {}
if param_string:
split_commands = param_string[param_string.find('?') + 1:].split('&')
for command in split_commands:
_log("get_params command="+str(command))
if len(command) > 0:
if "=" in command:
split_command = command.split('=')
key = split_command[0]
value = urllib.unquote_plus(split_command[1])
commands[key] = value
else:
commands[command] = ""
_log("get_params "+repr(commands))
return commands
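# Illustrative note (not part of the module): for a plugin invocation such as
#     plugin://plugin.video.example/?action=play&title=Some%20Video
# sys.argv[2] would be "?action=play&title=Some%20Video" and get_params()
# would return {'action': 'play', 'title': 'Some Video'}.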
# Fetch text content from a URL
def read(url):
_log("read "+url)
f = urllib2.urlopen(url)
data = f.read()
f.close()
return data
def read_body_and_headers(url, post=None, headers=[], follow_redirects=False, timeout=None):
_log("read_body_and_headers "+url)
if post is not None:
_log("read_body_and_headers post="+post)
if len(headers)==0:
headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"])
# Start cookie lib
ficherocookies = os.path.join( get_data_path(), 'cookies.dat' )
_log("read_body_and_headers cookies_file="+ficherocookies)
cj = None
ClientCookie = None
cookielib = None
# Let's see if cookielib is available
try:
_log("read_body_and_headers importing cookielib")
import cookielib
except ImportError:
_log("read_body_and_headers cookielib no disponible")
# If importing cookielib fails
# let's try ClientCookie
try:
_log("read_body_and_headers importing ClientCookie")
import ClientCookie
except ImportError:
_log("read_body_and_headers ClientCookie not available")
# ClientCookie isn't available either
urlopen = urllib2.urlopen
Request = urllib2.Request
else:
_log("read_body_and_headers ClientCookie available")
# imported ClientCookie
urlopen = ClientCookie.urlopen
Request = ClientCookie.Request
cj = ClientCookie.MozillaCookieJar()
else:
_log("read_body_and_headers cookielib available")
# importing cookielib worked
# proxy_handler = urllib2.ProxyHandler({'http':'217.12.25.160:80'})
# opener = urllib2.build_opener(proxy_handler)
# urlopen = opener.open
urlopen = urllib2.urlopen
Request = urllib2.Request
cj = cookielib.MozillaCookieJar()
# This is a subclass of FileCookieJar
# that has useful load and save methods
if cj is not None:
# we successfully imported
# one of the two cookie handling modules
_log("read_body_and_headers Cookies enabled")
if os.path.isfile(ficherocookies):
_log("read_body_and_headers Reading cookie file")
# if we have a cookie file already saved
# then load the cookies into the Cookie Jar
try:
cj.load(ficherocookies)
except:
_log("read_body_and_headers Wrong cookie file, deleting...")
os.remove(ficherocookies)
# Now we need to get our Cookie Jar
# installed in the opener;
# for fetching URLs
if cookielib is not None:
_log("read_body_and_headers opener using urllib2 (cookielib)")
# if we use cookielib
# then we get the HTTPCookieProcessor
# and install the opener in urllib2
if not follow_redirects:
opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
else:
opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=http_debug_log_enabled),urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)
else:
_log("read_body_and_headers opener using ClientCookie")
# if we use ClientCookie
# then we get the HTTPCookieProcessor
# and install the opener in ClientCookie
opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
ClientCookie.install_opener(opener)
# -------------------------------------------------
    # Cookies installed, launch the request
# -------------------------------------------------
    # Timer
inicio = time.clock()
    # Dictionary for the request headers
txheaders = {}
    # Build the request
if post is None:
_log("read_body_and_headers GET request")
else:
_log("read_body_and_headers POST request")
    # Add the headers
_log("read_body_and_headers ---------------------------")
for header in headers:
_log("read_body_and_headers header %s=%s" % (str(header[0]),str(header[1])) )
txheaders[header[0]]=header[1]
_log("read_body_and_headers ---------------------------")
req = Request(url, post, txheaders)
if timeout is None:
handle=urlopen(req)
else:
        # Available in Python 2.6 and later --> handle = urlopen(req, timeout=timeout)
        # For all Python versions:
try:
import socket
deftimeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
handle=urlopen(req)
socket.setdefaulttimeout(deftimeout)
except:
import sys
for line in sys.exc_info():
_log( "%s" % line )
    # Update the cookie store
cj.save(ficherocookies)
    # Read the data and close
if handle.info().get('Content-Encoding') == 'gzip':
buf = StringIO( handle.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data=handle.read()
info = handle.info()
_log("read_body_and_headers Response")
returnheaders=[]
_log("read_body_and_headers ---------------------------")
for header in info:
_log("read_body_and_headers "+header+"="+info[header])
returnheaders.append([header,info[header]])
handle.close()
_log("read_body_and_headers ---------------------------")
'''
    # Launch the request
try:
response = urllib2.urlopen(req)
    # If it fails, retry after replacing special characters in the URL
except:
req = urllib2.Request(url.replace(" ","%20"))
        # Add the headers
for header in headers:
req.add_header(header[0],header[1])
response = urllib2.urlopen(req)
'''
    # Elapsed time
fin = time.clock()
_log("read_body_and_headers Downloaded in %d seconds " % (fin-inicio+1))
_log("read_body_and_headers body="+data)
return data,returnheaders
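# Usage sketch (URL and header values are hypothetical): the helper returns the
# decoded body together with the response headers as a list of [name, value] pairs.
#
# body, response_headers = read_body_and_headers(
#     "http://example.com/page", headers=[["Referer", "http://example.com/"]])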
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
# Parse string and extracts multiple matches using regular expressions
def find_multiple_matches(text,pattern):
_log("find_multiple_matches pattern="+pattern)
matches = re.findall(pattern,text,re.DOTALL)
return matches
# Parse string and extracts first match as a string
def find_single_match(text,pattern):
_log("find_single_match pattern="+pattern)
result = ""
try:
matches = re.findall(pattern,text, flags=re.DOTALL)
result = matches[0]
except:
result = ""
return result
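# Quick illustration of the two regex helpers above (sample strings only):
# find_multiple_matches('<a href="1"></a><a href="2"></a>', 'href="([^"]+)"')
# returns ['1', '2'], while
# find_single_match('<title>Home</title>', '<title>([^<]+)</title>') returns 'Home'
# and "" when the pattern does not match.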
def add_item( action="" , title="" , plot="" , url="" , thumbnail="" , fanart="" , show="" , episode="" , extra="", page="", info_labels = None, isPlayable = False , folder=True ):
_log("add_item action=["+action+"] title=["+title+"] url=["+url+"] thumbnail=["+thumbnail+"] fanart=["+fanart+"] show=["+show+"] episode=["+episode+"] extra=["+extra+"] page=["+page+"] isPlayable=["+str(isPlayable)+"] folder=["+str(folder)+"]")
listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail )
if info_labels is None:
info_labels = { "Title" : title, "FileName" : title, "Plot" : plot }
listitem.setInfo( "video", info_labels )
if fanart!="":
listitem.setProperty('fanart_image',fanart)
xbmcplugin.setPluginFanart(int(sys.argv[1]), fanart)
if url.startswith("plugin://"):
itemurl = url
listitem.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
elif isPlayable:
listitem.setProperty("Video", "true")
listitem.setProperty('IsPlayable', 'true')
itemurl = '%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % ( sys.argv[ 0 ] , action , urllib.quote_plus( title ) , urllib.quote_plus(url) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extra ) , urllib.quote_plus( page ))
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
else:
itemurl = '%s?action=%s&title=%s&url=%s&thumbnail=%s&plot=%s&extra=%s&page=%s' % ( sys.argv[ 0 ] , action , urllib.quote_plus( title ) , urllib.quote_plus(url) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extra ) , urllib.quote_plus( page ))
xbmcplugin.addDirectoryItem( handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=folder)
def close_item_list():
_log("close_item_list")
xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
def play_resolved_url(url):
_log("play_resolved_url ["+url+"]")
listitem = xbmcgui.ListItem(path=url)
listitem.setProperty('IsPlayable', 'true')
return xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
def direct_play(url):
_log("direct_play ["+url+"]")
title = ""
try:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", path=url)
except:
xlistitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", )
xlistitem.setInfo( "video", { "Title": title } )
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
playlist.add( url, xlistitem )
player_type = xbmc.PLAYER_CORE_AUTO
xbmcPlayer = xbmc.Player( player_type )
xbmcPlayer.play(playlist)
def show_picture(url):
local_folder = os.path.join(get_data_path(),"images")
if not os.path.exists(local_folder):
try:
os.mkdir(local_folder)
except:
pass
local_file = os.path.join(local_folder,"temp.jpg")
# Download picture
urllib.urlretrieve(url, local_file)
# Show picture
xbmc.executebuiltin( "SlideShow("+local_folder+")" )
def get_temp_path():
_log("get_temp_path")
dev = xbmc.translatePath( "special://temp/" )
_log("get_temp_path ->'"+str(dev)+"'")
return dev
def get_runtime_path():
_log("get_runtime_path")
dev = xbmc.translatePath( __settings__.getAddonInfo('Path') )
_log("get_runtime_path ->'"+str(dev)+"'")
return dev
def get_data_path():
_log("get_data_path")
dev = xbmc.translatePath( __settings__.getAddonInfo('Profile') )
    # Patch for XBMC4XBOX
if not os.path.exists(dev):
os.makedirs(dev)
_log("get_data_path ->'"+str(dev)+"'")
return dev
def get_setting(name):
_log("get_setting name='"+name+"'")
dev = __settings__.getSetting( name )
_log("get_setting ->'"+str(dev)+"'")
return dev
def set_setting(name,value):
_log("set_setting name='"+name+"','"+value+"'")
__settings__.setSetting( name,value )
def open_settings_dialog():
_log("open_settings_dialog")
__settings__.openSettings()
def get_localized_string(code):
_log("get_localized_string code="+str(code))
dev = __language__(code)
try:
dev = dev.encode("utf-8")
except:
pass
_log("get_localized_string ->'"+dev+"'")
return dev
def keyboard_input(default_text="", title="", hidden=False):
_log("keyboard_input default_text='"+default_text+"'")
keyboard = xbmc.Keyboard(default_text,title,hidden)
keyboard.doModal()
if (keyboard.isConfirmed()):
tecleado = keyboard.getText()
else:
tecleado = ""
_log("keyboard_input ->'"+tecleado+"'")
return tecleado
def message(text1, text2="", text3=""):
_log("message text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
if text3=="":
xbmcgui.Dialog().ok( text1 , text2 )
elif text2=="":
xbmcgui.Dialog().ok( "" , text1 )
else:
xbmcgui.Dialog().ok( text1 , text2 , text3 )
def message_yes_no(text1, text2="", text3=""):
_log("message_yes_no text1='"+text1+"', text2='"+text2+"', text3='"+text3+"'")
if text3=="":
yes_pressed = xbmcgui.Dialog().yesno( text1 , text2 )
elif text2=="":
yes_pressed = xbmcgui.Dialog().yesno( "" , text1 )
else:
yes_pressed = xbmcgui.Dialog().yesno( text1 , text2 , text3 )
return yes_pressed
def selector(option_list,title="Select one"):
_log("selector title='"+title+"', options="+repr(option_list))
dia = xbmcgui.Dialog()
selection = dia.select(title,option_list)
return selection
def set_view(view_mode, view_code=0):
_log("set_view view_mode='"+view_mode+"', view_code="+str(view_code))
# Set the content for extended library views if needed
if view_mode==MOVIES:
_log("set_view content is movies")
xbmcplugin.setContent( int(sys.argv[1]) ,"movies" )
elif view_mode==TV_SHOWS:
_log("set_view content is tvshows")
xbmcplugin.setContent( int(sys.argv[1]) ,"tvshows" )
elif view_mode==SEASONS:
_log("set_view content is seasons")
xbmcplugin.setContent( int(sys.argv[1]) ,"seasons" )
elif view_mode==EPISODES:
_log("set_view content is episodes")
xbmcplugin.setContent( int(sys.argv[1]) ,"episodes" )
# Reads skin name
skin_name = xbmc.getSkinDir()
_log("set_view skin_name='"+skin_name+"'")
try:
if view_code==0:
_log("set_view view mode is "+view_mode)
view_codes = ALL_VIEW_CODES.get(view_mode)
view_code = view_codes.get(skin_name)
_log("set_view view code for "+view_mode+" in "+skin_name+" is "+str(view_code))
xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
else:
_log("set_view view code forced to "+str(view_code))
xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
except:
_log("Unable to find view code for view mode "+str(view_mode)+" and skin "+skin_name)
f = open( os.path.join( os.path.dirname(__file__) , "addon.xml") )
data = f.read()
f.close()
addon_id = find_single_match(data,'id="([^"]+)"')
if addon_id=="":
addon_id = find_single_match(data,"id='([^']+)'")
__settings__ = xbmcaddon.Addon(id=addon_id)
__language__ = __settings__.getLocalizedString
| apache-2.0 |
m-kuhn/QGIS | python/plugins/db_manager/db_plugins/oracle/plugin.py | 6 | 22780 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS (Oracle)
Date : Aug 27, 2014
copyright : (C) 2014 by Médéric RIBREUX
email : [email protected]
The content of this file is based on
- PG_Manager by Martin Dobias <[email protected]> (GPLv2 license)
- DB Manager by Giuseppe Sucameli <[email protected]> (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
# this will disable the dbplugin if the connector raise an ImportError
from .connector import OracleDBConnector
from qgis.PyQt.QtCore import Qt, QCoreApplication
from qgis.PyQt.QtGui import QIcon, QKeySequence
from qgis.PyQt.QtWidgets import QAction, QApplication, QMessageBox
from qgis.core import QgsApplication, QgsVectorLayer, NULL, QgsSettings
from ..plugin import ConnectionError, InvalidDataException, DBPlugin, \
Database, Schema, Table, VectorTable, TableField, TableConstraint, \
TableIndex, TableTrigger
from qgis.core import QgsCredentials
def classFactory():
return OracleDBPlugin
class OracleDBPlugin(DBPlugin):
@classmethod
def icon(self):
return QgsApplication.getThemeIcon("/mIconOracle.svg")
@classmethod
def typeName(self):
return 'oracle'
@classmethod
def typeNameString(self):
return QCoreApplication.translate('db_manager', 'Oracle Spatial')
@classmethod
def providerName(self):
return 'oracle'
@classmethod
def connectionSettingsKey(self):
return '/Oracle/connections'
def connectToUri(self, uri):
self.db = self.databasesFactory(self, uri)
if self.db:
return True
return False
def databasesFactory(self, connection, uri):
return ORDatabase(connection, uri)
def connect(self, parent=None):
conn_name = self.connectionName()
settings = QgsSettings()
settings.beginGroup(u"/{0}/{1}".format(
self.connectionSettingsKey(), conn_name))
if not settings.contains("database"): # non-existent entry?
raise InvalidDataException(
self.tr('There is no defined database connection "{0}".'.format(
conn_name)))
from qgis.core import QgsDataSourceUri
uri = QgsDataSourceUri()
settingsList = ["host", "port", "database", "username", "password"]
host, port, database, username, password = [
settings.value(x, "", type=str) for x in settingsList]
        # get all of the connection options
useEstimatedMetadata = settings.value(
"estimatedMetadata", False, type=bool)
uri.setParam('userTablesOnly', str(
settings.value("userTablesOnly", False, type=bool)))
uri.setParam('geometryColumnsOnly', str(
settings.value("geometryColumnsOnly", False, type=bool)))
uri.setParam('allowGeometrylessTables', str(
settings.value("allowGeometrylessTables", False, type=bool)))
uri.setParam('onlyExistingTypes', str(
settings.value("onlyExistingTypes", False, type=bool)))
uri.setParam('includeGeoAttributes', str(
settings.value("includeGeoAttributes", False, type=bool)))
settings.endGroup()
uri.setConnection(host, port, database, username, password)
uri.setUseEstimatedMetadata(useEstimatedMetadata)
err = u""
try:
return self.connectToUri(uri)
except ConnectionError as e:
err = str(e)
# ask for valid credentials
max_attempts = 3
for i in range(max_attempts):
(ok, username, password) = QgsCredentials.instance().get(
uri.connectionInfo(False), username, password, err)
if not ok:
return False
uri.setConnection(host, port, database, username, password)
try:
self.connectToUri(uri)
except ConnectionError as e:
if i == max_attempts - 1: # failed the last attempt
raise e
err = str(e)
continue
QgsCredentials.instance().put(
uri.connectionInfo(False), username, password)
return True
return False
class ORDatabase(Database):
def __init__(self, connection, uri):
self.connName = connection.connectionName()
Database.__init__(self, connection, uri)
def connectorsFactory(self, uri):
return OracleDBConnector(uri, self.connName)
def dataTablesFactory(self, row, db, schema=None):
return ORTable(row, db, schema)
def vectorTablesFactory(self, row, db, schema=None):
return ORVectorTable(row, db, schema)
def info(self):
from .info_model import ORDatabaseInfo
return ORDatabaseInfo(self)
def schemasFactory(self, row, db):
return ORSchema(row, db)
def columnUniqueValuesModel(self, col, table, limit=10):
l = u""
if limit:
l = u"WHERE ROWNUM < {:d}".format(limit)
con = self.database().connector
# Prevent geometry column show
tableName = table.replace(u'"', u"").split(u".")
        if len(tableName) == 1:
tableName = [None, tableName[0]]
colName = col.replace(u'"', u"").split(u".")[-1]
if con.isGeometryColumn(tableName, colName):
return None
query = u"SELECT DISTINCT {} FROM {} {}".format(col, table, l)
return self.sqlResultModel(query, self)
def sqlResultModel(self, sql, parent):
from .data_model import ORSqlResultModel
return ORSqlResultModel(self, sql, parent)
def sqlResultModelAsync(self, sql, parent):
from .data_model import ORSqlResultModelAsync
return ORSqlResultModelAsync(self, sql, parent)
def toSqlLayer(self, sql, geomCol, uniqueCol,
layerName=u"QueryLayer", layerType=None,
avoidSelectById=False, filter=""):
uri = self.uri()
con = self.database().connector
uri.setDataSource(u"", u"({}\n)".format(
sql), geomCol, filter, uniqueCol.strip(u'"'))
if avoidSelectById:
uri.disableSelectAtId(True)
provider = self.dbplugin().providerName()
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
# handling undetermined geometry type
if not vlayer.isValid():
wkbType, srid = con.getTableMainGeomType(
u"({}\n)".format(sql), geomCol)
uri.setWkbType(wkbType)
if srid:
uri.setSrid(str(srid))
vlayer = QgsVectorLayer(uri.uri(False), layerName, provider)
return vlayer
def registerDatabaseActions(self, mainWindow):
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Re-connect"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Database"), self.reconnectActionSlot)
if self.schemas():
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Create Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.createSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Delete (Empty) Schema…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Schema"), self.deleteSchemaActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "Delete Selected Item"), self)
mainWindow.registerAction(action, None, self.deleteActionSlot)
action.setShortcuts(QKeySequence.Delete)
action = QAction(QgsApplication.getThemeIcon("/mActionCreateTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Create Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.createTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionEditTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Edit Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.editTableActionSlot)
action = QAction(QgsApplication.getThemeIcon("/mActionDeleteTable.svg"),
QApplication.translate(
"DBManagerPlugin", "&Delete Table/View…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.deleteTableActionSlot)
action = QAction(QApplication.translate(
"DBManagerPlugin", "&Empty Table…"), self)
mainWindow.registerAction(action, QApplication.translate(
"DBManagerPlugin", "&Table"), self.emptyTableActionSlot)
def supportsComment(self):
return False
class ORSchema(Schema):
def __init__(self, row, db):
Schema.__init__(self, db)
# self.oid, self.name, self.owner, self.perms, self.comment = row
self.name = row[0]
class ORTable(Table):
def __init__(self, row, db, schema=None):
Table.__init__(self, db, schema)
self.name, self.owner, isView = row
self.estimatedRowCount = None
self.objectType = None
self.isView = False
self.isMaterializedView = False
if isView == 1:
self.isView = True
self.creationDate = None
self.modificationDate = None
def getDates(self):
"""Grab the creation/modification dates of the table"""
self.creationDate, self.modificationDate = (
self.database().connector.getTableDates((self.schemaName(),
self.name)))
def refreshRowEstimation(self):
"""Use ALL_ALL_TABLE to get an estimation of rows"""
        if self.isView:
            self.estimatedRowCount = 0
            return
self.estimatedRowCount = (
self.database().connector.getTableRowEstimation(
(self.schemaName(), self.name)))
def getType(self):
"""Grab the type of object for the table"""
self.objectType = self.database().connector.getTableType(
(self.schemaName(), self.name))
def getComment(self):
"""Grab the general comment of the table/view"""
self.comment = self.database().connector.getTableComment(
(self.schemaName(), self.name), self.objectType)
def getDefinition(self):
return self.database().connector.getDefinition(
(self.schemaName(), self.name), self.objectType)
def getMViewInfo(self):
if self.objectType == u"MATERIALIZED VIEW":
return self.database().connector.getMViewInfo(
(self.schemaName(), self.name))
else:
return None
def runAction(self, action):
action = str(action)
if action.startswith("rows/"):
if action == "rows/recount":
self.refreshRowCount()
return True
elif action.startswith("index/"):
parts = action.split('/')
index_name = parts[1]
index_action = parts[2]
msg = QApplication.translate(
"DBManagerPlugin",
"Do you want to {} index {}?".format(
index_action, index_name))
QApplication.restoreOverrideCursor()
try:
if QMessageBox.question(
None,
QApplication.translate(
"DBManagerPlugin", "Table Index"),
msg,
QMessageBox.Yes | QMessageBox.No) == QMessageBox.No:
return False
finally:
QApplication.setOverrideCursor(Qt.WaitCursor)
if index_action == "rebuild":
self.aboutToChange.emit()
self.database().connector.rebuildTableIndex(
(self.schemaName(), self.name), index_name)
self.refreshIndexes()
return True
elif action.startswith(u"mview/"):
if action == "mview/refresh":
self.aboutToChange.emit()
self.database().connector.refreshMView(
(self.schemaName(), self.name))
return True
return Table.runAction(self, action)
def tableFieldsFactory(self, row, table):
return ORTableField(row, table)
def tableConstraintsFactory(self, row, table):
return ORTableConstraint(row, table)
def tableIndexesFactory(self, row, table):
return ORTableIndex(row, table)
def tableTriggersFactory(self, row, table):
return ORTableTrigger(row, table)
def info(self):
from .info_model import ORTableInfo
return ORTableInfo(self)
def tableDataModel(self, parent):
from .data_model import ORTableDataModel
return ORTableDataModel(self, parent)
def getValidQgisUniqueFields(self, onlyOne=False):
""" list of fields valid to load the table as layer in Qgis canvas.
Qgis automatically search for a valid unique field, so it's
needed only for queries and views.
"""
ret = []
# add the pk
pkcols = [x for x in self.fields() if x.primaryKey]
if len(pkcols) == 1:
ret.append(pkcols[0])
# then add integer fields with an unique index
indexes = self.indexes()
if indexes is not None:
for idx in indexes:
if idx.isUnique and len(idx.columns) == 1:
fld = idx.fields()[idx.columns[0]]
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
# and finally append the other suitable fields
for fld in self.fields():
if (fld.dataType == u"NUMBER" and not fld.modifier and fld.notNull and fld not in ret):
ret.append(fld)
if onlyOne:
return ret[0] if len(ret) > 0 else None
return ret
def uri(self):
uri = self.database().uri()
schema = self.schemaName() if self.schemaName() else ''
geomCol = self.geomColumn if self.type in [
Table.VectorType, Table.RasterType] else ""
uniqueCol = self.getValidQgisUniqueFields(
True) if self.isView else None
uri.setDataSource(schema, self.name, geomCol if geomCol else None,
None, uniqueCol.name if uniqueCol else "")
# Handle geographic table
if geomCol:
uri.setWkbType(self.wkbType)
uri.setSrid(str(self.srid))
return uri
class ORVectorTable(ORTable, VectorTable):
def __init__(self, row, db, schema=None):
ORTable.__init__(self, row[0:3], db, schema)
VectorTable.__init__(self, db, schema)
self.geomColumn, self.geomType, self.wkbType, self.geomDim, \
self.srid = row[-7:-2]
def info(self):
from .info_model import ORVectorTableInfo
return ORVectorTableInfo(self)
def runAction(self, action):
if action.startswith("extent/"):
if action == "extent/update":
self.aboutToChange.emit()
self.updateExtent()
return True
if ORTable.runAction(self, action):
return True
return VectorTable.runAction(self, action)
def canUpdateMetadata(self):
return self.database().connector.canUpdateMetadata((self.schemaName(),
self.name))
def updateExtent(self):
self.database().connector.updateMetadata(
(self.schemaName(), self.name),
self.geomColumn, extent=self.extent)
self.refreshTableEstimatedExtent()
self.refresh()
def hasSpatialIndex(self, geom_column=None):
geom_column = geom_column if geom_column else self.geomColumn
for idx in self.indexes():
if geom_column == idx.column:
return True
return False
class ORTableField(TableField):
def __init__(self, row, table):
""" build fields information from query and find primary key """
TableField.__init__(self, table)
self.num, self.name, self.dataType, self.charMaxLen, \
self.modifier, self.notNull, self.hasDefault, \
self.default, typeStr, self.comment = row
self.primaryKey = False
self.num = int(self.num)
if self.charMaxLen == NULL:
self.charMaxLen = None
else:
self.charMaxLen = int(self.charMaxLen)
if self.modifier == NULL:
self.modifier = None
else:
self.modifier = int(self.modifier)
if self.notNull.upper() == u"Y":
self.notNull = False
else:
self.notNull = True
if self.comment == NULL:
self.comment = u""
# find out whether fields are part of primary key
for con in self.table().constraints():
if con.type == ORTableConstraint.TypePrimaryKey and self.name == con.column:
self.primaryKey = True
break
def type2String(self):
if (u"TIMESTAMP" in self.dataType or self.dataType in [u"DATE", u"SDO_GEOMETRY", u"BINARY_FLOAT", u"BINARY_DOUBLE"]):
return u"{}".format(self.dataType)
if self.charMaxLen in [None, -1]:
return u"{}".format(self.dataType)
elif self.modifier in [None, -1, 0]:
return u"{}({})".format(self.dataType, self.charMaxLen)
return u"{}({},{})".format(self.dataType, self.charMaxLen,
self.modifier)
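    # For illustration: a NUMBER column read with charMaxLen=10 and modifier=2
    # renders as "NUMBER(10,2)", while a plain DATE column renders as "DATE".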
def update(self, new_name, new_type_str=None, new_not_null=None,
new_default_str=None):
self.table().aboutToChange.emit()
if self.name == new_name:
new_name = None
if self.type2String() == new_type_str:
new_type_str = None
if self.notNull == new_not_null:
new_not_null = None
if self.default2String() == new_default_str:
new_default_str = None
ret = self.table().database().connector.updateTableColumn(
(self.table().schemaName(), self.table().name),
self.name, new_name, new_type_str,
new_not_null, new_default_str)
# When changing a field, refresh also constraints and
# indexes.
if ret is not False:
self.table().refreshFields()
self.table().refreshConstraints()
self.table().refreshIndexes()
return ret
class ORTableConstraint(TableConstraint):
TypeCheck, TypeForeignKey, TypePrimaryKey, \
TypeUnique, TypeUnknown = list(range(5))
types = {"c": TypeCheck, "r": TypeForeignKey,
"p": TypePrimaryKey, "u": TypeUnique}
def __init__(self, row, table):
""" build constraints info from query """
TableConstraint.__init__(self, table)
self.name, constr_type_str, self.column, self.validated, \
self.generated, self.status = row[0:6]
constr_type_str = constr_type_str.lower()
if constr_type_str in ORTableConstraint.types:
self.type = ORTableConstraint.types[constr_type_str]
else:
self.type = ORTableConstraint.TypeUnknown
if row[6] == NULL:
self.checkSource = u""
else:
self.checkSource = row[6]
if row[8] == NULL:
self.foreignTable = u""
else:
self.foreignTable = row[8]
if row[7] == NULL:
self.foreignOnDelete = u""
else:
self.foreignOnDelete = row[7]
if row[9] == NULL:
self.foreignKey = u""
else:
self.foreignKey = row[9]
def type2String(self):
if self.type == ORTableConstraint.TypeCheck:
return QApplication.translate("DBManagerPlugin", "Check")
if self.type == ORTableConstraint.TypePrimaryKey:
return QApplication.translate("DBManagerPlugin", "Primary key")
if self.type == ORTableConstraint.TypeForeignKey:
return QApplication.translate("DBManagerPlugin", "Foreign key")
if self.type == ORTableConstraint.TypeUnique:
return QApplication.translate("DBManagerPlugin", "Unique")
return QApplication.translate("DBManagerPlugin", 'Unknown')
def fields(self):
""" Hack to make edit dialog box work """
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableIndex(TableIndex):
def __init__(self, row, table):
TableIndex.__init__(self, table)
self.name, self.column, self.indexType, self.status, \
self.analyzed, self.compression, self.isUnique = row
def fields(self):
""" Hack to make edit dialog box work """
self.table().refreshFields()
fields = self.table().fields()
field = None
for fld in fields:
if fld.name == self.column:
field = fld
cols = {}
cols[0] = field
return cols
class ORTableTrigger(TableTrigger):
def __init__(self, row, table):
TableTrigger.__init__(self, table)
self.name, self.event, self.type, self.enabled = row
| gpl-2.0 |
miqlar/PyFME | src/pyfme/utils/trimmer.py | 5 | 8158 | # -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Trimmer
-------
This module solves the problem of calculating the values of the state and
control vectors that satisfy the state equations of the aircraft at the
given condition. This cannot be done analytically because of the very complex
functional dependence on the aerodynamic data. Instead, it must be done with
a numerical algorithm which iteratively adjusts the independent variables
until some solution criterion is met.
"""
from copy import deepcopy
from warnings import warn
from math import sqrt, sin, cos, tan, atan
import numpy as np
from scipy.optimize import least_squares
from pyfme.utils.coordinates import wind2body
from pyfme.models.constants import GRAVITY
def steady_state_flight_trimmer(aircraft, system, env,
TAS,
controls_0, controls2trim=None,
gamma=0.0, turn_rate=0.0,
verbose=0):
"""Finds a combination of values of the state and control variables that
correspond to a steady-state flight condition. Steady-state aircraft flight
can be defined as a condition in which all of the motion variables are
constant or zero. That is, the linear and angular velocity components are
constant (or zero), thus all acceleration components are zero.
Parameters
----------
aircraft : Aircraft
Plane to be trimmed.
system : System
System for aircraft trimming.
env : Environment
Environment with the models for wind, atmosphere and gravity.
TAS : float
True Air Speed (m/s).
controls_0 : dict
Initial value guess for each control. If the control is not in
`controls2trim` or `controls2trim` is `None` the control is
considered fixed to that value during the trimming process.
controls2trim : list, optional
List with controls to be trimmed. If not given, no control is
considered fixed.
gamma : float, optional
Flight path angle (rad).
turn_rate : float, optional
Turn rate, d(psi)/dt (rad/s).
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
Returns
-------
aircraft : Aircraft
Trimmed plane.
system : System
Trimmed system.
env : Environment
Trimmed environment (gravity in body axis).
results : dict
Relevant parameters calculated during the aircraft trimming,
including least square results.
Notes
-----
See section 3.4 in [1] for the algorithm description.
See section 2.5 in [1] for the definition of steady-state flight condition.
References
----------
.. [1] Stevens, BL and Lewis, FL, "Aircraft Control and Simulation",
       Wiley-Interscience.
"""
# Creating a copy of these objects in order to not modify any attribute
    # inside this function.
trimmed_ac = deepcopy(aircraft)
trimmed_sys = deepcopy(system)
trimmed_env = deepcopy(env)
trimmed_ac.TAS = TAS
trimmed_ac.Mach = aircraft.TAS / env.a
trimmed_ac.q_inf = 0.5 * trimmed_env.rho * aircraft.TAS ** 2
# Update environment
trimmed_env.update(trimmed_sys)
# Check if every necessary control for the aircraft is given in controls_0.
for ac_control in trimmed_ac.controls:
if ac_control not in controls_0:
raise ValueError("Control {} not given in controls_0: {}".format(
ac_control, controls_0))
trimmed_ac.controls = controls_0
# If controls2trim is not given, trim for every control.
if controls2trim is None:
controls2trim = list(controls_0.keys())
# TODO: try to look for a good initialization method for alpha & beta
initial_guess = [0.05 * np.sign(turn_rate), # alpha
0.001 * np.sign(turn_rate)] # beta
for control in controls2trim:
initial_guess.append(controls_0[control])
args = (trimmed_sys, trimmed_ac, trimmed_env,
controls2trim, gamma, turn_rate)
    lower_bounds = [-0.5, -0.25]  # Alpha and beta lower bounds.
    upper_bounds = [+0.5, +0.25]  # Alpha and beta upper bounds.
for ii in controls2trim:
lower_bounds.append(aircraft.control_limits[ii][0])
upper_bounds.append(aircraft.control_limits[ii][1])
bounds = (lower_bounds, upper_bounds)
results = least_squares(trimming_cost_func, x0=initial_guess, args=args,
verbose=verbose, bounds=bounds)
fun = results['fun']
cost = results['cost']
if cost > 1e-7 or any(abs(fun) > 1e-3):
warn("Trim process did not converge", RuntimeWarning)
trimmed_sys.set_initial_state_vector()
results = {'alpha': trimmed_ac.alpha, 'beta': trimmed_ac.beta,
'u': trimmed_sys.u, 'v': trimmed_sys.v, 'w': trimmed_sys.w,
'p': trimmed_sys.p, 'q': trimmed_sys.q, 'r': trimmed_sys.r,
'theta': trimmed_sys.theta, 'phi': trimmed_sys.phi,
'ls_opt': results}
for control in controls2trim:
results[control] = trimmed_ac.controls[control]
return trimmed_ac, trimmed_sys, trimmed_env, results
def turn_coord_cons(turn_rate, alpha, beta, TAS, gamma=0):
"""Calculates phi for coordinated turn.
"""
g0 = GRAVITY
G = turn_rate * TAS / g0
if abs(gamma) < 1e-8:
phi = G * cos(beta) / (cos(alpha) - G * sin(alpha) * sin(beta))
phi = atan(phi)
else:
a = 1 - G * tan(alpha) * sin(beta)
b = sin(gamma) / cos(beta)
c = 1 + G ** 2 * cos(beta) ** 2
sq = sqrt(c * (1 - b ** 2) + G ** 2 * sin(beta) ** 2)
num = (a - b ** 2) + b * tan(alpha) * sq
den = a ** 2 - b ** 2 * (1 + c * tan(alpha) ** 2)
phi = atan(G * cos(beta) / cos(alpha) * num / den)
return phi
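# Sanity check (illustrative values): for a level turn (gamma = 0) with
# alpha = beta = 0 the expression above reduces to phi = atan(G), i.e. the
# classic coordinated-turn bank angle atan(turn_rate * TAS / g0).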
def turn_coord_cons_horizontal_and_small_beta(turn_rate, alpha, TAS):
"""Calculates phi for coordinated turn given that gamma is equal to zero
and beta is small (beta << 1).
"""
g0 = GRAVITY
G = turn_rate * TAS / g0
phi = G / cos(alpha)
phi = atan(phi)
return phi
def rate_of_climb_cons(gamma, alpha, beta, phi):
"""Calculates theta for the given ROC, wind angles, and roll angle.
"""
a = cos(alpha) * cos(beta)
b = sin(phi) * sin(beta) + cos(phi) * sin(alpha) * cos(beta)
sq = sqrt(a ** 2 - sin(gamma) ** 2 + b ** 2)
theta = (a * b + sin(gamma) * sq) / (a ** 2 - sin(gamma) ** 2)
theta = atan(theta)
return theta
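# Sanity check (illustrative case): with gamma = 0, phi = 0 and beta = 0 the
# formula collapses to theta = atan(tan(alpha)) = alpha, i.e. in level,
# wings-level flight the pitch angle equals the angle of attack.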
def trimming_cost_func(trimmed_params, system, ac, env, controls2trim,
gamma, turn_rate):
"""Function to optimize
"""
alpha = trimmed_params[0]
beta = trimmed_params[1]
new_controls = {}
for ii, control in enumerate(controls2trim):
new_controls[control] = trimmed_params[ii + 2]
    # Choose coordinated turn constraint equation:
if abs(turn_rate) < 1e-8:
phi = 0
else:
phi = turn_coord_cons(turn_rate, alpha, beta, ac.TAS, gamma)
system.euler_angles[2] = phi
    # Rate of climb constraint
theta = rate_of_climb_cons(gamma, alpha, beta, phi)
system.euler_angles[1] = theta
    # w = turn_rate * k_h
    # k_h = -sin(theta) i_b + sin(phi) * cos(theta) j_b + cos(phi) * cos(theta) k_b
    # w = p * i_b + q * j_b + r * k_b
    p = - turn_rate * sin(theta)
    q = turn_rate * sin(phi) * cos(theta)
    r = turn_rate * cos(phi) * cos(theta)
system.vel_ang = np.array([p, q, r])
system.vel_body = wind2body((ac.TAS, 0, 0), alpha=alpha, beta=beta)
env.update(system)
ac.update(new_controls, system, env)
forces, moments = ac.calculate_forces_and_moments()
vel = np.concatenate((system.vel_body[:], system.vel_ang[:]))
output = system.lamceq(0, vel, ac.mass, ac.inertia, forces, moments)
return output
| mit |
vericred/vericred-python | test/test_providers_api.py | 1 | 10307 | # coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
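Putting the pieces together (assuming the same `Vericred-Api-Key` header shown
above), such a request might look like
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/networks?page=2&per_page=5"`.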
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.apis.providers_api import ProvidersApi
class TestProvidersApi(unittest.TestCase):
""" ProvidersApi unit test stubs """
def setUp(self):
self.api = vericred_client.apis.providers_api.ProvidersApi()
def tearDown(self):
pass
def test_get_provider(self):
"""
Test case for get_provider
Find a Provider
"""
pass
def test_get_providers(self):
"""
Test case for get_providers
Find Providers
"""
pass
def test_get_providers_0(self):
"""
Test case for get_providers_0
Find Providers
"""
pass
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Yuudachimoe/HikariChun-RedBot | lib/youtube_dl/extractor/abc.py | 24 | 6210 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
js_to_json,
int_or_none,
parse_iso8601,
)
class ABCIE(InfoExtractor):
IE_NAME = 'abc.net.au'
_VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
'id': '5868334',
'ext': 'mp4',
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
'skip': 'this video has expired',
}, {
'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
'md5': 'db2a5369238b51f9811ad815b69dc086',
'info_dict': {
'id': 'NvqvPeNZsHU',
'ext': 'mp4',
'upload_date': '20150816',
'uploader': 'ABC News (Australia)',
'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
'uploader_id': 'NewsOnABC',
'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
},
'add_ie': ['Youtube'],
'skip': 'Not accessible from Travis CI server',
}, {
'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
'info_dict': {
'id': '6880080',
'ext': 'mp3',
'title': 'NAB lifts interest rates, following Westpac and CBA',
'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
},
}, {
'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
mobj = re.search(
r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
webpage)
if mobj is None:
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
if expired:
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
raise ExtractorError('Unable to extract video urls')
urls_info = self._parse_json(
mobj.group('json_data'), video_id, transform_source=js_to_json)
if not isinstance(urls_info, list):
urls_info = [urls_info]
if mobj.group('type') == 'YouTube':
return self.playlist_result([
self.url_result(url_info['url']) for url_info in urls_info])
formats = [{
'url': url_info['url'],
'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
'width': int_or_none(url_info.get('width')),
'height': int_or_none(url_info.get('height')),
'tbr': int_or_none(url_info.get('bitrate')),
'filesize': int_or_none(url_info.get('filesize')),
} for url_info in urls_info]
self._sort_formats(formats)
return {
'id': video_id,
'title': self._og_search_title(webpage),
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
class ABCIViewIE(InfoExtractor):
IE_NAME = 'abc.net.au:iview'
_VALID_URL = r'https?://iview\.abc\.net\.au/programs/[^/]+/(?P<id>[^/?#]+)'
# ABC iview programs are normally available for 14 days only.
_TESTS = [{
'url': 'http://iview.abc.net.au/programs/diaries-of-a-broken-mind/ZX9735A001S00',
'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
'info_dict': {
'id': 'ZX9735A001S00',
'ext': 'mp4',
'title': 'Diaries Of A Broken Mind',
'description': 'md5:7de3903874b7a1be279fe6b68718fc9e',
'upload_date': '20161010',
'uploader_id': 'abc2',
'timestamp': 1476064920,
},
'skip': 'Video gone',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_params = self._parse_json(self._search_regex(
r'videoParams\s*=\s*({.+?});', webpage, 'video params'), video_id)
title = video_params.get('title') or video_params['seriesTitle']
stream = next(s for s in video_params['playlist'] if s.get('type') == 'program')
formats = self._extract_akamai_formats(stream['hds-unmetered'], video_id)
self._sort_formats(formats)
subtitles = {}
src_vtt = stream.get('captions', {}).get('src-vtt')
if src_vtt:
subtitles['en'] = [{
'url': src_vtt,
'ext': 'vtt',
}]
return {
'id': video_id,
'title': title,
'description': self._html_search_meta(['og:description', 'twitter:description'], webpage),
'thumbnail': self._html_search_meta(['og:image', 'twitter:image:src'], webpage),
'duration': int_or_none(video_params.get('eventDuration')),
'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
'series': video_params.get('seriesTitle'),
'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
'episode_number': int_or_none(self._html_search_meta('episodeNumber', webpage, default=None)),
'episode': self._html_search_meta('episode_title', webpage, default=None),
'uploader_id': video_params.get('channel'),
'formats': formats,
'subtitles': subtitles,
}
| gpl-3.0 |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.5/site-packages/pip/_vendor/distlib/metadata.py | 335 | 38833 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX_1_2 = re.compile('\n \|')
_LINE_PREFIX_PRE_1_2 = re.compile('\n ')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
    # first let's try to see if a field is not part of one of the versions
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
# possible_version contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
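# Illustrative example (hypothetical field values): with only the core fields
# plus 'Requires-Dist' set, neither 1.0 nor 1.1 can represent the data and no
# 2.0-only marker is present, so the call below would return '1.2':
#
#   _best_version({'Name': 'foo', 'Version': '1.0',
#                  'Requires-Dist': ['bar (>=1.0)']})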
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
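# For example, _get_name_and_version('my package', '1.0 beta', for_filename=True)
# returns 'my-package-1.0.beta': runs of characters outside [A-Za-z0-9.] collapse
# to '-' and spaces in the version become '.'.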
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
if self.metadata_version in ('1.0', '1.1'):
return _LINE_PREFIX_PRE_1_2.sub('\n', value)
else:
return _LINE_PREFIX_1_2.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
if self.metadata_version in ('1.0', '1.1'):
values = values.replace('\n', '\n ')
else:
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
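    # Illustrative usage sketch (not part of the original class): an instance
    # built from a mapping can be serialized to PKG-INFO style text; the field
    # values below are examples only.
    #
    #     md = LegacyMetadata(mapping={'name': 'spam', 'version': '1.0',
    #                                  'summary': 'An example'})
    #     buf = StringIO()
    #     md.write_file(buf, skip_unknown=True)
    #     buf.getvalue()    # 'Metadata-Version: ...' and other field lines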
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
        Behavior is like `dict.update`: if `other` has a ``keys`` method,
        its keys are looped over and ``self[key]`` is assigned ``other[key]``.
        Otherwise, ``other`` is assumed to be an iterable of ``(key, value)`` pairs.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
"'%s': '%s' is not valid (field '%s')",
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning("'%s': '%s' is not a valid version (field '%s')",
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
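    # Illustrative sketch (not part of the original class): get() falls back to
    # the field's default when it is missing -- 'UNKNOWN' for single-value
    # fields and [] for list fields -- unless an explicit default is passed.
    #
    #     md = LegacyMetadata(mapping={'name': 'spam', 'version': '1.0'})
    #     md.get('Summary')           # -> 'UNKNOWN'
    #     md.get('Requires-Dist')     # -> []
    #     md.get('Summary', 'n/a')    # -> 'n/a'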
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
        no Name or Version is provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append("Wrong value for '%s': %s" % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifiers', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
WHEEL_METADATA_FILENAME = 'metadata.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
    METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if not d:
d = self._data.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError("'%s' is an invalid value for "
"the '%s' property" % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
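    # Illustrative sketch (not part of the original class): in the 2.0 (JSON)
    # form each requirement entry is a dict, made conditional by 'extra' or
    # 'environment' keys.  The names below are examples only, and the 'ssl'
    # extra is assumed to be declared in self.extras.
    #
    #     reqts = [
    #         {'requires': ['requests (>=2.0)']},
    #         {'extra': 'ssl', 'requires': ['pyOpenSSL']},
    #         {'environment': "python_version == '2.6'",
    #          'requires': ['importlib']},
    #     ]
    #     md.get_requirements(reqts, extras=['ssl'],
    #                         env={'python_version': '2.7'})
    #     # -> ['requests (>=2.0)', 'pyOpenSSL'] for a non-legacy instance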
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
| mit |
Elettronik/SickRage | lib/hachoir_metadata/metadata.py | 54 | 9258 | # -*- coding: utf-8 -*-
from hachoir_core.compatibility import any, sorted
from hachoir_core.endian import endian_name
from hachoir_core.tools import makePrintable, makeUnicode
from hachoir_core.dict import Dict
from hachoir_core.error import error, HACHOIR_ERRORS
from hachoir_core.i18n import _
from hachoir_core.log import Logger
from hachoir_metadata.metadata_item import (
MIN_PRIORITY, MAX_PRIORITY, QUALITY_NORMAL)
from hachoir_metadata.register import registerAllItems
extractors = {}
class Metadata(Logger):
header = u"Metadata"
def __init__(self, parent, quality=QUALITY_NORMAL):
assert isinstance(self.header, unicode)
# Limit to 0.0 .. 1.0
if parent:
quality = parent.quality
else:
quality = min(max(0.0, quality), 1.0)
object.__init__(self)
object.__setattr__(self, "_Metadata__data", {})
object.__setattr__(self, "quality", quality)
header = self.__class__.header
object.__setattr__(self, "_Metadata__header", header)
registerAllItems(self)
def _logger(self):
pass
def __setattr__(self, key, value):
"""
Add a new value to data with name 'key'. Skip duplicates.
"""
# Invalid key?
if key not in self.__data:
raise KeyError(_("%s has no metadata '%s'") % (self.__class__.__name__, key))
# Skip duplicates
self.__data[key].add(value)
def setHeader(self, text):
object.__setattr__(self, "header", text)
def getItems(self, key):
try:
return self.__data[key]
except LookupError:
raise ValueError("Metadata has no value '%s'" % key)
def getItem(self, key, index):
try:
return self.getItems(key)[index]
except (LookupError, ValueError):
return None
def has(self, key):
return 1 <= len(self.getItems(key))
def get(self, key, default=None, index=0):
"""
Read first value of tag with name 'key'.
>>> from datetime import timedelta
>>> a = RootMetadata()
>>> a.duration = timedelta(seconds=2300)
>>> a.get('duration')
datetime.timedelta(0, 2300)
>>> a.get('author', u'Anonymous')
u'Anonymous'
"""
item = self.getItem(key, index)
if item is None:
if default is None:
raise ValueError("Metadata has no value '%s' (index %s)" % (key, index))
else:
return default
return item.value
def getValues(self, key):
try:
data = self.__data[key]
except LookupError:
raise ValueError("Metadata has no value '%s'" % key)
return [ item.value for item in data ]
def getText(self, key, default=None, index=0):
"""
Read first value, as unicode string, of tag with name 'key'.
>>> from datetime import timedelta
>>> a = RootMetadata()
>>> a.duration = timedelta(seconds=2300)
>>> a.getText('duration')
u'38 min 20 sec'
>>> a.getText('titre', u'Unknown')
u'Unknown'
"""
item = self.getItem(key, index)
if item is not None:
return item.text
else:
return default
def register(self, data):
assert data.key not in self.__data
data.metadata = self
self.__data[data.key] = data
def __iter__(self):
return self.__data.itervalues()
def __str__(self):
r"""
Create a multi-line ASCII string (end of line is "\n") which
        represents all data.
>>> a = RootMetadata()
>>> a.author = "haypo"
>>> a.copyright = unicode("© Hachoir", "UTF-8")
>>> print a
Metadata:
- Author: haypo
- Copyright: \xa9 Hachoir
@see __unicode__() and exportPlaintext()
"""
text = self.exportPlaintext()
return "\n".join( makePrintable(line, "ASCII") for line in text )
def __unicode__(self):
r"""
Create a multi-line Unicode string (end of line is "\n") which
        represents all data.
>>> a = RootMetadata()
>>> a.copyright = unicode("© Hachoir", "UTF-8")
>>> print repr(unicode(a))
u'Metadata:\n- Copyright: \xa9 Hachoir'
@see __str__() and exportPlaintext()
"""
return "\n".join(self.exportPlaintext())
def exportPlaintext(self, priority=None, human=True, line_prefix=u"- ", title=None):
r"""
        Convert the metadata to a multi-line Unicode string, skipping entries
        whose priority is lower than the specified priority.
        The default priority is Metadata.MAX_PRIORITY. If the human flag is
        True, data keys are translated to more readable names (e.g. "bit_rate"
        becomes "Bit rate"), which may be localized using gettext.
        If the priority is too small, no metadata is exported and None is
        returned.
>>> print RootMetadata().exportPlaintext()
None
>>> meta = RootMetadata()
>>> meta.copyright = unicode("© Hachoir", "UTF-8")
>>> print repr(meta.exportPlaintext())
[u'Metadata:', u'- Copyright: \xa9 Hachoir']
@see __str__() and __unicode__()
"""
if priority is not None:
priority = max(priority, MIN_PRIORITY)
priority = min(priority, MAX_PRIORITY)
else:
priority = MAX_PRIORITY
if not title:
title = self.header
text = ["%s:" % title]
for data in sorted(self):
if priority < data.priority:
break
if not data.values:
continue
if human:
title = data.description
else:
title = data.key
for item in data.values:
if human:
value = item.text
else:
value = makeUnicode(item.value)
text.append("%s%s: %s" % (line_prefix, title, value))
if 1 < len(text):
return text
else:
return None
def __nonzero__(self):
return any(item for item in self.__data.itervalues())
class RootMetadata(Metadata):
def __init__(self, quality=QUALITY_NORMAL):
Metadata.__init__(self, None, quality)
class MultipleMetadata(RootMetadata):
header = _("Common")
def __init__(self, quality=QUALITY_NORMAL):
RootMetadata.__init__(self, quality)
object.__setattr__(self, "_MultipleMetadata__groups", Dict())
object.__setattr__(self, "_MultipleMetadata__key_counter", {})
def __contains__(self, key):
return key in self.__groups
def __getitem__(self, key):
return self.__groups[key]
def iterGroups(self):
return self.__groups.itervalues()
def __nonzero__(self):
if RootMetadata.__nonzero__(self):
return True
return any(bool(group) for group in self.__groups)
def addGroup(self, key, metadata, header=None):
"""
Add a new group (metadata of a sub-document).
Returns False if the group is skipped, True if it has been added.
"""
if not metadata:
self.warning("Skip empty group %s" % key)
return False
if key.endswith("[]"):
key = key[:-2]
if key in self.__key_counter:
self.__key_counter[key] += 1
else:
self.__key_counter[key] = 1
key += "[%u]" % self.__key_counter[key]
if header:
metadata.setHeader(header)
self.__groups.append(key, metadata)
return True
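    # Illustrative sketch (not part of the original module): a key ending in
    # "[]" is numbered automatically, so repeated sub-documents get distinct
    # group names (audio_meta/audio_meta2 are hypothetical, non-empty groups).
    #
    #     multi = MultipleMetadata()
    #     multi.addGroup("audio[]", audio_meta)    # stored as "audio[1]"
    #     multi.addGroup("audio[]", audio_meta2)   # stored as "audio[2]"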
def exportPlaintext(self, priority=None, human=True, line_prefix=u"- "):
common = Metadata.exportPlaintext(self, priority, human, line_prefix)
if common:
text = common
else:
text = []
for key, metadata in self.__groups.iteritems():
if not human:
title = key
else:
title = None
value = metadata.exportPlaintext(priority, human, line_prefix, title=title)
if value:
text.extend(value)
if len(text):
return text
else:
return None
def registerExtractor(parser, extractor):
assert parser not in extractors
assert issubclass(extractor, RootMetadata)
extractors[parser] = extractor
def extractMetadata(parser, quality=QUALITY_NORMAL):
"""
Create a Metadata class from a parser. Returns None if no metadata
    extractor exists for the parser class.
"""
try:
extractor = extractors[parser.__class__]
except KeyError:
return None
metadata = extractor(quality)
try:
metadata.extract(parser)
except HACHOIR_ERRORS, err:
error("Error during metadata extraction: %s" % unicode(err))
return None
except Exception, err:
error("Error during metadata extraction: %s" % unicode(err))
return None
if metadata:
metadata.mime_type = parser.mime_type
metadata.endian = endian_name[parser.endian]
return metadata
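# Illustrative usage sketch (not part of the original module): a parser is
# typically obtained from hachoir_parser.createParser(); extractMetadata()
# then returns a populated metadata object, or None when extraction fails or
# no extractor is registered for the parser class.
#
#     from hachoir_parser import createParser
#     parser = createParser(u"movie.avi")
#     if parser:
#         meta = extractMetadata(parser)
#         if meta:
#             for line in meta.exportPlaintext():
#                 print line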
| gpl-3.0 |
cloudbase/neutron-virtualbox | neutron/plugins/ml2/plugin.py | 1 | 65394 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from eventlet import greenthread
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as os_db_exception
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import metadata_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import l3agentscheduler
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)
MAX_BIND_TRIES = 10
# REVISIT(rkukura): Move this and other network_type constants to
# providernet.py?
TYPE_MULTI_SEGMENT = 'multi-segment'
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
aliases += self.extension_manager.extension_aliases()
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
# First load drivers, then initialize DB, then initialize drivers
self.type_manager = managers.TypeManager()
self.extension_manager = managers.ExtensionManager()
self.mechanism_manager = managers.MechanismManager()
super(Ml2Plugin, self).__init__()
self.type_manager.initialize()
self.extension_manager.initialize()
self.mechanism_manager.initialize()
self._setup_rpc()
# REVISIT(rkukura): Use stevedore for these?
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.start_periodic_dhcp_agent_status_check()
LOG.info(_LI("Modular L2 Plugin initialization complete"))
def _setup_rpc(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
def start_rpc_listeners(self):
self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager),
securitygroups_rpc.SecurityGroupServerRpcCallback(),
dvr_rpc.DVRServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback(),
metadata_rpc.MetadataRpcCallback()]
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads()
def _filter_nets_provider(self, context, networks, filters):
return [network
for network in networks
if self.type_manager.network_matches_filters(network, filters)
]
def _notify_l3_agent_new_port(self, context, port):
if not port:
return
# Whenever a DVR serviceable port comes up on a
# node, it has to be communicated to the L3 Plugin
# and agent for creating the respective namespaces.
if (utils.is_dvr_serviced(port['device_owner'])):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if (utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)):
l3plugin.dvr_update_router_addvm(context, port)
def _get_host_port_if_changed(self, mech_context, attrs):
binding = mech_context._binding
host = attrs and attrs.get(portbindings.HOST_ID)
if (attributes.is_attr_set(host) and binding.host != host):
return mech_context.current
def _check_mac_update_allowed(self, orig_port, port, binding):
unplugged_types = (portbindings.VIF_TYPE_BINDING_FAILED,
portbindings.VIF_TYPE_UNBOUND)
new_mac = port.get('mac_address')
mac_change = (new_mac is not None and
orig_port['mac_address'] != new_mac)
if (mac_change and binding.vif_type not in unplugged_types):
raise exc.PortBound(port_id=orig_port['id'],
vif_type=binding.vif_type,
old_mac=orig_port['mac_address'],
new_mac=port['mac_address'])
return mac_change
def _process_port_binding(self, mech_context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
changes = False
host = attrs and attrs.get(portbindings.HOST_ID)
original_host = binding.host
if (attributes.is_attr_set(host) and
original_host != host):
binding.host = host
changes = True
vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
if (attributes.is_attr_set(vnic_type) and
binding.vnic_type != vnic_type):
binding.vnic_type = vnic_type
changes = True
# treat None as clear of profile.
profile = None
if attrs and portbindings.PROFILE in attrs:
profile = attrs.get(portbindings.PROFILE) or {}
if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
self._get_profile(binding)):
binding.profile = jsonutils.dumps(profile)
if len(binding.profile) > models.BINDING_PROFILE_LEN:
msg = _("binding:profile value too large")
raise exc.InvalidInput(error_message=msg)
changes = True
# Unbind the port if needed.
if changes:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
db.clear_binding_levels(session, port_id, original_host)
mech_context._clear_binding_levels()
binding.host = ''
self._update_port_dict_binding(port, binding)
return changes
def _bind_port_if_needed(self, context, allow_notify=False,
need_notify=False):
plugin_context = context._plugin_context
port_id = context._port['id']
# Since the mechanism driver bind_port() calls must be made
# outside a DB transaction locking the port state, it is
# possible (but unlikely) that the port's state could change
# concurrently while these calls are being made. If another
# thread or process succeeds in binding the port before this
# thread commits its results, the already committed results are
# used. If attributes such as binding:host_id,
# binding:profile, or binding:vnic_type are updated
# concurrently, this loop retries binding using the new
# values.
count = 0
while True:
# First, determine whether it is necessary and possible to
# bind the port.
binding = context._binding
if (binding.vif_type not in [portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]
or not binding.host):
# We either don't need to bind the port, or can't, so
# notify if needed and return.
if allow_notify and need_notify:
self._notify_port_updated(context)
return context
# Limit binding attempts to avoid any possibility of
# infinite looping and to ensure an error is logged
# instead. This does not need to be tunable because no
# more than a couple attempts should ever be required in
# normal operation. Log at info level if not 1st attempt.
count += 1
if count > MAX_BIND_TRIES:
LOG.error(_LE("Failed to commit binding results for %(port)s "
"after %(max)s tries"),
{'port': port_id, 'max': MAX_BIND_TRIES})
return context
if count > 1:
greenthread.sleep(0) # yield
LOG.info(_LI("Attempt %(count)s to bind port %(port)s"),
{'count': count, 'port': port_id})
# The port isn't already bound and the necessary
# information is available, so attempt to bind the port.
bind_context = self._bind_port(context)
# Now try to commit result of attempting to bind the port.
new_context, did_commit = self._commit_port_binding(
plugin_context, port_id, binding, bind_context)
if not new_context:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently",
port_id)
return context
context = new_context
if (context._binding.vif_type ==
portbindings.VIF_TYPE_BINDING_FAILED):
return context
# Need to notify if we succeed and our results were
# committed.
need_notify |= did_commit
def _bind_port(self, orig_context):
# Construct a new PortContext from the one from the previous
# transaction.
port = orig_context._port
orig_binding = orig_context._binding
new_binding = models.PortBinding(
host=orig_binding.host,
vnic_type=orig_binding.vnic_type,
profile=orig_binding.profile,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vif_details=''
)
self._update_port_dict_binding(port, new_binding)
new_context = driver_context.PortContext(
self, orig_context._plugin_context, port,
orig_context._network_context._network, new_binding, None)
# Attempt to bind the port and return the context with the
# result.
self.mechanism_manager.bind_port(new_context)
return new_context
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
new_context):
session = plugin_context.session
new_binding = new_context._binding
# After we've attempted to bind the port, we begin a
# transaction, get the current port state, and decide whether
# to commit the binding results.
#
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(session,
port_id)
if not port_db:
# The port has been deleted concurrently.
return (None, None)
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = new_context.network.current
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
# REVISIT(rkukura): The PortBinding instance from the
# ml2_port_bindings table, returned as cur_binding
# from db.get_locked_port_and_binding() above, is
# currently not used for DVR distributed ports, and is
# replaced here with the DVRPortBinding instance from
# the ml2_dvr_port_bindings table specific to the host
# on which the distributed port is being bound. It
# would be possible to optimize this code to avoid
# fetching the PortBinding instance in the DVR case,
# and even to avoid creating the unused entry in the
# ml2_port_bindings table. But the upcoming resolution
# for bug 1367391 will eliminate the
# ml2_dvr_port_bindings table, use the
# ml2_port_bindings table to store non-host-specific
# fields for both distributed and non-distributed
# ports, and introduce a new ml2_port_binding_hosts
# table for the fields that need to be host-specific
# in the distributed case. Since the PortBinding
# instance will then be needed, it does not make sense
# to optimize this code to avoid fetching it.
cur_binding = db.get_dvr_port_binding_by_host(
session, port_id, orig_binding.host)
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding, None,
original_port=oport)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
orig_binding.host == cur_binding.host and
orig_binding.vnic_type == cur_binding.vnic_type and
orig_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
db.clear_binding_levels(session, port_id, cur_binding.host)
db.set_binding_levels(session, new_context._binding_levels)
cur_context._binding_levels = new_context._binding_levels
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if (new_context._binding_levels and
new_context._new_port_status):
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
# Call the mechanism driver precommit methods, commit
# the results, and call the postcommit methods.
self.mechanism_manager.update_port_precommit(cur_context)
if commit:
self.mechanism_manager.update_port_postcommit(cur_context)
# Continue, using the port state as of the transaction that
# just finished, whether that transaction committed new
# results or discovered concurrent port state changes.
return (cur_context, commit)
def _update_port_dict_binding(self, port, binding):
port[portbindings.HOST_ID] = binding.host
port[portbindings.VNIC_TYPE] = binding.vnic_type
port[portbindings.PROFILE] = self._get_profile(binding)
port[portbindings.VIF_TYPE] = binding.vif_type
port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
def _get_vif_details(self, binding):
if binding.vif_details:
try:
return jsonutils.loads(binding.vif_details)
except Exception:
LOG.error(_LE("Serialized vif_details DB value '%(value)s' "
"for port %(port)s is invalid"),
{'value': binding.vif_details,
'port': binding.port_id})
return {}
def _get_profile(self, binding):
if binding.profile:
try:
return jsonutils.loads(binding.profile)
except Exception:
LOG.error(_LE("Serialized profile DB value '%(value)s' for "
"port %(port)s is invalid"),
{'value': binding.profile,
'port': binding.port_id})
return {}
def _ml2_extend_port_dict_binding(self, port_res, port_db):
# None when called during unit tests for other plugins.
if port_db.port_binding:
self._update_port_dict_binding(port_res, port_db.port_binding)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_extend_port_dict_binding'])
# Register extend dict methods for network and port resources.
# Each mechanism driver that supports extend attribute for the resources
# can add those attribute to the result.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_md_extend_port_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
def _ml2_md_extend_network_dict(self, result, netdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_network_dict(session, netdb, result)
def _ml2_md_extend_port_dict(self, result, portdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_port_dict(session, portdb, result)
def _ml2_md_extend_subnet_dict(self, result, subnetdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_subnet_dict(
session, subnetdb, result)
# Note - The following hook methods have "ml2" in their names so
# that they are not called twice during unit tests due to global
# registration of hooks in portbindings_db.py used by other
# plugins.
def _ml2_port_model_hook(self, context, original_model, query):
query = query.outerjoin(models.PortBinding,
(original_model.id ==
models.PortBinding.port_id))
return query
def _ml2_port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
return query.filter(models.PortBinding.host.in_(values))
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"ml2_port_bindings",
'_ml2_port_model_hook',
None,
'_ml2_port_result_filter_hook')
def _notify_port_updated(self, mech_context):
port = mech_context._port
segment = mech_context.bottom_bound_segment
if not segment:
# REVISIT(rkukura): This should notify agent to unplug port
network = mech_context.network.current
LOG.warning(_LW("In _notify_port_updated(), no bound segment for "
"port %(port_id)s on network %(network_id)s"),
{'port_id': port['id'],
'network_id': network['id']})
return
self.notifier.port_update(mech_context._plugin_context, port,
segment[api.NETWORK_TYPE],
segment[api.SEGMENTATION_ID],
segment[api.PHYSICAL_NETWORK])
def _delete_objects(self, context, resource, objects):
delete_op = getattr(self, 'delete_%s' % resource)
for obj in objects:
try:
delete_op(context, obj['result']['id'])
except KeyError:
LOG.exception(_LE("Could not find %s to delete."),
resource)
except Exception:
LOG.exception(_LE("Could not delete %(res)s %(id)s."),
{'res': resource,
'id': obj['result']['id']})
def _create_bulk_ml2(self, resource, context, request_items):
objects = []
collection = "%ss" % resource
items = request_items[collection]
try:
with context.session.begin(subtransactions=True):
obj_creator = getattr(self, '_create_%s_db' % resource)
for item in items:
attrs = item[resource]
result, mech_context = obj_creator(context, item)
objects.append({'mech_context': mech_context,
'result': result,
'attributes': attrs})
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("An exception occurred while creating "
"the %(resource)s:%(item)s"),
{'resource': resource, 'item': item})
try:
postcommit_op = getattr(self.mechanism_manager,
'create_%s_postcommit' % resource)
for obj in objects:
postcommit_op(obj['mech_context'])
return objects
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.exception(_LE("mechanism_manager.create_%(res)s"
"_postcommit failed for %(res)s: "
"'%(failed_id)s'. Deleting "
"%(res)ss %(resource_ids)s"),
{'res': resource,
'failed_id': obj['result']['id'],
'resource_ids': ', '.join(resource_ids)})
self._delete_objects(context, resource, objects)
def _create_network_db(self, context, network):
net_data = network[attributes.NETWORK]
tenant_id = self._get_tenant_id_for_create(context, net_data)
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
result = super(Ml2Plugin, self).create_network(context, network)
self.extension_manager.process_create_network(context, net_data,
result)
self._process_l3_create(context, result, net_data)
net_data['id'] = result['id']
self.type_manager.create_network_segments(context, net_data,
tenant_id)
self.type_manager.extend_network_dict_provider(context, result)
mech_context = driver_context.NetworkContext(self, context,
result)
self.mechanism_manager.create_network_precommit(mech_context)
return result, mech_context
def create_network(self, context, network):
result, mech_context = self._create_network_db(context, network)
try:
self.mechanism_manager.create_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_network_postcommit "
"failed, deleting network '%s'"), result['id'])
self.delete_network(context, result['id'])
return result
def create_network_bulk(self, context, networks):
objects = self._create_bulk_ml2(attributes.NETWORK, context, networks)
return [obj['result'] for obj in objects]
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
original_network = super(Ml2Plugin, self).get_network(context, id)
updated_network = super(Ml2Plugin, self).update_network(context,
id,
network)
self.extension_manager.process_update_network(context, network,
updated_network)
self._process_l3_update(context, updated_network,
network['network'])
self.type_manager.extend_network_dict_provider(context,
updated_network)
mech_context = driver_context.NetworkContext(
self, context, updated_network,
original_network=original_network)
self.mechanism_manager.update_network_precommit(mech_context)
# TODO(apech) - handle errors raised by update_network, potentially
# by re-calling update_network with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_network_postcommit(mech_context)
return updated_network
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).get_network(context, id, None)
self.type_manager.extend_network_dict_provider(context, result)
return self._fields(result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(Ml2Plugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self.type_manager.extend_network_dict_provider(context, net)
nets = self._filter_nets_provider(context, nets, filters)
nets = self._filter_nets_l3(context, nets, filters)
return [self._fields(net, fields) for net in nets]
def _delete_ports(self, context, ports):
for port in ports:
try:
self.delete_port(context, port.id)
except exc.PortNotFound:
# concurrent port deletion can be performed by
# release_dhcp_port caused by concurrent subnet_delete
LOG.info(_LI("Port %s was deleted concurrently"), port.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting port %s"),
port.id)
def _delete_subnets(self, context, subnets):
for subnet in subnets:
try:
self.delete_subnet(context, subnet.id)
except exc.SubnetNotFound:
LOG.info(_LI("Subnet %s was deleted concurrently"),
subnet.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception auto-deleting subnet %s"),
subnet.id)
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug("Deleting network %s", id)
session = context.session
while True:
try:
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
#
# Process L3 first, since, depending on the L3 plugin, it may
# involve locking the db-access semaphore, sending RPC
# notifications, and/or calling delete_port on this plugin.
# Additionally, a rollback may not be enough to undo the
# deletion of a floating IP with certain L3 backends.
self._process_l3_delete(context, id)
# Using query().with_lockmode isn't necessary. Foreign-key
# constraints prevent deletion if concurrent creation happens.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Ports to auto-delete: %s", ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug("Tenant-owned ports exist")
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).all())
LOG.debug("Subnets to auto-delete: %s", subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
self.type_manager.release_network_segments(session, id)
record = self._get_network(context, id)
LOG.debug("Deleting network record %s", record)
session.delete(record)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug("Committing transaction")
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
LOG.warning(_LW("A concurrent port creation has "
"occurred"))
continue
self._delete_ports(context, ports)
self._delete_subnets(context, subnets)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_network_postcommit"
" failed"))
self.notifier.network_delete(context, id)
def _create_subnet_db(self, context, subnet):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).create_subnet(context, subnet)
self.extension_manager.process_create_subnet(context, subnet,
result)
mech_context = driver_context.SubnetContext(self, context, result)
self.mechanism_manager.create_subnet_precommit(mech_context)
return result, mech_context
def create_subnet(self, context, subnet):
result, mech_context = self._create_subnet_db(context, subnet)
try:
self.mechanism_manager.create_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_subnet_postcommit "
"failed, deleting subnet '%s'"), result['id'])
self.delete_subnet(context, result['id'])
return result
def create_subnet_bulk(self, context, subnets):
objects = self._create_bulk_ml2(attributes.SUBNET, context, subnets)
return [obj['result'] for obj in objects]
def update_subnet(self, context, id, subnet):
session = context.session
with session.begin(subtransactions=True):
original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
updated_subnet = super(Ml2Plugin, self).update_subnet(
context, id, subnet)
self.extension_manager.process_update_subnet(context, subnet,
updated_subnet)
mech_context = driver_context.SubnetContext(
self, context, updated_subnet, original_subnet=original_subnet)
self.mechanism_manager.update_subnet_precommit(mech_context)
# TODO(apech) - handle errors raised by update_subnet, potentially
# by re-calling update_subnet with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_subnet_postcommit(mech_context)
return updated_subnet
def delete_subnet(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
# function is not used because it deallocates the subnet's addresses
# from ports in the DB without invoking the derived class's
# update_port(), preventing mechanism drivers from being called.
# This approach should be revisited when the API layer is reworked
# during icehouse.
LOG.debug("Deleting subnet %s", id)
session = context.session
while True:
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock
# wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
record = self._get_subnet(context, id)
subnet = self._make_subnet_dict(record, None)
qry_allocated = (session.query(models_v2.IPAllocation).
filter_by(subnet_id=id).
join(models_v2.Port))
is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
# Remove network owned ports, and delete IP allocations
# for IPv6 addresses which were automatically generated
# via SLAAC
if not is_auto_addr_subnet:
qry_allocated = (
qry_allocated.filter(models_v2.Port.device_owner.
in_(db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS)))
allocated = qry_allocated.all()
# Delete all the IPAllocation that can be auto-deleted
if allocated:
map(session.delete, allocated)
LOG.debug("Ports to auto-deallocate: %s", allocated)
# Check if there are more IP allocations, unless
# is_auto_address_subnet is True. In that case the check is
# unnecessary. This additional check not only would be wasteful
# for this class of subnet, but is also error-prone since when
# the isolation level is set to READ COMMITTED allocations made
# concurrently will be returned by this query
if not is_auto_addr_subnet:
if self._subnet_check_ip_allocations(context, id):
LOG.debug("Found IP allocations on subnet %s, "
"cannot delete", id)
raise exc.SubnetInUse(subnet_id=id)
                # If allocated is empty, then all the IPAllocations were
                # correctly deleted during the previous pass.
if not allocated:
mech_context = driver_context.SubnetContext(self, context,
subnet)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
LOG.debug("Deleting subnet record")
session.delete(record)
LOG.debug("Committing transaction")
break
for a in allocated:
if a.port_id:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {'port':
{'fixed_ips': [{'subnet_id': ip.subnet_id,
'ip_address': ip.ip_address}
for ip in a.ports.fixed_ips
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception deleting fixed_ip "
"from port %s"), a.port_id)
try:
self.mechanism_manager.delete_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the subnet. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_subnet_postcommit failed"))
def _create_port_db(self, context, port):
attrs = port[attributes.PORT]
attrs['status'] = const.PORT_STATUS_DOWN
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
result = super(Ml2Plugin, self).create_port(context, port)
self.extension_manager.process_create_port(context, attrs, result)
self._process_port_create_security_group(context, result, sgids)
network = self.get_network(context, result['network_id'])
binding = db.add_port_binding(session, result['id'])
mech_context = driver_context.PortContext(self, context, result,
network, binding, None)
self._process_port_binding(mech_context, attrs)
result[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, result,
attrs.get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, result,
dhcp_opts)
self.mechanism_manager.create_port_precommit(mech_context)
return result, mech_context
def create_port(self, context, port):
attrs = port['port']
result, mech_context = self._create_port_db(context, port)
new_host_port = self._get_host_port_if_changed(mech_context, attrs)
self._notify_l3_agent_new_port(context, new_host_port)
try:
self.mechanism_manager.create_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_port_postcommit "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
self.notify_security_groups_member_updated(context, result)
try:
bound_context = self._bind_port_if_needed(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("_bind_port_if_needed "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
return bound_context._port
def create_port_bulk(self, context, ports):
objects = self._create_bulk_ml2(attributes.PORT, context, ports)
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
results = [obj['result'] for obj in objects]
self.notify_security_groups_member_updated_bulk(context, results)
for obj in objects:
attrs = obj['attributes']
if attrs and attrs.get(portbindings.HOST_ID):
new_host_port = self._get_host_port_if_changed(
obj['mech_context'], attrs)
self._notify_l3_agent_new_port(context, new_host_port)
try:
for obj in objects:
obj['bound_context'] = self._bind_port_if_needed(
obj['mech_context'])
return [obj['bound_context']._port for obj in objects]
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
resource_ids = [res['result']['id'] for res in objects]
LOG.error(_LE("_bind_port_if_needed failed. "
"Deleting all ports from create bulk '%s'"),
resource_ids)
self._delete_objects(context, 'port', objects)
def update_port(self, context, id, port):
attrs = port['port']
need_port_update_notify = False
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
mac_address_updated = self._check_mac_update_allowed(
port_db, port, binding)
need_port_update_notify |= mac_address_updated
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
self.extension_manager.process_update_port(context, attrs,
updated_port)
if addr_pair.ADDRESS_PAIRS in port['port']:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
levels = db.get_binding_levels(session, id, binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding, levels,
original_port=original_port)
new_host_port = self._get_host_port_if_changed(mech_context, attrs)
need_port_update_notify |= self._process_port_binding(
mech_context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
# Notifications must be sent after the above transaction is complete
if mac_address_updated and l3plugin and is_dvr_enabled:
# NOTE: "add" actually does a 'replace' operation
l3plugin.dvr_vmarp_table_update(context, updated_port, "add")
self._notify_l3_agent_new_port(context, new_host_port)
# TODO(apech) - handle errors raised by update_port, potentially
# by re-calling update_port with the previous attributes. For
        # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_context = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_context._port
def _process_dvr_port_binding(self, mech_context, context, attrs):
session = mech_context._plugin_context.session
binding = mech_context._binding
port = mech_context.current
port_id = port['id']
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
if binding.host:
db.clear_binding_levels(session, port_id, binding.host)
binding.host = ''
self._update_port_dict_binding(port, binding)
binding.host = attrs and attrs.get(portbindings.HOST_ID)
binding.router_id = attrs and attrs.get('device_id')
def update_dvr_port_binding(self, context, id, port):
attrs = port['port']
host = attrs and attrs.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
if not host_set:
LOG.error(_LE("No Host supplied to bind DVR Port %s"), id)
return
session = context.session
binding = db.get_dvr_port_binding_by_host(session, id, host)
device_id = attrs and attrs.get('device_id')
router_id = binding and binding.get('router_id')
update_required = (not binding or
binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
router_id != device_id)
if update_required:
with session.begin(subtransactions=True):
try:
orig_port = super(Ml2Plugin, self).get_port(context, id)
except exc.PortNotFound:
LOG.debug("DVR Port %s has been deleted concurrently", id)
return
if not binding:
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=device_id)
network = self.get_network(context, orig_port['network_id'])
levels = db.get_binding_levels(session, id, host)
mech_context = driver_context.PortContext(self,
context, orig_port, network,
binding, levels, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
self._bind_port_if_needed(mech_context)
def delete_port(self, context, id, l3_port_check=True):
LOG.debug("Deleting port %s", id)
removed_routers = []
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
LOG.debug("The port '%s' was deleted", id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
bound_mech_contexts = []
device_owner = port['device_owner']
if device_owner == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
levels = db.get_binding_levels(context.session, id,
bind.host)
mech_context = driver_context.PortContext(
self, context, port, network, bind, levels)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
else:
levels = db.get_binding_levels(context.session, id,
binding.host)
mech_context = driver_context.PortContext(
self, context, port, network, binding, levels)
if is_dvr_enabled and utils.is_dvr_serviced(device_owner):
removed_routers = l3plugin.dvr_deletens_if_no_port(
context, id)
self.mechanism_manager.delete_port_precommit(mech_context)
bound_mech_contexts.append(mech_context)
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s",
{"port_id": id, "owner": device_owner})
super(Ml2Plugin, self).delete_port(context, id)
# now that we've left db transaction, we are safe to notify
if l3plugin:
if is_dvr_enabled:
l3plugin.dvr_vmarp_table_update(context, port, "del")
l3plugin.notify_routers_updated(context, router_ids)
for router in removed_routers:
try:
l3plugin.remove_router_from_l3_agent(
context, router['agent_id'], router['router_id'])
except l3agentscheduler.RouterNotHostedByL3Agent:
# router may have been removed by another process
LOG.debug("Router %(id)s not hosted by L3 agent %(agent)s",
{'id': router['router_id'],
'agent': router['agent_id']})
try:
# Note that DVR Interface ports will have bindings on
# multiple hosts, and so will have multiple mech_contexts,
# while other ports typically have just one.
for mech_context in bound_mech_contexts:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_LE("mechanism_manager.delete_port_postcommit failed for"
" port %s"), id)
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id, host=None):
session = plugin_context.session
with session.begin(subtransactions=True):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter(models_v2.Port.id.startswith(port_id)).
one())
except sa_exc.NoResultFound:
LOG.debug("No ports have port_id starting with %s",
port_id)
return
except sa_exc.MultipleResultsFound:
LOG.error(_LE("Multiple ports have port_id starting with %s"),
port_id)
return
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
LOG.error(_LE("Binding info for DVR port %s not found"),
port_id)
return None
levels = db.get_binding_levels(session, port_db.id, host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
else:
                # Since eager loads are disabled in the port_db query, the
                # related port_binding attribute may disappear due to a
                # concurrent port deletion; this is not an error condition.
binding = port_db.port_binding
if not binding:
LOG.info(_LI("Binding info for port %s was not found, "
"it might have been deleted already."),
port_id)
return
levels = db.get_binding_levels(session, port_db.id,
port_db.port_binding.host)
port_context = driver_context.PortContext(
self, plugin_context, port, network, binding, levels)
return self._bind_port_if_needed(port_context)
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %(port)s updated up by agent not found"),
{'port': port_id})
return None
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
levels = db.get_binding_levels(session, port_id,
port.port_binding.host)
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
levels, original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
return
binding['status'] = status
binding.update(binding)
updated = True
if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_LW("Port %s not found during update"),
port_id)
return
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
levels = db.get_binding_levels(session, port_id, host)
mech_context = (driver_context.PortContext(
self, context, updated_port, network,
binding, levels, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
db.delete_dvr_port_binding_if_stale(session, binding)
return port['id']
def port_bound_to_host(self, context, port_id, host):
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(port_id)
return (port_host == host)
def get_ports_from_devices(self, devices):
port_ids_to_devices = dict((self._device_to_port_id(device), device)
for device in devices)
port_ids = port_ids_to_devices.keys()
ports = db.get_ports_and_sgs(port_ids)
for port in ports:
# map back to original requested id
port_id = next((port_id for port_id in port_ids
if port['id'].startswith(port_id)), None)
port['device'] = port_ids_to_devices.get(port_id)
return ports
def _device_to_port_id(self, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
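        # For example (illustrative name): a device such as 'tap3f9c467c-77'
        # maps to the truncated port UUID '3f9c467c-77' once the 'tap' prefix
        # is stripped; get_ports_from_devices later matches it via startswith().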
if device.startswith(const.TAP_DEVICE_PREFIX):
return device[len(const.TAP_DEVICE_PREFIX):]
else:
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC, then remove the 'else' clause
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(device)
if port:
return port.id
return device
| apache-2.0 |
kmarius/qutebrowser | tests/end2end/fixtures/webserver.py | 4 | 6473 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Fixtures for the server webserver."""
import re
import sys
import json
import os.path
from http import HTTPStatus
import attr
import pytest
from PyQt5.QtCore import pyqtSignal, QUrl
from end2end.fixtures import testprocess
from qutebrowser.utils import utils
class Request(testprocess.Line):
"""A parsed line from the flask log output.
Attributes:
verb/path/status: Parsed from the log output.
"""
def __init__(self, data):
super().__init__(data)
try:
parsed = json.loads(data)
except ValueError:
raise testprocess.InvalidLine(data)
assert isinstance(parsed, dict)
assert set(parsed.keys()) == {'path', 'verb', 'status'}
self.verb = parsed['verb']
path = parsed['path']
self.path = '/' if path == '/' else path.rstrip('/')
self.status = parsed['status']
self._check_status()
def _check_status(self):
"""Check if the http status is what we expected."""
path_to_statuses = {
'/favicon.ico': [HTTPStatus.NOT_FOUND],
'/does-not-exist': [HTTPStatus.NOT_FOUND],
'/does-not-exist-2': [HTTPStatus.NOT_FOUND],
'/404': [HTTPStatus.NOT_FOUND],
'/redirect-later': [HTTPStatus.FOUND],
'/redirect-self': [HTTPStatus.FOUND],
'/redirect-to': [HTTPStatus.FOUND],
'/relative-redirect': [HTTPStatus.FOUND],
'/absolute-redirect': [HTTPStatus.FOUND],
'/cookies/set': [HTTPStatus.FOUND],
'/500-inline': [HTTPStatus.INTERNAL_SERVER_ERROR],
'/500': [HTTPStatus.INTERNAL_SERVER_ERROR],
}
for i in range(15):
path_to_statuses['/redirect/{}'.format(i)] = [HTTPStatus.FOUND]
for suffix in ['', '1', '2', '3', '4', '5', '6']:
key = '/basic-auth/user{}/password{}'.format(suffix, suffix)
path_to_statuses[key] = [HTTPStatus.UNAUTHORIZED, HTTPStatus.OK]
default_statuses = [HTTPStatus.OK, HTTPStatus.NOT_MODIFIED]
sanitized = QUrl('http://localhost' + self.path).path() # Remove ?foo
expected_statuses = path_to_statuses.get(sanitized, default_statuses)
if self.status not in expected_statuses:
raise AssertionError(
"{} loaded with status {} but expected {}".format(
sanitized, self.status,
' / '.join(repr(e) for e in expected_statuses)))
def __eq__(self, other):
return NotImplemented
@attr.s(frozen=True, cmp=False, hash=True)
class ExpectedRequest:
"""Class to compare expected requests easily."""
verb = attr.ib()
path = attr.ib()
@classmethod
def from_request(cls, request):
"""Create an ExpectedRequest from a Request."""
return cls(request.verb, request.path)
def __eq__(self, other):
if isinstance(other, (Request, ExpectedRequest)):
return self.verb == other.verb and self.path == other.path
else:
return NotImplemented
class WebserverProcess(testprocess.Process):
"""Abstraction over a running Flask server process.
Reads the log from its stdout and parses it.
Signals:
new_request: Emitted when there's a new request received.
"""
new_request = pyqtSignal(Request)
Request = Request # So it can be used from the fixture easily.
ExpectedRequest = ExpectedRequest
KEYS = ['verb', 'path']
def __init__(self, request, script, parent=None):
super().__init__(request, parent)
self._script = script
self.port = utils.random_port()
self.new_data.connect(self.new_request)
def get_requests(self):
"""Get the requests to the server during this test."""
requests = self._get_data()
return [r for r in requests if r.path != '/favicon.ico']
def _parse_line(self, line):
self._log(line)
started_re = re.compile(r' \* Running on https?://127\.0\.0\.1:{}/ '
r'\(Press CTRL\+C to quit\)'.format(self.port))
if started_re.fullmatch(line):
self.ready.emit()
return None
return Request(line)
def _executable_args(self):
if hasattr(sys, 'frozen'):
executable = os.path.join(os.path.dirname(sys.executable),
self._script)
args = []
else:
executable = sys.executable
py_file = os.path.join(os.path.dirname(__file__),
self._script + '.py')
args = [py_file]
return executable, args
def _default_args(self):
return [str(self.port)]
@pytest.fixture(scope='session', autouse=True)
def server(qapp, request):
"""Fixture for an server object which ensures clean setup/teardown."""
server = WebserverProcess(request, 'webserver_sub')
server.start()
yield server
server.terminate()
@pytest.fixture(autouse=True)
def server_per_test(server, request):
"""Fixture to clean server request list after each test."""
request.node._server_log = server.captured_log
server.before_test()
yield
server.after_test()
@pytest.fixture
def ssl_server(request, qapp):
"""Fixture for a webserver with a self-signed SSL certificate.
This needs to be explicitly used in a test, and overwrites the server log
used in that test.
"""
server = WebserverProcess(request, 'webserver_sub_ssl')
request.node._server_log = server.captured_log
server.start()
yield server
server.after_test()
server.terminate()
| gpl-3.0 |
xissy/titanium-mobile-sdk | android/run.py | 1 | 1625 | import os, subprocess, types, sys, re
def check_output_for_error(output, match, error_in_first_match):
success = re.findall(match, output)
if len(success) > 0:
if (error_in_first_match):
print "[ERROR] %s" % success[0]
sys.exit(1)
else:
return True
else:
return False
def check_and_print_err(err, warning_regex):
errored = False
for line in err.splitlines():
warning_match = None
if warning_regex != None:
warning_match = re.search(warning_regex, line)
if warning_match != None:
sys.stderr.write("[WARN] %s\n" % line)
else:
errored = True
sys.stderr.write("[ERROR] %s\n" % line)
sys.stderr.flush()
return errored
def run(args, ignore_error=False, debug=True, ignore_output=False, warning_regex=None, return_error=False, return_process=False):
if debug:
print "[DEBUG] %s" % (subprocess.list2cmdline(args))
sys.stdout.flush()
if ignore_output:
subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE).wait()
return None
process = subprocess.Popen(args, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
(so, se) = process.communicate()
if type(se) != types.NoneType and len(se) > 0:
if not ignore_error:
err = str(se)
if 'adb' in args[0] and ' bytes in ' in err:
# adb emits data about compile into stderr so we ignore it in special case
pass
else:
if (check_and_print_err(err, warning_regex)):
if return_process:
return (None, process)
else:
return None
if return_error:
if return_process:
return so, se, process
else:
return so, se
elif return_process:
return so, process
else:
return so
| apache-2.0 |
s20121035/rk3288_android5.1_repo | external/clang/bindings/python/tests/cindex/test_cdb.py | 38 | 4306 | from clang.cindex import CompilationDatabase
from clang.cindex import CompilationDatabaseError
from clang.cindex import CompileCommands
from clang.cindex import CompileCommand
import os
import gc
kInputsDir = os.path.join(os.path.dirname(__file__), 'INPUTS')
def test_create_fail():
"""Check we fail loading a database with an assertion"""
path = os.path.dirname(__file__)
try:
cdb = CompilationDatabase.fromDirectory(path)
except CompilationDatabaseError as e:
assert e.cdb_error == CompilationDatabaseError.ERROR_CANNOTLOADDATABASE
else:
assert False
def test_create():
"""Check we can load a compilation database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
def test_lookup_fail():
"""Check file lookup failure"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
assert cdb.getCompileCommands('file_do_not_exist.cpp') == None
def test_lookup_succeed():
"""Check we get some results if the file exists in the db"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
assert len(cmds) != 0
def test_all_compilecommand():
"""Check we get all results from the db"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getAllCompileCommands()
assert len(cmds) == 3
expected = [
{ 'wd': '/home/john.doe/MyProjectA',
'line': ['clang++', '-o', 'project2.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProjectB',
'line': ['clang++', '-DFEATURE=1', '-o', 'project2-feature.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProject',
'line': ['clang++', '-o', 'project.o', '-c',
'/home/john.doe/MyProject/project.cpp']}
]
for i in range(len(cmds)):
assert cmds[i].directory == expected[i]['wd']
for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
assert arg == exp
def test_1_compilecommand():
"""Check file with single compile command"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
assert len(cmds) == 1
assert cmds[0].directory == '/home/john.doe/MyProject'
expected = [ 'clang++', '-o', 'project.o', '-c',
'/home/john.doe/MyProject/project.cpp']
for arg, exp in zip(cmds[0].arguments, expected):
assert arg == exp
def test_2_compilecommand():
"""Check file with 2 compile commands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp')
assert len(cmds) == 2
expected = [
{ 'wd': '/home/john.doe/MyProjectA',
'line': ['clang++', '-o', 'project2.o', '-c',
'/home/john.doe/MyProject/project2.cpp']},
{ 'wd': '/home/john.doe/MyProjectB',
'line': ['clang++', '-DFEATURE=1', '-o', 'project2-feature.o', '-c',
'/home/john.doe/MyProject/project2.cpp']}
]
for i in range(len(cmds)):
assert cmds[i].directory == expected[i]['wd']
for arg, exp in zip(cmds[i].arguments, expected[i]['line']):
assert arg == exp
def test_compilecommand_iterator_stops():
"""Check that iterator stops after the correct number of elements"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
count = 0
for cmd in cdb.getCompileCommands('/home/john.doe/MyProject/project2.cpp'):
count += 1
assert count <= 2
def test_compilationDB_references():
"""Ensure CompilationsCommands are independent of the database"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
gc.collect()
workingdir = cmds[0].directory
def test_compilationCommands_references():
"""Ensure CompilationsCommand keeps a reference to CompilationCommands"""
cdb = CompilationDatabase.fromDirectory(kInputsDir)
cmds = cdb.getCompileCommands('/home/john.doe/MyProject/project.cpp')
del cdb
cmd0 = cmds[0]
del cmds
gc.collect()
workingdir = cmd0.directory
| gpl-3.0 |
dtrip/weevely3 | modules/audit/phpconf.py | 15 | 6310 | from core.vectors import PhpCode, ShellCmd, ModuleExec, Os
from core.module import Module
from core import messages
from core import modules
import re
class Phpconf(Module):
"""Audit PHP configuration."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
def _check_user(self):
user = ModuleExec('system_info', [ '-info', 'whoami' ]).load_result_or_run('whoami')
if not user: return messages.module_audit_phpconf.error
result = user
if 'win' in self.os_type: result += ': ' + messages.module_audit_phpconf.user_win_admin
elif user == 'root': result += ': ' + messages.module_audit_phpconf.user_nix_root
return result
def _check_openbasedir(self):
open_basedir = ModuleExec('system_info', [ '-info', 'open_basedir' ]).load_result_or_run('open_basedir')
if not open_basedir: return messages.module_audit_phpconf.basedir_unrestricted
dir_sep = ModuleExec('system_info', [ '-info', 'dir_sep' ]).load_result_or_run('dir_sep')
if not self.os_type or not dir_sep: return messages.module_audit_phpconf.error
        # PHP separates open_basedir entries with ';' on Windows and ':' elsewhere.
        path_sep = ';' if 'win' in self.os_type else ':'
paths = open_basedir.split(path_sep)
result = ''
for path in paths:
result += path + ': '
if not path.endswith(dir_sep): result += ' ' + messages.module_audit_phpconf.basedir_no_slash
elif path == '.': result += ' ' + messages.module_audit_phpconf.basedir_dot
result += '\n'
        # Strip the trailing newline appended in the loop above.
        return result[:-1]
def _check_features(self):
features = [
'expose_php',
'file_uploads',
'register_globals',
'allow_url_fopen',
'display_errors',
'enable_dl',
'safe_mode',
'magic_quotes_gpc',
'allow_url_include',
'session.use_trans_sid'
]
feat_found = PhpCode("""foreach ( Array("${ '", "'.join(features) }") as $f) if((bool)ini_get($f)) print($f. "\n");""").run(
{ 'features' : features }
)
result = []
if feat_found:
for feat in feat_found.split('\n'):
feat_msg = 'feat_' + re.sub('[^a-zA-Z_]', '_', feat)
if hasattr(messages.module_audit_phpconf, feat_msg):
result.append((feat, getattr(messages.module_audit_phpconf, feat_msg)))
return result
def _check_classes(self):
classes = [
'splFileObject',
'COM',
'Java'
]
class_found = PhpCode("""foreach ( Array("${ '", "'.join(classes) }") as $f) if((bool)class_exists($f)) print($f. "\n");""").run(
{ 'classes' : classes }
)
result = []
if class_found:
for class_name in class_found.split('\n'):
class_msg = 'class_' + re.sub('[^a-zA-Z_]', '_', class_name)
if hasattr(messages.module_audit_phpconf, class_msg):
result.append((class_name, getattr(messages.module_audit_phpconf, class_msg)))
return result
def _check_functions(self):
functions = {
'info' : [
'apache_get_modules',
'apache_get_version',
'apache_getenv',
'get_loaded_extensions',
'phpinfo',
'phpversion',
],
'files' : [
'chgrp',
'chmod',
'chown',
'copy',
'link',
'mkdir',
'rename',
'rmdir',
'symlink',
'touch',
'unlink',
'posix_mkfifo'
],
'log' : [
'openlog',
'syslog',
'debugger_off',
'debugger_on',
'closelog'
],
'proc_execution' : [
'exec',
'passthru',
'pcntl_exec',
'popen',
'proc_open',
'shell_exec',
'system',
'dotnet_load'
],
'proc_manipulation' : [
'apache_child_terminate',
'apache_note',
'apache_setenv',
'dl',
'proc_close',
'proc_get_status',
'proc_terminate',
'proc_nice',
'putenv',
'virtual'
'posix_kill',
'posix_setpgid',
'posix_setsid',
'posix_setuid',
'runkit_function_rename'
]
}
result = []
for ftype, flist in functions.items():
func_found = PhpCode("""foreach ( Array("${ '", "'.join(functions) }") as $f) if(function_exists($f)&&is_callable($f)) print($f. "\n");""").run(
{ 'functions' : flist }
)
if func_found:
for func_name in func_found.split('\n'):
type_msg = 'func_' + re.sub('[^a-zA-Z_]', '_', ftype)
if hasattr(messages.module_audit_phpconf, type_msg):
result.append((func_name, getattr(messages.module_audit_phpconf, type_msg)))
return result
def run(self):
self.os_type = ModuleExec('system_info', [ '-info', 'os' ]).load_result_or_run('os')
self.php_version = ModuleExec('system_info', [ '-info', 'php_version' ]).load_result_or_run('php_version')
results = [
( 'Operating System',
self.os_type if self.os_type else 'Undetected' ),
( 'PHP version',
self.php_version if self.php_version else 'Undetected' ),
( 'User',
self._check_user() ),
( 'open_basedir',
self._check_openbasedir() )
] + self._check_features() + self._check_classes() + self._check_functions()
return results
| gpl-3.0 |
tima/ansible | lib/ansible/modules/network/netvisor/pn_show.py | 72 | 5460 | #!/usr/bin/python
""" PN CLI show commands """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_show
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
short_description: Run show commands on nvOS device.
description:
- Execute show command in the nodes and returns the results
read from the device.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
pn_command:
description:
- The C(pn_command) takes a CLI show command as value.
required: true
pn_parameters:
description:
- Display output using a specific parameter. Use 'all' to display possible
output. List of comma separated parameters.
pn_options:
description:
- Specify formatting options.
"""
EXAMPLES = """
- name: run the vlan-show command
pn_show:
pn_command: 'vlan-show'
pn_parameters: id,scope,ports
pn_options: 'layout vertical'
- name: run the vlag-show command
pn_show:
pn_command: 'vlag-show'
pn_parameters: 'id,name,cluster,mode'
pn_options: 'no-show-headers'
- name: run the cluster-show command
pn_show:
pn_command: 'cluster-show'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the show command.
returned: always
type: list
stderr:
description: The set of error responses from the show command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused any change on the target.
returned: always(False)
type: bool
"""
import shlex
def pn_cli(module):
"""
This method is to generate the cli portion to launch the Netvisor cli.
It parses the username, password, switch parameters from module.
:param module: The Ansible module to fetch username, password and switch
:return: returns the cli string for further processing
"""
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch:
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def run_cli(module, cli):
"""
This method executes the cli command on the target node(s) and returns the
output. The module then exits based on the output.
:param cli: the complete cli string to be executed on the target node(s).
:param module: The Ansible module to fetch command
"""
cliswitch = module.params['pn_cliswitch']
command = module.params['pn_command']
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stderr=err.strip(),
changed=False
)
if out:
module.exit_json(
command=print_cli,
msg='%s: ' % command,
stdout=out.strip(),
changed=False
)
else:
module.exit_json(
command=cli,
msg='%s: Nothing to display!!!' % command,
changed=False
)
def main():
""" This section is for arguments parsing """
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=True, type='str'),
pn_clipassword=dict(required=True, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str'),
pn_command=dict(required=True, type='str'),
pn_parameters=dict(default='all', type='str'),
pn_options=dict(type='str')
)
)
# Accessing the arguments
command = module.params['pn_command']
parameters = module.params['pn_parameters']
options = module.params['pn_options']
# Building the CLI command string
cli = pn_cli(module)
cli += ' %s format %s ' % (command, parameters)
if options:
cli += options
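    # Illustrative final string for the vlan-show example in EXAMPLES
    # (credentials and switch omitted for brevity):
    #   '/usr/bin/cli --quiet  vlan-show format id,scope,ports layout vertical'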
run_cli(module, cli)
# AnsibleModule boilerplate
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
webmakin/scrapy | tests/test_utils_signal.py | 121 | 2741 | from testfixtures import LogCapture
from twisted.trial import unittest
from twisted.python.failure import Failure
from twisted.internet import defer, reactor
from pydispatch import dispatcher
from scrapy.utils.signal import send_catch_log, send_catch_log_deferred
class SendCatchLogTest(unittest.TestCase):
@defer.inlineCallbacks
def test_send_catch_log(self):
test_signal = object()
handlers_called = set()
dispatcher.connect(self.error_handler, signal=test_signal)
dispatcher.connect(self.ok_handler, signal=test_signal)
with LogCapture() as l:
result = yield defer.maybeDeferred(
self._get_result, test_signal, arg='test',
handlers_called=handlers_called
)
assert self.error_handler in handlers_called
assert self.ok_handler in handlers_called
self.assertEqual(len(l.records), 1)
record = l.records[0]
self.assertIn('error_handler', record.getMessage())
self.assertEqual(record.levelname, 'ERROR')
self.assertEqual(result[0][0], self.error_handler)
self.assert_(isinstance(result[0][1], Failure))
self.assertEqual(result[1], (self.ok_handler, "OK"))
dispatcher.disconnect(self.error_handler, signal=test_signal)
dispatcher.disconnect(self.ok_handler, signal=test_signal)
def _get_result(self, signal, *a, **kw):
return send_catch_log(signal, *a, **kw)
def error_handler(self, arg, handlers_called):
handlers_called.add(self.error_handler)
a = 1/0
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
return "OK"
class SendCatchLogDeferredTest(SendCatchLogTest):
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogDeferredTest2(SendCatchLogTest):
def ok_handler(self, arg, handlers_called):
handlers_called.add(self.ok_handler)
assert arg == 'test'
d = defer.Deferred()
reactor.callLater(0, d.callback, "OK")
return d
def _get_result(self, signal, *a, **kw):
return send_catch_log_deferred(signal, *a, **kw)
class SendCatchLogTest2(unittest.TestCase):
def test_error_logged_if_deferred_not_supported(self):
test_signal = object()
test_handler = lambda: defer.Deferred()
dispatcher.connect(test_handler, test_signal)
with LogCapture() as l:
send_catch_log(test_signal)
self.assertEqual(len(l.records), 1)
self.assertIn("Cannot return deferreds from signal handler", str(l))
dispatcher.disconnect(test_handler, test_signal)
| bsd-3-clause |
veger/ansible | test/units/conftest.py | 37 | 1052 | """Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`."""
import gc
import os
try:
import coverage
except ImportError:
coverage = None
try:
test = coverage.Coverage
except AttributeError:
coverage = None
def pytest_configure():
if not coverage:
return
coverage_instances = []
for obj in gc.get_objects():
if isinstance(obj, coverage.Coverage):
coverage_instances.append(obj)
if not coverage_instances:
coverage_config = os.environ.get('_ANSIBLE_COVERAGE_CONFIG')
if not coverage_config:
return
cov = coverage.Coverage(config_file=coverage_config)
coverage_instances.append(cov)
else:
cov = None
os_exit = os._exit
def coverage_exit(*args, **kwargs):
for instance in coverage_instances:
instance.stop()
instance.save()
os_exit(*args, **kwargs)
os._exit = coverage_exit
if cov:
cov.start()
| gpl-3.0 |
nave91/dbt | test/integration/014_hook_tests/test_model_hooks_bq.py | 1 | 4613 | from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
MODEL_PRE_HOOK = """
insert into {{this.schema}}.on_model_hook (
state,
target_name,
target_schema,
target_type,
target_threads,
run_started_at,
invocation_id
) VALUES (
'start',
'{{ target.name }}',
'{{ target.schema }}',
'{{ target.type }}',
{{ target.threads }},
'{{ run_started_at }}',
'{{ invocation_id }}'
)
"""
MODEL_POST_HOOK = """
insert into {{this.schema}}.on_model_hook (
state,
target_name,
target_schema,
target_type,
target_threads,
run_started_at,
invocation_id
) VALUES (
'end',
'{{ target.name }}',
'{{ target.schema }}',
'{{ target.type }}',
{{ target.threads }},
'{{ run_started_at }}',
'{{ invocation_id }}'
)
"""
class TestBigqueryPrePostModelHooks(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.use_profile('bigquery')
self.use_default_project()
self.run_sql_file("test/integration/014_hook_tests/seed_model_bigquery.sql")
self.fields = [
'state',
'target_name',
'target_schema',
'target_threads',
'target_type',
'run_started_at',
'invocation_id'
]
@property
def schema(self):
return "model_hooks_014"
@property
def profile_config(self):
profile = self.bigquery_profile()
profile['test']['outputs']['default2']['threads'] = 3
return profile
@property
def project_config(self):
return {
'macro-paths': ['test/integration/014_hook_tests/macros'],
'models': {
'test': {
'pre-hook': [MODEL_PRE_HOOK],
'post-hook':[MODEL_POST_HOOK]
}
}
}
@property
def models(self):
return "test/integration/014_hook_tests/models"
def get_ctx_vars(self, state):
field_list = ", ".join(self.fields)
query = "select {field_list} from `{schema}.on_model_hook` where state = '{state}'".format(field_list=field_list, schema=self.unique_schema(), state=state)
vals = self.run_sql(query, fetch='all')
self.assertFalse(len(vals) == 0, 'nothing inserted into hooks table')
self.assertFalse(len(vals) > 1, 'too many rows in hooks table')
ctx = dict(zip(self.fields, vals[0]))
return ctx
def check_hooks(self, state):
ctx = self.get_ctx_vars(state)
self.assertEqual(ctx['state'], state)
self.assertEqual(ctx['target_name'], 'default2')
self.assertEqual(ctx['target_schema'], self.unique_schema())
self.assertEqual(ctx['target_threads'], 3)
self.assertEqual(ctx['target_type'], 'bigquery')
self.assertTrue(ctx['run_started_at'] is not None and len(ctx['run_started_at']) > 0, 'run_started_at was not set')
self.assertTrue(ctx['invocation_id'] is not None and len(ctx['invocation_id']) > 0, 'invocation_id was not set')
@attr(type='bigquery')
def test_pre_and_post_model_hooks(self):
self.run_dbt(['run'])
self.check_hooks('start')
self.check_hooks('end')
class TestBigqueryPrePostModelHooksOnSeeds(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.use_profile('bigquery')
self.use_default_project()
@property
def schema(self):
return "model_hooks_014"
@property
def models(self):
return "test/integration/014_hook_tests/seed-models-bq"
@property
def project_config(self):
return {
'data-paths': ['test/integration/014_hook_tests/data'],
'models': {},
'seeds': {
'post-hook': [
'insert into {{ this }} (a, b, c) VALUES (10, 11, 12)',
]
}
}
@attr(type='bigquery')
def test_hooks_on_seeds(self):
res = self.run_dbt(['seed'])
self.assertEqual(len(res), 1, 'Expected exactly one item')
res = self.run_dbt(['test'])
self.assertEqual(len(res), 1, 'Expected exactly one item')
result = self.run_sql(
'select a, b, c from `{schema}`.`example_seed` where a = 10',
fetch='all'
)
self.assertFalse(len(result) == 0, 'nothing inserted into table by hook')
self.assertFalse(len(result) > 1, 'too many rows in table')
| apache-2.0 |
niksolaz/GeoJS | venv/lib/python2.7/site-packages/pip/commands/install.py | 61 | 15982 | from __future__ import absolute_import
import logging
import operator
import os
import tempfile
import shutil
import warnings
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.locations import build_prefix, virtualenv_no_global, distutils_scheme
from pip.basecommand import Command
from pip.index import PackageFinder
from pip.exceptions import (
InstallationError, CommandError, PreviousBuildDirError,
)
from pip import cmdoptions
from pip.utils.build import BuildDirectory
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
logger = logging.getLogger(__name__)
class InstallCommand(Command):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
name = 'install'
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
summary = 'Install packages.'
def __init__(self, *args, **kw):
super(InstallCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.editable.make())
cmd_opts.add_option(cmdoptions.requirements.make())
cmd_opts.add_option(cmdoptions.build_dir.make())
cmd_opts.add_option(
'-t', '--target',
dest='target_dir',
metavar='dir',
default=None,
help='Install packages into <dir>. '
'By default this will not replace existing files/folders in '
'<dir>. Use --upgrade to replace existing packages in <dir> '
'with new versions.'
)
cmd_opts.add_option(
'-d', '--download', '--download-dir', '--download-directory',
dest='download_dir',
metavar='dir',
default=None,
help=("Download packages into <dir> instead of installing them, "
"regardless of what's already installed."),
)
cmd_opts.add_option(cmdoptions.download_cache.make())
cmd_opts.add_option(cmdoptions.src.make())
cmd_opts.add_option(
'-U', '--upgrade',
dest='upgrade',
action='store_true',
help='Upgrade all specified packages to the newest available '
'version. This process is recursive regardless of whether '
'a dependency is already satisfied.'
)
cmd_opts.add_option(
'--force-reinstall',
dest='force_reinstall',
action='store_true',
help='When upgrading, reinstall all packages even if they are '
'already up-to-date.')
cmd_opts.add_option(
'-I', '--ignore-installed',
dest='ignore_installed',
action='store_true',
help='Ignore the installed packages (reinstalling instead).')
cmd_opts.add_option(cmdoptions.no_deps.make())
cmd_opts.add_option(
'--no-install',
dest='no_install',
action='store_true',
help="DEPRECATED. Download and unpack all packages, but don't "
"actually install them."
)
cmd_opts.add_option(
'--no-download',
dest='no_download',
action="store_true",
help="DEPRECATED. Don't download any packages, just install the "
"ones already downloaded (completes an install run with "
"--no-install).")
cmd_opts.add_option(cmdoptions.install_options.make())
cmd_opts.add_option(cmdoptions.global_options.make())
cmd_opts.add_option(
'--user',
dest='use_user_site',
action='store_true',
help='Install using the user scheme.')
cmd_opts.add_option(
'--egg',
dest='as_egg',
action='store_true',
help="Install packages as eggs, not 'flat', like pip normally "
"does. This option is not about installing *from* eggs. "
"(WARNING: Because this option overrides pip's normal install"
" logic, requirements files may not behave as expected.)")
cmd_opts.add_option(
'--root',
dest='root_path',
metavar='dir',
default=None,
help="Install everything relative to this alternate root "
"directory.")
cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile py files to pyc",
)
cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile py files to pyc",
)
cmd_opts.add_option(cmdoptions.use_wheel.make())
cmd_opts.add_option(cmdoptions.no_use_wheel.make())
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help="Include pre-release and development versions. By default, "
"pip only finds stable versions.")
cmd_opts.add_option(cmdoptions.no_clean.make())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this install command.
This method is meant to be overridden by subclasses, not
called directly.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
use_wheel=options.use_wheel,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if (
options.no_install or
options.no_download
):
warnings.warn(
"--no-install and --no-download are deprecated. "
"See https://github.com/pypa/pip/issues/906.",
RemovedInPip7Warning,
)
# If we have --no-install or --no-download and no --build we use the
# legacy static build dir
if (options.build_dir is None
and (options.no_install or options.no_download)):
options.build_dir = build_prefix
if options.download_dir:
options.no_install = True
options.ignore_installed = True
if options.build_dir:
options.build_dir = os.path.abspath(options.build_dir)
options.src_dir = os.path.abspath(options.src_dir)
install_options = options.install_options or []
if options.use_user_site:
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
install_options.append('--user')
temp_target_dir = None
if options.target_dir:
options.ignore_installed = True
temp_target_dir = tempfile.mkdtemp()
options.target_dir = os.path.abspath(options.target_dir)
if (os.path.exists(options.target_dir)
and not os.path.isdir(options.target_dir)):
raise CommandError(
"Target path exists but is not a directory, will not "
"continue."
)
install_options.append('--home=' + temp_target_dir)
global_options = options.global_options or []
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
if options.download_cache:
warnings.warn(
"--download-cache has been deprecated and will be removed in "
"the future. Pip now automatically uses and configures its "
"cache.",
RemovedInPip8Warning,
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
build_delete = (not (options.no_clean or options.build_dir))
with BuildDirectory(options.build_dir,
delete=build_delete) as build_dir:
requirement_set = RequirementSet(
build_dir=build_dir,
src_dir=options.src_dir,
download_dir=options.download_dir,
upgrade=options.upgrade,
as_egg=options.as_egg,
ignore_installed=options.ignore_installed,
ignore_dependencies=options.ignore_dependencies,
force_reinstall=options.force_reinstall,
use_user_site=options.use_user_site,
target_dir=temp_target_dir,
session=session,
pycompile=options.compile,
isolated=options.isolated_mode,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(
name, None, isolated=options.isolated_mode,
)
)
for name in options.editables:
requirement_set.add_requirement(
InstallRequirement.from_editable(
name,
default_vcs=options.default_vcs,
isolated=options.isolated_mode,
)
)
for filename in options.requirements:
for req in parse_requirements(
filename,
finder=finder, options=options, session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
opts = {'name': self.name}
if options.find_links:
msg = ('You must give at least one requirement to '
'%(name)s (maybe you meant "pip %(name)s '
'%(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
msg = ('You must give at least one requirement '
'to %(name)s (see "pip help %(name)s")' % opts)
logger.warning(msg)
return
try:
if not options.no_download:
requirement_set.prepare_files(finder)
else:
requirement_set.locate_files()
if not options.no_install:
requirement_set.install(
install_options,
global_options,
root=options.root_path,
)
reqs = sorted(
requirement_set.successfully_installed,
key=operator.attrgetter('name'))
items = []
for req in reqs:
item = req.name
try:
if hasattr(req, 'installed_version'):
if req.installed_version:
item += '-' + req.installed_version
except Exception:
pass
items.append(item)
installed = ' '.join(items)
if installed:
logger.info('Successfully installed %s', installed)
else:
downloaded = ' '.join([
req.name
for req in requirement_set.successfully_downloaded
])
if downloaded:
logger.info(
'Successfully downloaded %s', downloaded
)
except PreviousBuildDirError:
options.no_clean = True
raise
finally:
# Clean up
if ((not options.no_clean)
and ((not options.no_install)
or options.download_dir)):
requirement_set.cleanup_files()
if options.target_dir:
if not os.path.exists(options.target_dir):
os.makedirs(options.target_dir)
lib_dir = distutils_scheme('', home=temp_target_dir)['purelib']
for item in os.listdir(lib_dir):
target_item_dir = os.path.join(options.target_dir, item)
if os.path.exists(target_item_dir):
if not options.upgrade:
logger.warning(
'Target directory %s already exists. Specify '
'--upgrade to force replacement.',
target_item_dir
)
continue
if os.path.islink(target_item_dir):
logger.warning(
'Target directory %s already exists and is '
'a link. Pip will not automatically replace '
'links, please remove if replacement is '
'desired.',
target_item_dir
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(
os.path.join(lib_dir, item),
target_item_dir
)
shutil.rmtree(temp_target_dir)
return requirement_set
| mit |