| code | repo_name | path | language | license | size |
| --- | --- | --- | --- | --- | --- |
# -*- coding: utf-8 -*-
#
# test_refractory.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import nest
"""
Assert that all neuronal models that have a refractory period implement it
correctly (except for Hodgkin-Huxley models which cannot be tested).
Details
-------
Submit the neuron to a constant excitatory current so that it spikes in the
[0, 50] ms interval.
A ``spike_recorder`` is used to detect the time at which the neuron spikes and
a ``voltmeter`` is then used to make sure the voltage is clamped to ``V_reset``
during exactly ``t_ref``.
For neurons that do not clamp the potential, use a very large current to
trigger immediate spiking.
For untested models please see the ignore_model list.
"""
# --------------------------------------------------------------------------- #
# Models, specific parameters
# --------------------------------------------------------------------------- #
# Neurons that must be tested through a high current to spike immediately
# (t_ref = interspike)
neurons_interspike = [
"amat2_psc_exp",
"ht_neuron",
"mat2_psc_exp",
]
neurons_interspike_ps = [
"iaf_psc_alpha_canon",
"iaf_psc_alpha_ps",
"iaf_psc_delta_ps",
"iaf_psc_exp_ps",
]
# Models that first clamp the membrane potential at a higher value
neurons_with_clamping = [
"aeif_psc_delta_clopath",
]
# Multi-compartment models
mc_models = [
"iaf_cond_alpha_mc",
]
# Models that cannot be tested
ignore_model = [
"gif_pop_psc_exp", # This one commits spikes at same time
"hh_cond_exp_traub", # This one does not support V_reset
"hh_cond_beta_gap_traub", # This one does not support V_reset
"hh_psc_alpha", # This one does not support V_reset
"hh_psc_alpha_clopath", # This one does not support V_reset
"hh_psc_alpha_gap", # This one does not support V_reset
"pp_cond_exp_mc_urbanczik", # This one does not support V_reset
"iaf_psc_exp_ps_lossless", # This one use presice times
"siegert_neuron", # This one does not connect to voltmeter
"step_rate_generator" # No regular neuron model
]
tested_models = [m for m in nest.node_models
if nest.GetDefaults(m, "element_type") == "neuron"
and m not in ignore_model]
# Additional parameters for the connector
add_connect_param = {
"iaf_cond_alpha_mc": {"receptor_type": 7},
}
# --------------------------------------------------------------------------- #
# Simulation time and refractory time limits
# --------------------------------------------------------------------------- #
simtime = 100
resolution = 0.1
# --------------------------------------------------------------------------- #
# Test class
# --------------------------------------------------------------------------- #
class TestRefractoryCase(unittest.TestCase):
"""
Check the correct implementation of refractory time in all neuronal models.
"""
def reset(self):
nest.ResetKernel()
nest.resolution = resolution
nest.rng_seed = 123456
def compute_reftime(self, model, sr, vm, neuron):
'''
Compute the refractory time of the neuron.
Parameters
----------
model : str
Name of the neuronal model.
sr : tuple
node ID of the spike recorder.
vm : tuple
node ID of the voltmeter.
neuron : tuple
node ID of the recorded neuron.
Returns
-------
t_ref_sim : double
Value of the simulated refractory period.
'''
spike_times = nest.GetStatus(sr, "events")[0]["times"]
if model in neurons_interspike:
            # Spike emitted at the next timestep, so subtract the resolution
return spike_times[1]-spike_times[0]-resolution
elif model in neurons_interspike_ps:
return spike_times[1]-spike_times[0]
else:
Vr = nest.GetStatus(neuron, "V_reset")[0]
times = nest.GetStatus(vm, "events")[0]["times"]
# Index of the 2nd spike
idx_max = np.argwhere(times == spike_times[1])[0][0]
name_Vm = "V_m.s" if model in mc_models else "V_m"
Vs = nest.GetStatus(vm, "events")[0][name_Vm]
            # Get the index at which the spike occurred
idx_spike = np.argwhere(times == spike_times[0])[0][0]
# Find end of refractory period between 1st and 2nd spike
idx_end = np.where(
np.isclose(Vs[idx_spike:idx_max], Vr, 1e-6))[0][-1]
t_ref_sim = idx_end * resolution
return t_ref_sim
def test_refractory_time(self):
'''
Check that refractory time implementation is correct.
'''
for model in tested_models:
self.reset()
if "t_ref" not in nest.GetDefaults(model):
continue
            # Set a non-default refractory period
t_ref = 1.7
# Create the neuron and devices
nparams = {"t_ref": t_ref}
neuron = nest.Create(model, params=nparams)
name_Vm = "V_m.s" if model in mc_models else "V_m"
vm_params = {"interval": resolution, "record_from": [name_Vm]}
vm = nest.Create("voltmeter", params=vm_params)
sr = nest.Create("spike_recorder")
cg = nest.Create("dc_generator", params={"amplitude": 1200.})
# For models that do not clamp V_m, use very large current to
# trigger almost immediate spiking => t_ref almost equals
# interspike
if model in neurons_interspike_ps:
nest.SetStatus(cg, "amplitude", 10000000.)
elif model == 'ht_neuron':
                # ht_neuron takes too long to simulate with a very large amplitude
nest.SetStatus(cg, "amplitude", 2000.)
elif model in neurons_interspike:
nest.SetStatus(cg, "amplitude", 15000.)
# Connect them and simulate
nest.Connect(vm, neuron)
nest.Connect(cg, neuron, syn_spec=add_connect_param.get(model, {}))
nest.Connect(neuron, sr)
nest.Simulate(simtime)
# Get and compare t_ref
t_ref_sim = self.compute_reftime(model, sr, vm, neuron)
if model in neurons_with_clamping:
t_ref_sim = t_ref_sim - nest.GetStatus(neuron, "t_clamp")[0]
# Approximate result for precise spikes (interpolation error)
if model in neurons_interspike_ps:
self.assertAlmostEqual(t_ref, t_ref_sim, places=3,
msg='''Error in model {}:
{} != {}'''.format(
model, t_ref, t_ref_sim))
else:
self.assertAlmostEqual(t_ref, t_ref_sim,
msg='''Error in model {}:
{} != {}'''.format(
model, t_ref, t_ref_sim))
# --------------------------------------------------------------------------- #
# Run the comparisons
# --------------------------------------------------------------------------- #
def suite():
return unittest.makeSuite(TestRefractoryCase, "test")
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
| heplesser/nest-simulator | testsuite/pytests/test_refractory.py | Python | gpl-2.0 | 8,170 |
default_app_config = 'nodeshot.community.participation.apps.AppConfig'
| SCORE42/nodeshot | nodeshot/community/participation/__init__.py | Python | gpl-3.0 | 71 |
#!/usr/bin/env python
#
# Copyright 2008,2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
import pmt
# See gr-digital/lib/additive_scrambler_bb_impl.cc for reference.
def additive_scramble_lfsr(mask, seed, reglen, bpb, data):
l = digital.lfsr(mask, seed, reglen)
out = []
for d in data:
scramble_word = 0
for i in range(0,bpb):
scramble_word ^= l.next_bit() << i
out.append(d ^ scramble_word)
return tuple(out)
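# Quick illustration (not part of the original tests): an additive scrambler
# only XORs the data with the LFSR output, so applying the same operation twice
# with an identical mask/seed recovers the input. The helper name below is
# hypothetical.
def _sketch_additive_roundtrip(mask, seed, reglen, bpb, data):
    once = additive_scramble_lfsr(mask, seed, reglen, bpb, data)
    twice = additive_scramble_lfsr(mask, seed, reglen, bpb, once)
    return twice == tuple(data)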
class test_scrambler(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_scrambler_descrambler(self):
src_data = (1,)*1000
src = blocks.vector_source_b(src_data, False)
scrambler = digital.scrambler_bb(0x8a, 0x7F, 7) # CCSDS 7-bit scrambler
descrambler = digital.descrambler_bb(0x8a, 0x7F, 7)
dst = blocks.vector_sink_b()
self.tb.connect(src, scrambler, descrambler, dst)
self.tb.run()
self.assertEqual(tuple(src_data[:-8]), dst.data()[8:]) # skip garbage during synchronization
def test_additive_scrambler(self):
src_data = (1,)*1000
src = blocks.vector_source_b(src_data, False)
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7)
descrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7)
dst = blocks.vector_sink_b()
self.tb.connect(src, scrambler, descrambler, dst)
self.tb.run()
self.assertEqual(src_data, dst.data())
def test_additive_scrambler_reset(self):
src_data = (1,)*1000
src = blocks.vector_source_b(src_data, False)
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100)
descrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100)
dst = blocks.vector_sink_b()
self.tb.connect(src, scrambler, descrambler, dst)
self.tb.run()
self.assertEqual(src_data, dst.data())
def test_additive_scrambler_reset_3bpb(self):
src_data = (5,)*2000
src = blocks.vector_source_b(src_data, False)
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100, 3)
descrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100, 3)
dst = blocks.vector_sink_b()
dst2 = blocks.vector_sink_b()
self.tb.connect(src, scrambler, descrambler, dst)
self.tb.connect(scrambler, dst2)
self.tb.run()
if not (src_data == dst.data()):
self.fail('Not equal.')
        self.assertEqual(src_data, dst.data())
def test_additive_scrambler_tags(self):
src_data = (1,)*1000
src = blocks.vector_source_b(src_data, False)
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100)
descrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100)
reset_tag_key = 'reset_lfsr'
reset_tag1 = gr.tag_t()
reset_tag1.key = pmt.string_to_symbol(reset_tag_key)
reset_tag1.offset = 17
reset_tag2 = gr.tag_t()
reset_tag2.key = pmt.string_to_symbol(reset_tag_key)
reset_tag2.offset = 110
reset_tag3 = gr.tag_t()
reset_tag3.key = pmt.string_to_symbol(reset_tag_key)
reset_tag3.offset = 523
src = blocks.vector_source_b(src_data, False, 1, (reset_tag1, reset_tag2, reset_tag3))
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100, 1, reset_tag_key)
descrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 100, 1, reset_tag_key)
dst = blocks.vector_sink_b()
self.tb.connect(src, scrambler, descrambler, dst)
self.tb.run()
self.assertEqual(src_data, dst.data())
def test_additive_scrambler_tags_oneway(self):
src_data = [x for x in range(0, 10)]
reset_tag_key = 'reset_lfsr'
reset_tag1 = gr.tag_t()
reset_tag1.key = pmt.string_to_symbol(reset_tag_key)
reset_tag1.offset = 0
reset_tag2 = gr.tag_t()
reset_tag2.key = pmt.string_to_symbol(reset_tag_key)
reset_tag2.offset = 10
reset_tag3 = gr.tag_t()
reset_tag3.key = pmt.string_to_symbol(reset_tag_key)
reset_tag3.offset = 20
src = blocks.vector_source_b(src_data * 3, False, 1, (reset_tag1, reset_tag2, reset_tag3))
scrambler = digital.additive_scrambler_bb(0x8a, 0x7f, 7, 0, 8, reset_tag_key)
dst = blocks.vector_sink_b()
self.tb.connect(src, scrambler, dst)
self.tb.run()
expected_data = additive_scramble_lfsr(0x8a, 0x7f, 7, 8, src_data)
self.assertEqual(expected_data * 3, dst.data())
if __name__ == '__main__':
gr_unittest.run(test_scrambler, "test_scrambler.xml")
| iohannez/gnuradio | gr-digital/python/digital/qa_scrambler.py | Python | gpl-3.0 | 5,468 |
# -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
    By default, the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
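# Minimal usage sketch (hypothetical, not part of jinja2): collect every
# referenced name in a template AST by relying on the ``visit_`` + class-name
# dispatch described above.
class _ExampleNameCollector(NodeVisitor):
    """Collects the ``name`` attribute of every ``Name`` node visited."""
    def __init__(self):
        self.names = []
    def visit_Name(self, node, *args, **kwargs):
        self.names.append(node.name)
        self.generic_visit(node, *args, **kwargs)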
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
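# Usage sketch (hypothetical): a transformer that removes every ``Output`` node,
# relying on the return-``None``-to-remove behaviour documented above.
class _ExampleOutputStripper(NodeTransformer):
    def visit_Output(self, node, *args, **kwargs):
        return None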
| scyclops/Readable-Feeds | jinja2/visitor.py | Python | gpl-3.0 | 3,322 |
#!/usr/bin/env python -u
import sys
from argparse import ArgumentParser
import time
import json
import yaml
import os
try:
import boto.ec2
import boto.sqs
from boto.vpc import VPCConnection
from boto.exception import NoAuthHandlerFound, EC2ResponseError
from boto.sqs.message import RawMessage
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
print "boto required for script"
sys.exit(1)
from pprint import pprint
AMI_TIMEOUT = 2700  # time to wait for AMIs to complete (45 minutes)
EC2_RUN_TIMEOUT = 180 # time to wait for ec2 state transition
EC2_STATUS_TIMEOUT = 300 # time to wait for ec2 system status checks
NUM_TASKS = 5 # number of tasks for time summary report
NUM_PLAYBOOKS = 2
class Unbuffered:
"""
For unbuffered output, not
needed if PYTHONUNBUFFERED is set
"""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
def parse_args():
parser = ArgumentParser()
parser.add_argument('--noop', action='store_true',
help="don't actually run the cmds",
default=False)
parser.add_argument('--secure-vars-file', required=False,
metavar="SECURE_VAR_FILE", default=None,
help="path to secure-vars from the root of "
"the secure repo. By default <deployment>.yml and "
"<environment>-<deployment>.yml will be used if they "
"exist in <secure-repo>/ansible/vars/. This secure file "
"will be used in addition to these if they exist.")
parser.add_argument('--stack-name',
help="defaults to ENVIRONMENT-DEPLOYMENT",
metavar="STACK_NAME",
required=False)
parser.add_argument('-p', '--play',
help='play name without the yml extension',
metavar="PLAY", required=True)
parser.add_argument('--playbook-dir',
help='directory to find playbooks in',
default='configuration/playbooks/edx-east',
metavar="PLAYBOOKDIR", required=False)
parser.add_argument('-d', '--deployment', metavar="DEPLOYMENT",
required=True)
parser.add_argument('-e', '--environment', metavar="ENVIRONMENT",
required=True)
parser.add_argument('-v', '--verbose', action='store_true',
help="turn on verbosity")
parser.add_argument('--no-cleanup', action='store_true',
help="don't cleanup on failures")
parser.add_argument('--vars', metavar="EXTRA_VAR_FILE",
help="path to extra var file", required=False)
parser.add_argument('--configuration-version', required=False,
help="configuration repo gitref",
default="master")
parser.add_argument('--configuration-secure-version', required=False,
help="configuration-secure repo gitref",
default="master")
parser.add_argument('--configuration-secure-repo', required=False,
default="[email protected]:edx-ops/prod-secure",
help="repo to use for the secure files")
parser.add_argument('--configuration-private-version', required=False,
help="configuration-private repo gitref",
default="master")
parser.add_argument('--configuration-private-repo', required=False,
default="[email protected]:edx-ops/ansible-private",
help="repo to use for private playbooks")
parser.add_argument('-c', '--cache-id', required=True,
help="unique id to use as part of cache prefix")
parser.add_argument('-i', '--identity', required=False,
help="path to identity file for pulling "
"down configuration-secure",
default=None)
parser.add_argument('-r', '--region', required=False,
default="us-east-1",
help="aws region")
parser.add_argument('-k', '--keypair', required=False,
default="deployment",
help="AWS keypair to use for instance")
parser.add_argument('-t', '--instance-type', required=False,
default="m1.large",
help="instance type to launch")
parser.add_argument("--role-name", required=False,
default="abbey",
help="IAM role name to use (must exist)")
parser.add_argument("--msg-delay", required=False,
default=5,
help="How long to delay message display from sqs "
"to ensure ordering")
parser.add_argument("--hipchat-room-id", required=False,
default=None,
help="The API ID of the Hipchat room to post"
"status messages to")
parser.add_argument("--ansible-hipchat-room-id", required=False,
default='Hammer',
help="The room used by the abbey instance for "
"printing verbose ansible run data.")
parser.add_argument("--hipchat-api-token", required=False,
default=None,
help="The API token for Hipchat integration")
parser.add_argument("--root-vol-size", required=False,
default=50,
help="The size of the root volume to use for the "
"abbey instance.")
group = parser.add_mutually_exclusive_group()
group.add_argument('-b', '--base-ami', required=False,
help="ami to use as a base ami",
default="ami-0568456c")
group.add_argument('--blessed', action='store_true',
help="Look up blessed ami for env-dep-play.",
default=False)
return parser.parse_args()
def get_instance_sec_group(vpc_id):
grp_details = ec2.get_all_security_groups(
filters={
'vpc_id': vpc_id,
'tag:play': args.play
}
)
if len(grp_details) < 1:
sys.stderr.write("ERROR: Expected atleast one security group, got {}\n".format(
len(grp_details)))
return grp_details[0].id
def get_blessed_ami():
images = ec2.get_all_images(
filters={
'tag:environment': args.environment,
'tag:deployment': args.deployment,
'tag:play': args.play,
'tag:blessed': True
}
)
if len(images) != 1:
raise Exception("ERROR: Expected only one blessed ami, got {}\n".format(
len(images)))
return images[0].id
def create_instance_args():
"""
Looks up security group, subnet
and returns arguments to pass into
ec2.run_instances() including
user data
"""
vpc = VPCConnection()
subnet = vpc.get_all_subnets(
filters={
'tag:aws:cloudformation:stack-name': stack_name,
'tag:play': args.play}
)
if len(subnet) < 1:
#
# try scheme for non-cloudformation builds
#
subnet = vpc.get_all_subnets(
filters={
'tag:cluster': args.play,
'tag:environment': args.environment,
'tag:deployment': args.deployment}
)
if len(subnet) < 1:
sys.stderr.write("ERROR: Expected at least one subnet, got {}\n".format(
len(subnet)))
sys.exit(1)
subnet_id = subnet[0].id
vpc_id = subnet[0].vpc_id
security_group_id = get_instance_sec_group(vpc_id)
if args.identity:
config_secure = 'true'
with open(args.identity) as f:
identity_contents = f.read()
else:
config_secure = 'false'
identity_contents = "dummy"
user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/{playbook_dir}"
if $config_secure; then
git_cmd="env GIT_SSH=$git_ssh git"
else
git_cmd="git"
fi
ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM HIPCHAT_MSG_COLOR
if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
echo "Installing pkg dependencies"
/usr/bin/apt-get update
/usr/bin/apt-get install -y git python-pip python-apt \\
git-core build-essential python-dev libxml2-dev \\
libxslt-dev curl --force-yes
fi
rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir
cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF
chmod 755 $git_ssh
if $config_secure; then
cat << EOF > $secure_identity
{identity_contents}
EOF
fi
cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}
# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true
disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true
# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF
chmod 400 $secure_identity
$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir
if $config_secure; then
$git_cmd clone $git_repo_secure $git_repo_secure_name
cd $git_repo_secure_name
$git_cmd checkout $configuration_secure_version
cd $base_dir
fi
if [[ ! -z $git_repo_private ]]; then
$git_cmd clone $git_repo_private $git_repo_private_name
cd $git_repo_private_name
$git_cmd checkout $configuration_private_version
cd $base_dir
fi
cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt
cd $playbook_dir
if [[ -r "$deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$deployment_secure_vars"
fi
if [[ -r "$environment_deployment_secure_vars" ]]; then
extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi
if $secure_vars_file; then
extra_args_opts+=" -e@$secure_vars_file"
fi
extra_args_opts+=" -e@$extra_vars"
ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts
rm -rf $base_dir
""".format(
hipchat_token=args.hipchat_api_token,
hipchat_room=args.ansible_hipchat_room_id,
configuration_version=args.configuration_version,
configuration_secure_version=args.configuration_secure_version,
configuration_secure_repo=args.configuration_secure_repo,
configuration_private_version=args.configuration_private_version,
configuration_private_repo=args.configuration_private_repo,
environment=args.environment,
deployment=args.deployment,
play=args.play,
playbook_dir=args.playbook_dir,
config_secure=config_secure,
identity_contents=identity_contents,
queue_name=run_id,
extra_vars_yml=extra_vars_yml,
secure_vars_file=secure_vars_file,
cache_id=args.cache_id)
mapping = BlockDeviceMapping()
root_vol = BlockDeviceType(size=args.root_vol_size,
delete_on_termination=True,
volume_type='gp2')
mapping['/dev/sda1'] = root_vol
ec2_args = {
'security_group_ids': [security_group_id],
'subnet_id': subnet_id,
'key_name': args.keypair,
'image_id': base_ami,
'instance_type': args.instance_type,
'instance_profile_name': args.role_name,
'user_data': user_data,
'block_device_map': mapping,
}
return ec2_args
def poll_sqs_ansible():
"""
Prints events to the console and
blocks until a final STATS ansible
event is read off of SQS.
SQS does not guarantee FIFO, for that
reason there is a buffer that will delay
messages before they are printed to the
console.
Returns length of the ansible run.
"""
oldest_msg_ts = 0
buf = []
task_report = [] # list of tasks for reporting
last_task = None
completed = 0
while True:
messages = []
while True:
# get all available messages on the queue
msgs = sqs_queue.get_messages(attributes='All')
if not msgs:
break
messages.extend(msgs)
for message in messages:
recv_ts = float(
message.attributes['ApproximateFirstReceiveTimestamp']) * .001
sent_ts = float(message.attributes['SentTimestamp']) * .001
try:
msg_info = {
'msg': json.loads(message.get_body()),
'sent_ts': sent_ts,
'recv_ts': recv_ts,
}
buf.append(msg_info)
except ValueError as e:
print "!!! ERROR !!! unable to parse queue message, " \
"expecting valid json: {} : {}".format(
message.get_body(), e)
if not oldest_msg_ts or recv_ts < oldest_msg_ts:
oldest_msg_ts = recv_ts
sqs_queue.delete_message(message)
now = int(time.time())
if buf:
try:
if (now - min([msg['recv_ts'] for msg in buf])) > args.msg_delay:
# sort by TS instead of recv_ts
# because the sqs timestamp is not as
# accurate
buf.sort(key=lambda k: k['msg']['TS'])
to_disp = buf.pop(0)
if 'START' in to_disp['msg']:
print '\n{:0>2.0f}:{:0>5.2f} {} : Starting "{}"'.format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'],
to_disp['msg']['START']),
elif 'TASK' in to_disp['msg']:
print "\n{:0>2.0f}:{:0>5.2f} {} : {}".format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'],
to_disp['msg']['TASK']),
last_task = to_disp['msg']['TASK']
elif 'OK' in to_disp['msg']:
if args.verbose:
print "\n"
for key, value in to_disp['msg']['OK'].iteritems():
print " {:<15}{}".format(key, value)
else:
invocation = to_disp['msg']['OK']['invocation']
module = invocation['module_name']
# 'set_fact' does not provide a changed value.
if module == 'set_fact':
changed = "OK"
elif to_disp['msg']['OK']['changed']:
changed = "*OK*"
else:
changed = "OK"
print " {}".format(changed),
task_report.append({
'TASK': last_task,
'INVOCATION': to_disp['msg']['OK']['invocation'],
'DELTA': to_disp['msg']['delta'],
})
elif 'FAILURE' in to_disp['msg']:
print " !!!! FAILURE !!!!",
for key, value in to_disp['msg']['FAILURE'].iteritems():
print " {:<15}{}".format(key, value)
raise Exception("Failed Ansible run")
elif 'STATS' in to_disp['msg']:
print "\n{:0>2.0f}:{:0>5.2f} {} : COMPLETE".format(
to_disp['msg']['TS'] / 60,
to_disp['msg']['TS'] % 60,
to_disp['msg']['PREFIX'])
                        # One COMPLETE message is emitted per ansible play,
                        # so wait until NUM_PLAYBOOKS of them have been seen
                        # before we stop listening for new messages.
completed += 1
if completed >= NUM_PLAYBOOKS:
return (to_disp['msg']['TS'], task_report)
except KeyError:
print "Failed to print status from message: {}".format(to_disp)
if not messages:
# wait 1 second between sqs polls
time.sleep(1)
def create_ami(instance_id, name, description):
params = {'instance_id': instance_id,
'name': name,
'description': description,
'no_reboot': True}
AWS_API_WAIT_TIME = 1
image_id = ec2.create_image(**params)
print("Checking if image is ready.")
for _ in xrange(AMI_TIMEOUT):
try:
img = ec2.get_image(image_id)
if img.state == 'available':
print("Tagging image.")
img.add_tag("environment", args.environment)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("deployment", args.deployment)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("play", args.play)
time.sleep(AWS_API_WAIT_TIME)
conf_tag = "{} {}".format("http://github.com/edx/configuration", args.configuration_version)
img.add_tag("version:configuration", conf_tag)
time.sleep(AWS_API_WAIT_TIME)
conf_secure_tag = "{} {}".format(args.configuration_secure_repo, args.configuration_secure_version)
img.add_tag("version:configuration_secure", conf_secure_tag)
time.sleep(AWS_API_WAIT_TIME)
img.add_tag("cache_id", args.cache_id)
time.sleep(AWS_API_WAIT_TIME)
# Get versions from the instance.
tags = ec2.get_all_tags(filters={'resource-id': instance_id})
for tag in tags:
if tag.name.startswith('version:'):
img.add_tag(tag.name, tag.value)
time.sleep(AWS_API_WAIT_TIME)
break
else:
time.sleep(1)
except EC2ResponseError as e:
if e.error_code == 'InvalidAMIID.NotFound':
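                # The AMI id may not be visible yet right after create_image()
                # because of AWS eventual consistency, so treat NotFound as
                # "retry later".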
time.sleep(1)
else:
raise Exception("Unexpected error code: {}".format(
e.error_code))
time.sleep(1)
else:
raise Exception("Timeout waiting for AMI to finish")
return image_id
def launch_and_configure(ec2_args):
"""
Creates an sqs queue, launches an ec2 instance,
configures it and creates an AMI. Polls
SQS for updates
"""
print "{:<40}".format(
"Creating SQS queue and launching instance for {}:".format(run_id))
print
for k, v in ec2_args.iteritems():
if k != 'user_data':
print " {:<25}{}".format(k, v)
print
global sqs_queue
global instance_id
sqs_queue = sqs.create_queue(run_id)
sqs_queue.set_message_class(RawMessage)
res = ec2.run_instances(**ec2_args)
inst = res.instances[0]
instance_id = inst.id
print "{:<40}".format(
"Waiting for instance {} to reach running status:".format(instance_id)),
status_start = time.time()
for _ in xrange(EC2_RUN_TIMEOUT):
res = ec2.get_all_instances(instance_ids=[instance_id])
if res[0].instances[0].state == 'running':
status_delta = time.time() - status_start
run_summary.append(('EC2 Launch', status_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
status_delta / 60,
status_delta % 60)
break
else:
time.sleep(1)
else:
raise Exception("Timeout waiting for running status: {} ".format(
instance_id))
print "{:<40}".format("Waiting for system status:"),
system_start = time.time()
for _ in xrange(EC2_STATUS_TIMEOUT):
status = ec2.get_all_instance_status(inst.id)
if status[0].system_status.status == u'ok':
system_delta = time.time() - system_start
run_summary.append(('EC2 Status Checks', system_delta))
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
system_delta / 60,
system_delta % 60)
break
else:
time.sleep(1)
else:
raise Exception("Timeout waiting for status checks: {} ".format(
instance_id))
print
print "{:<40}".format(
"Waiting for user-data, polling sqs for Ansible events:")
(ansible_delta, task_report) = poll_sqs_ansible()
run_summary.append(('Ansible run', ansible_delta))
print
print "{} longest Ansible tasks (seconds):".format(NUM_TASKS)
for task in sorted(
task_report, reverse=True,
key=lambda k: k['DELTA'])[:NUM_TASKS]:
print "{:0>3.0f} {}".format(task['DELTA'], task['TASK'])
print " - {}".format(task['INVOCATION'])
print
print "{:<40}".format("Creating AMI:"),
ami_start = time.time()
ami = create_ami(instance_id, run_id, run_id)
ami_delta = time.time() - ami_start
print "[ OK ] {:0>2.0f}:{:0>2.0f}".format(
ami_delta / 60,
ami_delta % 60)
run_summary.append(('AMI Build', ami_delta))
total_time = time.time() - start_time
all_stages = sum(run[1] for run in run_summary)
if total_time - all_stages > 0:
run_summary.append(('Other', total_time - all_stages))
run_summary.append(('Total', total_time))
return run_summary, ami
def send_hipchat_message(message):
print(message)
    # If hipchat is configured, send the details to the specified room
if args.hipchat_api_token and args.hipchat_room_id:
import hipchat
try:
hipchat = hipchat.HipChat(token=args.hipchat_api_token)
hipchat.message_room(args.hipchat_room_id, 'AbbeyNormal',
message)
except Exception as e:
print("Hipchat messaging resulted in an error: %s." % e)
if __name__ == '__main__':
args = parse_args()
run_summary = []
start_time = time.time()
if args.vars:
with open(args.vars) as f:
extra_vars_yml = f.read()
extra_vars = yaml.load(extra_vars_yml)
else:
extra_vars_yml = ""
extra_vars = {}
if args.secure_vars_file:
# explicit path to a single
# secure var file
secure_vars_file = args.secure_vars_file
else:
secure_vars_file = 'false'
if args.stack_name:
stack_name = args.stack_name
else:
stack_name = "{}-{}".format(args.environment, args.deployment)
try:
ec2 = boto.ec2.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'Unable to connect to ec2 in region :{}'.format(args.region)
sys.exit(1)
try:
sqs = boto.sqs.connect_to_region(args.region)
except NoAuthHandlerFound:
print 'Unable to connect to sqs in region :{}'.format(args.region)
sys.exit(1)
if args.blessed:
base_ami = get_blessed_ami()
else:
base_ami = args.base_ami
error_in_abbey_run = False
try:
sqs_queue = None
instance_id = None
run_id = "{}-abbey-{}-{}-{}".format(
int(time.time() * 100), args.environment, args.deployment, args.play)
ec2_args = create_instance_args()
if args.noop:
print "Would have created sqs_queue with id: {}\nec2_args:".format(
run_id)
pprint(ec2_args)
ami = "ami-00000"
else:
run_summary, ami = launch_and_configure(ec2_args)
print
print "Summary:\n"
for run in run_summary:
print "{:<30} {:0>2.0f}:{:0>5.2f}".format(
run[0], run[1] / 60, run[1] % 60)
print "AMI: {}".format(ami)
message = 'Finished baking AMI {image_id} for {environment} {deployment} {play}.'.format(
image_id=ami,
environment=args.environment,
deployment=args.deployment,
play=args.play)
send_hipchat_message(message)
except Exception as e:
message = 'An error occurred building AMI for {environment} ' \
'{deployment} {play}. The Exception was {exception}'.format(
environment=args.environment,
deployment=args.deployment,
play=args.play,
exception=repr(e))
send_hipchat_message(message)
error_in_abbey_run = True
finally:
print
if not args.no_cleanup and not args.noop:
if sqs_queue:
print "Cleaning up - Removing SQS queue - {}".format(run_id)
sqs.delete_queue(sqs_queue)
if instance_id:
print "Cleaning up - Terminating instance ID - {}".format(
instance_id)
# Check to make sure we have an instance id.
if instance_id:
ec2.terminate_instances(instance_ids=[instance_id])
if error_in_abbey_run:
exit(1)
| chudaol/configuration | util/vpc-tools/abbey.py | Python | agpl-3.0 | 27,867 |
"""
Unit tests for the stem.util.tor_tools functions.
"""
import unittest
import stem.util.tor_tools
class TestTorTools(unittest.TestCase):
def test_is_valid_fingerprint(self):
"""
Checks the is_valid_fingerprint function.
"""
valid_fingerprints = (
'$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB',
'$a7569a83b5706ab1b1a9cb52eff7d2d32e4553eb',
)
invalid_fingerprints = (
None,
'',
5,
['A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB'],
'A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EB',
'$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553E',
'$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553E33',
'$A7569A83B5706AB1B1A9CB52EFF7D2D32E4553EG',
)
for fingerprint in valid_fingerprints:
self.assertTrue(stem.util.tor_tools.is_valid_fingerprint(fingerprint, True))
for fingerprint in invalid_fingerprints:
self.assertFalse(stem.util.tor_tools.is_valid_fingerprint(fingerprint, True))
def test_is_valid_nickname(self):
"""
Checks the is_valid_nickname function.
"""
valid_nicknames = (
'caerSidi',
'a',
'abcABC123',
)
invalid_nicknames = (
None,
'',
5,
'toolongggggggggggggg',
'bad_character',
)
for nickname in valid_nicknames:
self.assertTrue(stem.util.tor_tools.is_valid_nickname(nickname))
for nickname in invalid_nicknames:
self.assertFalse(stem.util.tor_tools.is_valid_nickname(nickname))
def test_is_valid_circuit_id(self):
"""
Checks the is_valid_circuit_id function.
"""
valid_circuit_ids = (
'0',
'2',
'abcABC123',
)
invalid_circuit_ids = (
None,
'',
0,
2,
'toolonggggggggggg',
'bad_character',
)
for circuit_id in valid_circuit_ids:
self.assertTrue(stem.util.tor_tools.is_valid_circuit_id(circuit_id))
for circuit_id in invalid_circuit_ids:
self.assertFalse(stem.util.tor_tools.is_valid_circuit_id(circuit_id))
def test_is_valid_hex_digits(self):
"""
Checks the is_valid_hex_digits function.
"""
valid_hex_digits = (
"12345",
"AbCdE",
)
invalid_hex_digits = (
None,
'',
5,
'X',
"1234",
"ABCDEF",
[1, "2", (3, 4)]
)
for hex_digits in valid_hex_digits:
self.assertTrue(stem.util.tor_tools.is_hex_digits(hex_digits, 5))
for hex_digits in invalid_hex_digits:
self.assertFalse(stem.util.tor_tools.is_hex_digits(hex_digits, 5))
| leivaburto/stem | test/unit/util/tor_tools.py | Python | lgpl-3.0 | 2,529 |
name = "foo"
version = "1.0.0"
description = "This package exists only to test config overrides."
uuid = "28d94bcd1a934bb4999bcf70a21106cc"
authors = [
"joe.bloggs"
]
with scope("config") as c:
c.build_directory = "weeble"
c.parent_variables = [
"FOO",
"BAH_${FUNK}",
"EEK"
]
c.release_hooks = ModifyList(append=["bah"])
c.plugins = {
"release_vcs": {
"tag_name": "tag"
},
"release_hook": {
"emailer": {
"sender": "{system.user}@somewhere.com",
"recipients": ModifyList(append=["[email protected]"]),
                # nonexistent keys should not cause a problem
"nonexistant": "hello"
}
}
}
| cwmartin/rez | src/rez/tests/data/config/package.py | Python | lgpl-3.0 | 766 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from kafkatest.utils.remote_account import java_version
from kafkatest.version import LATEST_0_8_2, LATEST_0_9, LATEST_0_10_0, LATEST_0_10_1, LATEST_0_10_2, LATEST_0_11_0, LATEST_1_0
TopicPartition = namedtuple('TopicPartition', ['topic', 'partition'])
new_jdk_not_supported = frozenset([str(LATEST_0_8_2), str(LATEST_0_9), str(LATEST_0_10_0), str(LATEST_0_10_1), str(LATEST_0_10_2), str(LATEST_0_11_0), str(LATEST_1_0)])
def fix_opts_for_new_jvm(node):
    # Startup scripts for early versions of Kafka contain options that are not
    # supported on recent JVMs, such as -XX:+PrintGCDateStamps or -XX:UseParNewGC.
    # When a system test runs on a JVM that doesn't support these options,
    # we set up environment variables with corrected options.
java_ver = java_version(node)
if java_ver <= 9:
return ""
cmd = ""
if node.version == LATEST_0_8_2 or node.version == LATEST_0_9 or node.version == LATEST_0_10_0 or node.version == LATEST_0_10_1 or node.version == LATEST_0_10_2 or node.version == LATEST_0_11_0 or node.version == LATEST_1_0:
cmd += "export KAFKA_GC_LOG_OPTS=\"-Xlog:gc*:file=kafka-gc.log:time,tags:filecount=10,filesize=102400\"; "
cmd += "export KAFKA_JVM_PERFORMANCE_OPTS=\"-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true\"; "
return cmd
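# Usage sketch (hypothetical call site): services typically prepend the returned
# exports to the command they run on the node, e.g.
#   cmd = fix_opts_for_new_jvm(node) + start_cmd
# so GC/JVM options are only rewritten when an old Kafka runs on a new JVM.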
| TiVo/kafka | tests/kafkatest/services/kafka/util.py | Python | apache-2.0 | 2,245 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron import wsgi
class StubExtension(object):
def __init__(self, alias="stub_extension"):
self.alias = alias
def get_name(self):
return "Stub Extension"
def get_alias(self):
return self.alias
def get_description(self):
return ""
def get_updated(self):
return ""
class StubPlugin(object):
def __init__(self, supported_extensions=None):
supported_extensions = supported_extensions or []
self.supported_extension_aliases = supported_extensions
class ExtensionExpectingPluginInterface(StubExtension):
"""Expect plugin to implement all methods in StubPluginInterface.
This extension expects plugin to implement all the methods defined
in StubPluginInterface.
"""
def get_plugin_interface(self):
return StubPluginInterface
class StubPluginInterface(extensions.PluginInterface):
@abc.abstractmethod
def get_foo(self, bar=None):
pass
class StubBaseAppController(wsgi.Controller):
def index(self, request):
return "base app index"
def show(self, request, id):
return {'fort': 'knox'}
def update(self, request, id):
return {'uneditable': 'original_value'}
| glove747/liberty-neutron | neutron/tests/unit/extension_stubs.py | Python | apache-2.0 | 1,920 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.emr_step`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.sensors.emr_step import EmrStepSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.emr_step`.",
DeprecationWarning, stacklevel=2
)
| spektom/incubator-airflow | airflow/contrib/sensors/emr_step_sensor.py | Python | apache-2.0 | 1,159 |
"""Provides functionality to interact with lights."""
import asyncio
import csv
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_CONTROL
from homeassistant.components.group import \
ENTITY_ID_FORMAT as GROUP_ENTITY_ID_FORMAT
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TOGGLE, SERVICE_TURN_OFF, SERVICE_TURN_ON,
STATE_ON)
from homeassistant.exceptions import UnknownUser, Unauthorized
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers import intent
from homeassistant.loader import bind_hass
import homeassistant.util.color as color_util
DOMAIN = 'light'
SCAN_INTERVAL = timedelta(seconds=30)
GROUP_NAME_ALL_LIGHTS = 'all lights'
ENTITY_ID_ALL_LIGHTS = GROUP_ENTITY_ID_FORMAT.format('all_lights')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Bitfield of features supported by the light entity
SUPPORT_BRIGHTNESS = 1
SUPPORT_COLOR_TEMP = 2
SUPPORT_EFFECT = 4
SUPPORT_FLASH = 8
SUPPORT_COLOR = 16
SUPPORT_TRANSITION = 32
SUPPORT_WHITE_VALUE = 128
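# These flags are combined with bitwise OR in an entity's supported_features,
# e.g. a dimmable color light reports SUPPORT_BRIGHTNESS | SUPPORT_COLOR (== 17).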
# Integer that represents transition time in seconds to make change.
ATTR_TRANSITION = "transition"
# Lists holding color values
ATTR_RGB_COLOR = "rgb_color"
ATTR_XY_COLOR = "xy_color"
ATTR_HS_COLOR = "hs_color"
ATTR_COLOR_TEMP = "color_temp"
ATTR_KELVIN = "kelvin"
ATTR_MIN_MIREDS = "min_mireds"
ATTR_MAX_MIREDS = "max_mireds"
ATTR_COLOR_NAME = "color_name"
ATTR_WHITE_VALUE = "white_value"
# Brightness of the light, 0..255 or percentage
ATTR_BRIGHTNESS = "brightness"
ATTR_BRIGHTNESS_PCT = "brightness_pct"
# String representing a profile (built-in or externally defined).
ATTR_PROFILE = "profile"
# If the light should flash, can be FLASH_SHORT or FLASH_LONG.
ATTR_FLASH = "flash"
FLASH_SHORT = "short"
FLASH_LONG = "long"
# List of possible effects
ATTR_EFFECT_LIST = "effect_list"
# Apply an effect to the light, can be EFFECT_COLORLOOP.
ATTR_EFFECT = "effect"
EFFECT_COLORLOOP = "colorloop"
EFFECT_RANDOM = "random"
EFFECT_WHITE = "white"
COLOR_GROUP = "Color descriptors"
LIGHT_PROFILES_FILE = "light_profiles.csv"
# Service call validation schemas
VALID_TRANSITION = vol.All(vol.Coerce(float), vol.Clamp(min=0, max=6553))
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255))
VALID_BRIGHTNESS_PCT = vol.All(vol.Coerce(float), vol.Range(min=0, max=100))
LIGHT_TURN_ON_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.comp_entity_ids,
vol.Exclusive(ATTR_PROFILE, COLOR_GROUP): cv.string,
ATTR_TRANSITION: VALID_TRANSITION,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence((cv.byte, cv.byte, cv.byte)),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence((cv.small_float, cv.small_float)),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP):
vol.All(vol.ExactSequence(
(vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)))),
vol.Coerce(tuple)),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP):
vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP):
vol.All(vol.Coerce(int), vol.Range(min=0)),
ATTR_WHITE_VALUE: vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
ATTR_FLASH: vol.In([FLASH_SHORT, FLASH_LONG]),
ATTR_EFFECT: cv.string,
})
LIGHT_TURN_OFF_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.comp_entity_ids,
ATTR_TRANSITION: VALID_TRANSITION,
ATTR_FLASH: vol.In([FLASH_SHORT, FLASH_LONG]),
})
LIGHT_TOGGLE_SCHEMA = LIGHT_TURN_ON_SCHEMA
PROFILE_SCHEMA = vol.Schema(
vol.ExactSequence((str, cv.small_float, cv.small_float, cv.byte))
)
INTENT_SET = 'HassLightSet'
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id=None):
"""Return if the lights are on based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_LIGHTS
return hass.states.is_state(entity_id, STATE_ON)
def preprocess_turn_on_alternatives(params):
"""Process extra data for turn light on request."""
profile = Profiles.get(params.pop(ATTR_PROFILE, None))
if profile is not None:
params.setdefault(ATTR_XY_COLOR, profile[:2])
params.setdefault(ATTR_BRIGHTNESS, profile[2])
color_name = params.pop(ATTR_COLOR_NAME, None)
if color_name is not None:
try:
params[ATTR_RGB_COLOR] = color_util.color_name_to_rgb(color_name)
except ValueError:
_LOGGER.warning('Got unknown color %s, falling back to white',
color_name)
params[ATTR_RGB_COLOR] = (255, 255, 255)
kelvin = params.pop(ATTR_KELVIN, None)
if kelvin is not None:
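        # mireds are the reciprocal of the color temperature scaled by 1e6,
        # e.g. 3500 K -> roughly 286 mireds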
mired = color_util.color_temperature_kelvin_to_mired(kelvin)
params[ATTR_COLOR_TEMP] = int(mired)
brightness_pct = params.pop(ATTR_BRIGHTNESS_PCT, None)
if brightness_pct is not None:
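        # scale the 0-100 % value onto the 0-255 range used internally,
        # e.g. 50 % -> 127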
params[ATTR_BRIGHTNESS] = int(255 * brightness_pct/100)
xy_color = params.pop(ATTR_XY_COLOR, None)
if xy_color is not None:
params[ATTR_HS_COLOR] = color_util.color_xy_to_hs(*xy_color)
rgb_color = params.pop(ATTR_RGB_COLOR, None)
if rgb_color is not None:
params[ATTR_HS_COLOR] = color_util.color_RGB_to_hs(*rgb_color)
def preprocess_turn_off(params):
"""Process data for turning light off if brightness is 0."""
if ATTR_BRIGHTNESS in params and params[ATTR_BRIGHTNESS] == 0:
# Zero brightness: Light will be turned off
params = {k: v for k, v in params.items() if k in [ATTR_TRANSITION,
ATTR_FLASH]}
return (True, params) # Light should be turned off
return (False, None) # Light should be turned on
class SetIntentHandler(intent.IntentHandler):
"""Handle set color intents."""
intent_type = INTENT_SET
slot_schema = {
vol.Required('name'): cv.string,
vol.Optional('color'): color_util.color_name_to_rgb,
vol.Optional('brightness'): vol.All(vol.Coerce(int), vol.Range(0, 100))
}
async def async_handle(self, intent_obj):
"""Handle the hass intent."""
hass = intent_obj.hass
slots = self.async_validate_slots(intent_obj.slots)
state = hass.helpers.intent.async_match_state(
slots['name']['value'],
[state for state in hass.states.async_all()
if state.domain == DOMAIN])
service_data = {
ATTR_ENTITY_ID: state.entity_id,
}
speech_parts = []
if 'color' in slots:
intent.async_test_feature(
state, SUPPORT_COLOR, 'changing colors')
service_data[ATTR_RGB_COLOR] = slots['color']['value']
# Use original passed in value of the color because we don't have
# human readable names for that internally.
speech_parts.append('the color {}'.format(
intent_obj.slots['color']['value']))
if 'brightness' in slots:
intent.async_test_feature(
state, SUPPORT_BRIGHTNESS, 'changing brightness')
service_data[ATTR_BRIGHTNESS_PCT] = slots['brightness']['value']
speech_parts.append('{}% brightness'.format(
slots['brightness']['value']))
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, service_data)
response = intent_obj.create_response()
if not speech_parts: # No attributes changed
speech = 'Turned on {}'.format(state.name)
else:
parts = ['Changed {} to'.format(state.name)]
for index, part in enumerate(speech_parts):
if index == 0:
parts.append(' {}'.format(part))
elif index != len(speech_parts) - 1:
parts.append(', {}'.format(part))
else:
parts.append(' and {}'.format(part))
speech = ''.join(parts)
response.async_set_speech(speech)
return response
async def async_setup(hass, config):
"""Expose light control via state machine and services."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_LIGHTS)
await component.async_setup(config)
# load profiles from files
profiles_valid = await Profiles.load_profiles(hass)
if not profiles_valid:
return False
async def async_handle_light_on_service(service):
"""Handle a turn light on service call."""
# Get the validated data
params = service.data.copy()
# Convert the entity ids to valid light ids
target_lights = await component.async_extract_from_service(service)
params.pop(ATTR_ENTITY_ID, None)
if service.context.user_id:
user = await hass.auth.async_get_user(service.context.user_id)
if user is None:
raise UnknownUser(context=service.context)
entity_perms = user.permissions.check_entity
for light in target_lights:
if not entity_perms(light, POLICY_CONTROL):
raise Unauthorized(
context=service.context,
entity_id=light,
permission=POLICY_CONTROL
)
preprocess_turn_on_alternatives(params)
turn_lights_off, off_params = preprocess_turn_off(params)
update_tasks = []
for light in target_lights:
light.async_set_context(service.context)
pars = params
off_pars = off_params
turn_light_off = turn_lights_off
if not pars:
pars = params.copy()
pars[ATTR_PROFILE] = Profiles.get_default(light.entity_id)
preprocess_turn_on_alternatives(pars)
turn_light_off, off_pars = preprocess_turn_off(pars)
if turn_light_off:
await light.async_turn_off(**off_pars)
else:
await light.async_turn_on(**pars)
if not light.should_poll:
continue
update_tasks.append(
light.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks, loop=hass.loop)
# Listen for light on and light off service calls.
hass.services.async_register(
DOMAIN, SERVICE_TURN_ON, async_handle_light_on_service,
schema=LIGHT_TURN_ON_SCHEMA)
component.async_register_entity_service(
SERVICE_TURN_OFF, LIGHT_TURN_OFF_SCHEMA,
'async_turn_off'
)
component.async_register_entity_service(
SERVICE_TOGGLE, LIGHT_TOGGLE_SCHEMA,
'async_toggle'
)
hass.helpers.intent.async_register(SetIntentHandler())
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class Profiles:
"""Representation of available color profiles."""
_all = None
@classmethod
async def load_profiles(cls, hass):
"""Load and cache profiles."""
def load_profile_data(hass):
"""Load built-in profiles and custom profiles."""
profile_paths = [os.path.join(os.path.dirname(__file__),
LIGHT_PROFILES_FILE),
hass.config.path(LIGHT_PROFILES_FILE)]
profiles = {}
for profile_path in profile_paths:
if not os.path.isfile(profile_path):
continue
with open(profile_path) as inp:
reader = csv.reader(inp)
# Skip the header
next(reader, None)
try:
for rec in reader:
profile, color_x, color_y, brightness = \
PROFILE_SCHEMA(rec)
profiles[profile] = (color_x, color_y, brightness)
except vol.MultipleInvalid as ex:
_LOGGER.error(
"Error parsing light profile from %s: %s",
profile_path, ex)
return None
return profiles
cls._all = await hass.async_add_job(load_profile_data, hass)
return cls._all is not None
@classmethod
def get(cls, name):
"""Return a named profile."""
return cls._all.get(name)
@classmethod
def get_default(cls, entity_id):
"""Return the default turn-on profile for the given light."""
# pylint: disable=unsupported-membership-test
name = entity_id + ".default"
if name in cls._all:
return name
name = ENTITY_ID_ALL_LIGHTS + ".default"
if name in cls._all:
return name
return None
class Light(ToggleEntity):
"""Representation of a light."""
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return None
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return None
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return None
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 153
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# Default to the Philips Hue value that HA has always assumed
# https://developers.meethue.com/documentation/core-concepts
return 500
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return None
@property
def effect_list(self):
"""Return the list of supported effects."""
return None
@property
def effect(self):
"""Return the current effect."""
return None
@property
def state_attributes(self):
"""Return optional state attributes."""
data = {}
supported_features = self.supported_features
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_MIN_MIREDS] = self.min_mireds
data[ATTR_MAX_MIREDS] = self.max_mireds
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT_LIST] = self.effect_list
if self.is_on:
if supported_features & SUPPORT_BRIGHTNESS:
data[ATTR_BRIGHTNESS] = self.brightness
if supported_features & SUPPORT_COLOR_TEMP:
data[ATTR_COLOR_TEMP] = self.color_temp
if supported_features & SUPPORT_COLOR and self.hs_color:
# pylint: disable=unsubscriptable-object,not-an-iterable
hs_color = self.hs_color
data[ATTR_HS_COLOR] = (
round(hs_color[0], 3),
round(hs_color[1], 3),
)
data[ATTR_RGB_COLOR] = color_util.color_hs_to_RGB(*hs_color)
data[ATTR_XY_COLOR] = color_util.color_hs_to_xy(*hs_color)
if supported_features & SUPPORT_WHITE_VALUE:
data[ATTR_WHITE_VALUE] = self.white_value
if supported_features & SUPPORT_EFFECT:
data[ATTR_EFFECT] = self.effect
return {key: val for key, val in data.items() if val is not None}
@property
def supported_features(self):
"""Flag supported features."""
return 0
| MartinHjelmare/home-assistant | homeassistant/components/light/__init__.py | Python | apache-2.0 | 16,431 |
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from os.path import join, abspath, dirname, exists
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from shutil import move
from workspace_tools.paths import *
from workspace_tools.utils import mkdir, cmd
from workspace_tools.export import export, setup_user_prj
USR_PRJ_NAME = "usr_prj"
USER_PRJ = join(EXPORT_WORKSPACE, USR_PRJ_NAME)
USER_SRC = join(USER_PRJ, "src")
def setup_test_user_prj():
if exists(USER_PRJ):
print 'Test user project already generated...'
return
setup_user_prj(USER_PRJ, join(TEST_DIR, "rtos", "mbed", "basic"), [join(LIB_DIR, "rtos"), join(LIB_DIR, "tests", "mbed", "env")])
# FAKE BUILD URL
open(join(USER_SRC, "mbed.bld"), 'w').write("http://mbed.org/users/mbed_official/code/mbed/builds/976df7c37ad5\n")
def fake_build_url_resolver(url):
# FAKE BUILD URL: Ignore the URL, always return the path to the mbed library
return {'path':MBED_LIBRARIES, 'name':'mbed'}
def test_export(toolchain, target, expected_error=None):
if toolchain is None and target is None:
base_dir = join(EXPORT_TMP, "zip")
else:
base_dir = join(EXPORT_TMP, toolchain, target)
temp_dir = join(base_dir, "temp")
mkdir(temp_dir)
zip_path, report = export(USER_PRJ, USR_PRJ_NAME, toolchain, target, base_dir, temp_dir, False, None, fake_build_url_resolver)
if report['success']:
move(zip_path, join(EXPORT_DIR, "export_%s_%s.zip" % (toolchain, target)))
print "[OK]"
else:
if expected_error is None:
            print '[ERROR] %s' % report['errormsg']
else:
if (zip_path is None) and (expected_error in report['errormsg']):
print '[OK]'
else:
print '[ERROR]'
print ' zip:', zip_path
print ' msg:', report['errormsg']
if __name__ == '__main__':
setup_test_user_prj()
for toolchain, target in [
('zip', 'LPC1768'),
('emblocks', 'LPC1768'),
('emblocks', 'LPC1549'),
('emblocks', 'LPC1114'),
('emblocks', 'LPC11U35_401'),
('emblocks', 'LPC11U35_501'),
('emblocks', 'LPCCAPPUCCINO'),
('emblocks', 'LPC2368'),
('emblocks', 'STM32F407'),
('emblocks', 'DISCO_F100RB'),
('emblocks', 'DISCO_F051R8'),
('emblocks', 'DISCO_F407VG'),
('emblocks', 'DISCO_F303VC'),
('emblocks', 'NRF51822'),
('emblocks', 'NUCLEO_F401RE'),
('emblocks', 'NUCLEO_F411RE'),
('emblocks', 'MTS_MDOT_F405RG'),
('emblocks', 'MTS_MDOT_F411RE'),
('coide', 'KL05Z'),
('coide', 'KL25Z'),
('coide', 'LPC1768'),
('coide', 'ARCH_PRO'),
('coide', 'DISCO_F407VG'),
('coide', 'NUCLEO_F401RE'),
('coide', 'NUCLEO_F411RE'),
('coide', 'DISCO_F429ZI'),
('coide', 'NUCLEO_F334R8'),
('coide', 'MTS_MDOT_F405RG'),
('coide', 'MTS_MDOT_F411RE'),
('uvision', 'LPC1768'),
('uvision', 'LPC11U24'),
('uvision', 'KL25Z'),
('uvision', 'LPC1347'),
('uvision', 'LPC1114'),
('uvision', 'LPC4088'),
('uvision', 'LPC4088_DM'),
('uvision', 'LPC4337'),
('uvision', 'HRM1017'),
('uvision', 'NUCLEO_F030R8'),
('uvision', 'NUCLEO_F070RB'),
('uvision', 'NUCLEO_F072RB'),
('uvision', 'NUCLEO_F091RC'),
('uvision', 'NUCLEO_F103RB'),
('uvision', 'NUCLEO_F302R8'),
('uvision', 'NUCLEO_F303RE'),
('uvision', 'NUCLEO_F334R8'),
('uvision', 'NUCLEO_F401RE'),
('uvision', 'NUCLEO_F411RE'),
('uvision', 'NUCLEO_F446RE'),
('uvision', 'NUCLEO_L053R8'),
('uvision', 'NUCLEO_L073RZ'),
('uvision', 'NUCLEO_L152RE'),
('uvision', 'MTS_MDOT_F405RG'),
('uvision', 'MAXWSNENV'),
('uvision', 'MAX32600MBED'),
('uvision', 'DISCO_L053C8'),
('uvision', 'DISCO_F334C8'),
('uvision', 'DISCO_F746NG'),
('uvision', 'DISCO_L476VG'),
('lpcxpresso', 'LPC1768'),
('lpcxpresso', 'LPC4088'),
('lpcxpresso', 'LPC4088_DM'),
('lpcxpresso', 'LPC1114'),
('lpcxpresso', 'LPC11U35_401'),
('lpcxpresso', 'LPC11U35_501'),
('lpcxpresso', 'LPCCAPPUCCINO'),
('lpcxpresso', 'LPC1549'),
('lpcxpresso', 'LPC11U68'),
# Linux path: /home/emimon01/bin/gcc-cs/bin/
# Windows path: "C:/Program Files (x86)/CodeSourcery/Sourcery_CodeBench_Lite_for_ARM_EABI/bin/"
('codesourcery', 'LPC1768'),
# Linux path: /home/emimon01/bin/gcc-arm/bin/
# Windows path: C:/arm-none-eabi-gcc-4_7/bin/
('gcc_arm', 'LPC1768'),
('gcc_arm', 'LPC4088_DM'),
('gcc_arm', 'LPC1549'),
('gcc_arm', 'LPC1114'),
('gcc_arm', 'LPC11U35_401'),
('gcc_arm', 'LPC11U35_501'),
('gcc_arm', 'LPCCAPPUCCINO'),
('gcc_arm', 'LPC2368'),
('gcc_arm', 'LPC2460'),
('gcc_arm', 'LPC824'),
('gcc_arm', 'SSCI824'),
('gcc_arm', 'STM32F407'),
('gcc_arm', 'DISCO_F100RB'),
('gcc_arm', 'DISCO_F051R8'),
('gcc_arm', 'DISCO_F407VG'),
('gcc_arm', 'DISCO_F303VC'),
('gcc_arm', 'DISCO_L053C8'),
('gcc_arm', 'DISCO_F334C8'),
('gcc_arm', 'DISCO_F746NG'),
('gcc_arm', 'NRF51822'),
('gcc_arm', 'HRM1017'),
('gcc_arm', 'NUCLEO_F401RE'),
('gcc_arm', 'NUCLEO_F411RE'),
('gcc_arm', 'NUCLEO_F446RE'),
('gcc_arm', 'DISCO_F429ZI'),
('gcc_arm', 'NUCLEO_F334R8'),
('gcc_arm', 'MAX32600MBED'),
('gcc_arm', 'MTS_MDOT_F405RG'),
('gcc_arm', 'MTS_MDOT_F411RE'),
('gcc_arm', 'RZ_A1H'),
('gcc_arm', 'MAXWSNENV'),
('gcc_arm', 'ARCH_BLE'),
('gcc_arm', 'ARCH_MAX'),
('gcc_arm', 'ARCH_PRO'),
('gcc_arm', 'DELTA_DFCM_NNN40'),
('gcc_arm', 'K20D50M'),
('gcc_arm', 'K22F'),
('gcc_arm', 'K64F'),
('gcc_arm', 'KL05Z'),
('gcc_arm', 'KL25Z'),
('gcc_arm', 'KL43Z'),
('gcc_arm', 'KL46Z'),
('gcc_arm', 'EFM32GG_STK3700'),
('gcc_arm', 'EFM32LG_STK3600'),
('gcc_arm', 'EFM32WG_STK3800'),
('gcc_arm', 'EFM32ZG_STK3200'),
('gcc_arm', 'EFM32HG_STK3400'),
('ds5_5', 'LPC1768'), ('ds5_5', 'LPC11U24'),
('iar', 'LPC1768'),
('iar', 'LPC4088_DM'),
('iar', 'LPC1347'),
('iar', 'NUCLEO_F030R8'),
('iar', 'NUCLEO_F070RB'),
('iar', 'NUCLEO_F072RB'),
('iar', 'NUCLEO_F091RC'),
('iar', 'NUCLEO_F302R8'),
('iar', 'NUCLEO_F303RE'),
('iar', 'NUCLEO_F334R8'),
('iar', 'NUCLEO_F401RE'),
('iar', 'NUCLEO_F411RE'),
('iar', 'NUCLEO_F446RE'),
('iar', 'NUCLEO_L053R8'),
('iar', 'NUCLEO_L073RZ'),
('iar', 'NUCLEO_L152RE'),
('iar', 'DISCO_L053C8'),
('iar', 'DISCO_F334C8'),
('iar', 'DISCO_F746NG'),
('iar', 'DISCO_L476VG'),
('iar', 'STM32F407'),
('iar', 'MTS_MDOT_F405RG'),
('iar', 'MTS_MDOT_F411RE'),
('iar', 'MAXWSNENV'),
('iar', 'MAX32600MBED'),
# Removed following item to avoid script error
#(None, None),
]:
print '\n=== Exporting to "%s::%s" ===' % (toolchain, target)
test_export(toolchain, target)
print "\n=== Test error messages ==="
test_export('lpcxpresso', 'LPC11U24', expected_error='lpcxpresso')
| pi19404/mbed | workspace_tools/export_test.py | Python | apache-2.0 | 8,840 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.core_tasks.bash_completion import BashCompletion
from pants.core_tasks.changed_target_tasks import CompileChanged, TestChanged
from pants.core_tasks.clean import Clean
from pants.core_tasks.deferred_sources_mapper import DeferredSourcesMapper
from pants.core_tasks.explain_options_task import ExplainOptionsTask
from pants.core_tasks.invalidate import Invalidate
from pants.core_tasks.list_goals import ListGoals
from pants.core_tasks.noop import NoopCompile, NoopTest
from pants.core_tasks.pantsd_kill import PantsDaemonKill
from pants.core_tasks.reporting_server_kill import ReportingServerKill
from pants.core_tasks.reporting_server_run import ReportingServerRun
from pants.core_tasks.roots import ListRoots
from pants.core_tasks.run_prep_command import (RunBinaryPrepCommand, RunCompilePrepCommand,
RunTestPrepCommand)
from pants.core_tasks.substitute_aliased_targets import SubstituteAliasedTargets
from pants.core_tasks.targets_help import TargetsHelp
from pants.core_tasks.what_changed import WhatChanged
from pants.goal.goal import Goal
from pants.goal.task_registrar import TaskRegistrar as task
def register_goals():
# Register descriptions for the standard multiple-task goals. Single-task goals get
# their descriptions from their single task.
Goal.register('buildgen', 'Automatically generate BUILD files.')
Goal.register('bootstrap', 'Bootstrap tools needed by subsequent build steps.')
Goal.register('imports', 'Resolve external source dependencies.')
Goal.register('gen', 'Generate code.')
Goal.register('resolve', 'Resolve external binary dependencies.')
Goal.register('compile', 'Compile source code.')
Goal.register('binary', 'Create a runnable binary.')
Goal.register('resources', 'Prepare resources.')
Goal.register('bundle', 'Create a deployable application bundle.')
Goal.register('test', 'Run tests.')
Goal.register('bench', 'Run benchmarks.')
Goal.register('repl', 'Run a REPL.')
Goal.register('repl-dirty', 'Run a REPL, skipping compilation.')
Goal.register('run', 'Invoke a binary.')
Goal.register('run-dirty', 'Invoke a binary, skipping compilation.')
Goal.register('doc', 'Generate documentation.')
Goal.register('publish', 'Publish a build artifact.')
Goal.register('dep-usage', 'Collect target dependency usage data.')
Goal.register('fmt', 'Autoformat source code.')
# Register tasks.
# Cleaning.
task(name='invalidate', action=Invalidate).install()
task(name='clean-all', action=Clean).install('clean-all')
# Pantsd.
kill_pantsd = task(name='kill-pantsd', action=PantsDaemonKill)
kill_pantsd.install()
kill_pantsd.install('clean-all')
# Reporting server.
# TODO: The reporting server should be subsumed into pantsd, and not run via a task.
task(name='server', action=ReportingServerRun, serialize=False).install()
task(name='killserver', action=ReportingServerKill, serialize=False).install()
# Getting help.
task(name='goals', action=ListGoals).install()
task(name='options', action=ExplainOptionsTask).install()
task(name='targets', action=TargetsHelp).install()
# Stub for other goals to schedule 'compile'. See noop_exec_task.py for why this is useful.
task(name='compile', action=NoopCompile).install('compile')
# Prep commands must be the first thing we register under its goal.
task(name='test-prep-command', action=RunTestPrepCommand).install('test', first=True)
task(name='binary-prep-command', action=RunBinaryPrepCommand).install('binary', first=True)
task(name='compile-prep-command', action=RunCompilePrepCommand).install('compile', first=True)
# Stub for other goals to schedule 'test'. See noop_exec_task.py for why this is useful.
task(name='test', action=NoopTest).install('test')
# Operations on files that the SCM detects as changed.
task(name='changed', action=WhatChanged).install()
task(name='compile-changed', action=CompileChanged).install()
task(name='test-changed', action=TestChanged).install()
# Workspace information.
task(name='roots', action=ListRoots).install()
task(name='bash-completion', action=BashCompletion).install()
# Handle sources that aren't loose files in the repo.
task(name='deferred-sources', action=DeferredSourcesMapper).install()
# Processing aliased targets has to occur very early.
task(name='substitute-aliased-targets', action=SubstituteAliasedTargets).install('bootstrap',
first=True)
| kwlzn/pants | src/python/pants/core_tasks/register.py | Python | apache-2.0 | 4,840 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cinder.api import common
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "volumes"
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
def summary_list(self, request, volumes, volume_count=None):
"""Show a list of volumes without many details."""
return self._list_view(self.summary, request, volumes,
volume_count)
def detail_list(self, request, volumes, volume_count=None):
"""Detailed view of a list of volumes."""
return self._list_view(self.detail, request, volumes,
volume_count,
self._collection_name + '/detail')
def summary(self, request, volume):
"""Generic, non-detailed view of a volume."""
return {
'volume': {
'id': volume['id'],
'name': volume['display_name'],
'links': self._get_links(request,
volume['id']),
},
}
def _get_volume_status(self, volume):
# NOTE(wanghao): for fixing bug 1504007, we introduce 'managing',
# 'error_managing' and 'error_managing_deleting' status into managing
# process, but still expose 'creating' and 'error' and 'deleting'
# status to user for API compatibility.
status_map = {
'managing': 'creating',
'error_managing': 'error',
'error_managing_deleting': 'deleting',
}
vol_status = volume.get('status')
return status_map.get(vol_status, vol_status)
def detail(self, request, volume):
"""Detailed view of a single volume."""
volume_ref = {
'volume': {
'id': volume.get('id'),
'status': self._get_volume_status(volume),
'size': volume.get('size'),
'availability_zone': volume.get('availability_zone'),
'created_at': volume.get('created_at'),
'updated_at': volume.get('updated_at'),
'attachments': self._get_attachments(volume),
'name': volume.get('display_name'),
'description': volume.get('display_description'),
'volume_type': self._get_volume_type(volume),
'snapshot_id': volume.get('snapshot_id'),
'source_volid': volume.get('source_volid'),
'metadata': self._get_volume_metadata(volume),
'links': self._get_links(request, volume['id']),
'user_id': volume.get('user_id'),
'bootable': six.text_type(volume.get('bootable')).lower(),
'encrypted': self._is_volume_encrypted(volume),
'replication_status': volume.get('replication_status'),
'consistencygroup_id': volume.get('consistencygroup_id'),
'multiattach': volume.get('multiattach'),
}
}
if request.environ['cinder.context'].is_admin:
volume_ref['volume']['migration_status'] = (
volume.get('migration_status'))
return volume_ref
def _is_volume_encrypted(self, volume):
"""Determine if volume is encrypted."""
return volume.get('encryption_key_id') is not None
def _get_attachments(self, volume):
"""Retrieve the attachments of the volume object."""
attachments = []
if volume['attach_status'] == 'attached':
attaches = volume.volume_attachment
for attachment in attaches:
if attachment.get('attach_status') == 'attached':
a = {'id': attachment.get('volume_id'),
'attachment_id': attachment.get('id'),
'volume_id': attachment.get('volume_id'),
'server_id': attachment.get('instance_uuid'),
'host_name': attachment.get('attached_host'),
'device': attachment.get('mountpoint'),
'attached_at': attachment.get('attach_time'),
}
attachments.append(a)
return attachments
def _get_volume_metadata(self, volume):
"""Retrieve the metadata of the volume object."""
return volume.metadata
def _get_volume_type(self, volume):
"""Retrieve the type the volume object."""
if volume['volume_type_id'] and volume.get('volume_type'):
return volume['volume_type']['name']
else:
return volume['volume_type_id']
def _list_view(self, func, request, volumes, volume_count,
coll_name=_collection_name):
"""Provide a view for a list of volumes.
:param func: Function used to format the volume data
:param request: API request
:param volumes: List of volumes in dictionary format
:param volume_count: Length of the original list of volumes
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: Volume data in dictionary format
"""
volumes_list = [func(request, volume)['volume'] for volume in volumes]
volumes_links = self._get_collection_links(request,
volumes,
coll_name,
volume_count)
volumes_dict = dict(volumes=volumes_list)
if volumes_links:
volumes_dict['volumes_links'] = volumes_links
return volumes_dict
| Hybrid-Cloud/cinder | cinder/api/v2/views/volumes.py | Python | apache-2.0 | 6,435 |
class Base:
def __init__(self):
self.inherited_attr = 42
class C(Base):
pass
match C():
case C(inh<caret>):
pass
| smmribeiro/intellij-community | python/testData/completion/inheritedAttributesSuggestedForKeywordPatterns.py | Python | apache-2.0 | 145 |
print 'µble' | slozier/ironpython2 | Tests/encoded_files/cp11334_warn.py | Python | apache-2.0 | 12 |
from director import applogic
from director import cameracontrol
from PythonQt import QtCore, QtGui
class CameraBookmarks(object):
def __init__(self, view):
self.bookmarks = {}
self.view = view
self.flyer = cameracontrol.Flyer(view)
self.flyer.flyTime = 1.0
def storeCameraBookmark(self, key):
camera = self.view.camera()
focal, position = camera.GetFocalPoint(), camera.GetPosition()
self.bookmarks[key] = (focal, position)
def clear(self):
self.bookmarks = {}
def getBookmark(self, key):
return self.bookmarks.get(key)
def flyToBookmark(self, key):
focal, position = self.getBookmark(key)
self.flyer.zoomTo(focal, position)
class CameraBookmarkWidget(object):
def __init__(self, view):
self.bookmarks = CameraBookmarks(view)
self.widget = QtGui.QScrollArea()
self.widget.setWindowTitle('Camera Bookmarks')
self.storeMapper = QtCore.QSignalMapper()
self.flyMapper = QtCore.QSignalMapper()
self.storeMapper.connect('mapped(QObject*)', self.onStoreCamera)
self.flyMapper.connect('mapped(QObject*)', self.onFlyToCamera)
self.numberOfBookmarks = 8
self.updateLayout()
def updateLayout(self):
self.storeButtons = []
self.flyButtons = []
w = QtGui.QWidget()
l = QtGui.QGridLayout(w)
for i in xrange(self.numberOfBookmarks):
storeButton = QtGui.QPushButton('set')
flyButton = QtGui.QPushButton('fly')
textEdit = QtGui.QLineEdit('camera %d' % i)
storeButton.connect('clicked()', self.storeMapper, 'map()')
flyButton.connect('clicked()', self.flyMapper, 'map()')
self.storeMapper.setMapping(storeButton, storeButton)
self.flyMapper.setMapping(flyButton, flyButton)
self.storeButtons.append(storeButton)
self.flyButtons.append(flyButton)
l.addWidget(storeButton, i, 0)
l.addWidget(flyButton, i, 1)
l.addWidget(textEdit, i, 2)
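            # Fly buttons start disabled; onStoreCamera() enables a slot once a
            # bookmark has been stored for it.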
flyButton.setEnabled(False)
self.flySpeedSpinner = QtGui.QDoubleSpinBox()
self.flySpeedSpinner.setMinimum(0)
self.flySpeedSpinner.setMaximum(60)
self.flySpeedSpinner.setDecimals(1)
self.flySpeedSpinner.setSingleStep(0.5)
self.flySpeedSpinner.setSuffix(' seconds')
self.flySpeedSpinner.setValue(1.0)
l.addWidget(QtGui.QLabel('Fly speed:'), i+1, 0, 2)
l.addWidget(self.flySpeedSpinner, i+1, 2)
self.widget.setWidget(w)
def onStoreCamera(self, button):
index = self.storeButtons.index(button)
self.bookmarks.storeCameraBookmark(index)
self.flyButtons[index].setEnabled(True)
def onFlyToCamera(self, button):
index = self.flyButtons.index(button)
self.bookmarks.flyer.flyTime = self.flySpeedSpinner.value
self.bookmarks.flyToBookmark(index)
def init(view):
global widget, dock
widget = CameraBookmarkWidget(view)
dock = applogic.addWidgetToDock(widget.widget, action=None)
dock.hide()
| patmarion/director | src/python/director/camerabookmarks.py | Python | bsd-3-clause | 3,155 |
"""
=============
Miscellaneous
=============
IEEE 754 Floating Point Special Values:
-----------------------------------------------
Special values defined in numpy: nan, inf,
NaNs can be used as a poor-man's mask (if you don't care what the
original value was)
Note: cannot use equality to test NaNs. E.g.: ::
>>> np.where(myarr == np.nan)
>>> nan == nan # is always False! Use special numpy functions instead.
>>> np.nan == np.nan
False
>>> myarr = np.array([1., 0., np.nan, 3.])
>>> myarr[myarr == np.nan] = 0. # doesn't work
>>> myarr
array([ 1., 0., NaN, 3.])
 >>> myarr[np.isnan(myarr)] = 0. # use this instead to find and replace NaNs
>>> myarr
array([ 1., 0., 0., 3.])
Other related special value functions: ::
isinf(): True if value is inf
isfinite(): True if not nan or inf
nan_to_num(): Map nan to 0, inf to max float, -inf to min float
The following corresponds to the usual functions except that nans are excluded from
the results: ::
nansum()
nanmax()
nanmin()
nanargmax()
nanargmin()
>>> x = np.arange(10.)
>>> x[3] = np.nan
>>> x.sum()
nan
>>> np.nansum(x)
42.0
How numpy handles numerical exceptions
Default is to "warn"
But this can be changed, and it can be set individually for different kinds
of exceptions. The different behaviors are: ::
'ignore' : ignore completely
'warn' : print a warning (once only)
'raise' : raise an exception
'call' : call a user-supplied function (set using seterrcall())
These behaviors can be set for all kinds of errors or specific ones: ::
all: apply to all numeric exceptions
invalid: when NaNs are generated
divide: divide by zero (for integers as well!)
overflow: floating point overflows
underflow: floating point underflows
Note that integer divide-by-zero is handled by the same machinery.
These behaviors are set on a per-thread basis.
Examples:
------------
::
>>> oldsettings = np.seterr(all='warn')
>>> np.zeros(5,dtype=np.float32)/0.
invalid value encountered in divide
>>> j = np.seterr(under='ignore')
>>> np.array([1.e-100])**10
>>> j = np.seterr(invalid='raise')
>>> np.sqrt(np.array([-1.]))
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
... print "saw stupid error!"
>>> np.seterrcall(errorhandler)
>>> j = np.seterr(all='call')
>>> np.zeros(5, dtype=np.int32)/0
FloatingPointError: invalid value encountered in divide
saw stupid error!
>>> j = np.seterr(**oldsettings) # restore previous
# error-handling settings
Interfacing to C:
-----------------
Only a survey of the choices. Little detail on how each works.
1) Bare metal, wrap your own C-code manually.
- Plusses:
- Efficient
- No dependencies on other tools
- Minuses:
- Lots of learning overhead:
- need to learn basics of Python C API
- need to learn basics of numpy C API
- need to learn how to handle reference counting and love it.
- Reference counting often difficult to get right.
- getting it wrong leads to memory leaks, and worse, segfaults
- API will change for Python 3.0!
2) pyrex
- Plusses:
- avoid learning C API's
- no dealing with reference counting
   - can code in pseudo python and generate C code
- can also interface to existing C code
- should shield you from changes to Python C api
- become pretty popular within Python community
- Minuses:
- Can write code in non-standard form which may become obsolete
- Not as flexible as manual wrapping
- Maintainers not easily adaptable to new features
Thus:
3) cython - fork of pyrex to allow needed features for SAGE
- being considered as the standard scipy/numpy wrapping tool
- fast indexing support for arrays
4) ctypes
- Plusses:
- part of Python standard library
- good for interfacing to existing sharable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
     attribute (see the sketch after this list): ::
a.ctypes.data a.ctypes.get_strides
a.ctypes.data_as a.ctypes.shape
a.ctypes.get_as_parameter a.ctypes.shape_as
a.ctypes.get_data a.ctypes.strides
a.ctypes.get_shape a.ctypes.strides_as
- Minuses:
- can't use for writing code to be turned into C extensions, only a wrapper tool.
5) SWIG (automatic wrapper generator)
- Plusses:
- around a long time
- multiple scripting language support
- C++ support
- Good for wrapping large (many functions) existing C libraries
- Minuses:
- generates lots of code between Python and the C code
- can cause performance problems that are nearly impossible to optimize out
- interface files can be hard to write
- doesn't necessarily avoid reference counting issues or needing to know API's
6) Weave
- Plusses:
- Phenomenal tool
- can turn many numpy expressions into C code
- dynamic compiling and loading of generated C code
- can embed pure C code in Python module and have weave extract, generate interfaces
and compile, etc.
- Minuses:
- Future uncertain--lacks a champion
7) Psyco
- Plusses:
- Turns pure python into efficient machine code through jit-like optimizations
- very fast when it optimizes well
- Minuses:
- Only on intel (windows?)
- Doesn't do much for numpy?
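A minimal sketch of the ctypes option (4) above -- the shared library name
and its process() function are hypothetical: ::
    import ctypes
    a = np.ascontiguousarray(np.arange(10, dtype=np.float64))
    lib = ctypes.CDLL('libexample.so')          # hypothetical library
    lib.process(a.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                ctypes.c_int(a.size))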
Interfacing to Fortran:
-----------------------
Fortran: Clear choice is f2py. (Pyfort is an older alternative, but not supported
any longer)
Interfacing to C++:
-------------------
1) CXX
2) Boost.python
3) SWIG
4) Sage has used cython to wrap C++ (not pretty, but it can be done)
5) SIP (used mainly in PyQT)
"""
| chadnetzer/numpy-gaurdro | numpy/doc/misc.py | Python | bsd-3-clause | 5,725 |
from grano.logic import Loader
import unicodecsv
# This source URL will be applied to all properties without their own lineage:
DEFAULT_SOURCE_URL = 'http://www.opennews.org/'
# Any settings (free-form dict):
PROJECT_SETTINGS = {}
loader = Loader('opennews',
project_label='opennews',
project_settings=PROJECT_SETTINGS,
source_url=DEFAULT_SOURCE_URL)
reader = unicodecsv.reader(open('fellows.csv'))
reader.next()
for record in reader:
fellow = loader.make_entity('fellow')
fellow.set('name', record[0])
fellow.set('twitter_handle', record[1])
fellow.save()
news_org = loader.make_entity('news_organization')
news_org.set('name', record[4])
news_org.set('contact_url', record[5])
news_org.save()
fellowship = loader.make_relation('fellowship', fellow, news_org)
fellowship.set('date_start', record[2])
fellowship.set('date_end', record[3])
fellowship.save()
loader.persist()
| granoproject/grano | senegal/loader.py | Python | mit | 985 |
# -*- coding: UTF8 -*-
# --------------------------------------------------------------
# Copyright (c) 2015, Nicolas VERDIER ([email protected])
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
# --------------------------------------------------------------
from pupylib.PupyModule import *
from pupylib.PupyCompleter import *
from pupylib.utils.pe import get_pe_arch
from pupylib.PupyErrors import PupyModuleError
__class_name__="MemoryExec"
class MemoryExec(PupyModule):
""" execute a PE executable from memory """
interactive=1
def __init__(self, *args, **kwargs):
PupyModule.__init__(self,*args, **kwargs)
self.interrupted=False
self.mp=None
def init_argparse(self):
self.arg_parser = PupyArgumentParser(prog="memory_exec", description=self.__doc__)
self.arg_parser.add_argument('-p', '--process', default='cmd.exe', help='process to start suspended')
self.arg_parser.add_argument('--fork', action='store_true', help='fork and do not wait for the child program. stdout will not be retrieved', completer=path_completer)
        self.arg_parser.add_argument('--interactive', action='store_true', help='interact with the new process stdin/stdout')
self.arg_parser.add_argument('path', help='path to the exe', completer=path_completer)
self.arg_parser.add_argument('args', nargs='*', help='optional arguments to pass to the exe')
@windows_only
def is_compatible(self):
pass
def interrupt(self):
self.info("interrupting remote process, please wait ...")
if self.mp:
self.mp.close()
res=self.mp.get_stdout()
self.log(res)
def run(self, args):
if args.interactive:
#TODO
self.error("interactive memory execution has not been implemented yet")
return
        # Check that the PE architecture matches the architecture of the current pupy process:
pe_arch=get_pe_arch(args.path)
proc_arch=self.client.desc["proc_arch"]
if pe_arch!=proc_arch:
self.error("%s is a %s PE and your pupy payload is a %s process. Please inject a %s PE or first migrate into a %s process"%(args.path, pe_arch, proc_arch, proc_arch, pe_arch))
return
wait=True
redirect_stdio=True
if args.fork:
wait=False
redirect_stdio=False
raw_pe=b""
with open(args.path,'rb') as f:
raw_pe=f.read()
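        # Load the in-memory PE execution helpers on the remote client side.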
self.client.load_package("pupymemexec")
self.client.load_package("pupwinutils.memexec")
res=""
self.mp=self.client.conn.modules['pupwinutils.memexec'].MemoryPE(raw_pe, args=args.args, hidden=True, redirect_stdio=redirect_stdio)
self.mp.run()
while True:
if self.mp.wait(1):
break
self.mp.close()
res=self.mp.get_stdout()
self.log(res)
| sovaa/backdoorme | backdoors/shell/pupy/pupy/modules/memory_exec.py | Python | mit | 3,988 |
from toontown.battle import DistributedBattle
from direct.directnotify import DirectNotifyGlobal
class DistributedBattleTutorial(DistributedBattle.DistributedBattle):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleTutorial')
def startTimer(self, ts = 0):
self.townBattle.timer.hide()
def playReward(self, ts):
self.movie.playTutorialReward(ts, self.uniqueName('reward'), self.handleRewardDone)
| ksmit799/Toontown-Source | toontown/tutorial/DistributedBattleTutorial.py | Python | mit | 449 |
def _reset_sys_path():
# Clear generic sys.path[0]
import sys, os
resources = os.environ['RESOURCEPATH']
while sys.path[0] == resources:
del sys.path[0]
_reset_sys_path()
"""
sys.argv emulation
This module starts a basic event loop to collect file- and url-open AppleEvents. Those get
converted to strings and stuffed into sys.argv. When that is done we continue starting
the application.
This is a workaround to convert scripts that expect filenames on the command-line to work
in a GUI environment. GUI applications should not use this feature.
NOTE: This module uses ctypes and not the Carbon modules in the stdlib because the latter
don't work in 64-bit mode and are also not available with python 3.x.
"""
import sys
import os
import time
import ctypes
import struct
class AEDesc (ctypes.Structure):
_fields_ = [
('descKey', ctypes.c_int),
('descContent', ctypes.c_void_p),
]
class EventTypeSpec (ctypes.Structure):
_fields_ = [
('eventClass', ctypes.c_int),
('eventKind', ctypes.c_uint),
]
def _ctypes_setup():
carbon = ctypes.CDLL('/System/Library/Carbon.framework/Carbon')
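    # Declare argument and return types for the Carbon functions used below so
    # that ctypes marshals pointers and structures correctly.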
timer_func = ctypes.CFUNCTYPE(
None, ctypes.c_void_p, ctypes.c_long)
ae_callback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p,
ctypes.c_void_p, ctypes.c_void_p)
carbon.AEInstallEventHandler.argtypes = [
ctypes.c_int, ctypes.c_int, ae_callback,
ctypes.c_void_p, ctypes.c_char ]
carbon.AERemoveEventHandler.argtypes = [
ctypes.c_int, ctypes.c_int, ae_callback,
ctypes.c_char ]
carbon.AEProcessEvent.restype = ctypes.c_int
carbon.AEProcessEvent.argtypes = [ctypes.c_void_p]
carbon.ReceiveNextEvent.restype = ctypes.c_int
carbon.ReceiveNextEvent.argtypes = [
ctypes.c_long, ctypes.POINTER(EventTypeSpec),
ctypes.c_double, ctypes.c_char,
ctypes.POINTER(ctypes.c_void_p)
]
carbon.AEGetParamDesc.restype = ctypes.c_int
carbon.AEGetParamDesc.argtypes = [
ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.POINTER(AEDesc)]
carbon.AECountItems.restype = ctypes.c_int
carbon.AECountItems.argtypes = [ ctypes.POINTER(AEDesc),
ctypes.POINTER(ctypes.c_long) ]
carbon.AEGetNthDesc.restype = ctypes.c_int
carbon.AEGetNthDesc.argtypes = [
ctypes.c_void_p, ctypes.c_long, ctypes.c_int,
ctypes.c_void_p, ctypes.c_void_p ]
carbon.AEGetDescDataSize.restype = ctypes.c_int
carbon.AEGetDescDataSize.argtypes = [ ctypes.POINTER(AEDesc) ]
carbon.AEGetDescData.restype = ctypes.c_int
carbon.AEGetDescData.argtypes = [
ctypes.POINTER(AEDesc),
ctypes.c_void_p,
ctypes.c_int,
]
carbon.FSRefMakePath.restype = ctypes.c_int
carbon.FSRefMakePath.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]
return carbon
def _run_argvemulator(timeout = 60):
# Configure ctypes
carbon = _ctypes_setup()
# Is the emulator running?
running = [True]
timeout = [timeout]
# Configure AppleEvent handlers
ae_callback = carbon.AEInstallEventHandler.argtypes[2]
kAEInternetSuite, = struct.unpack('>i', b'GURL')
kAEISGetURL, = struct.unpack('>i', b'GURL')
kCoreEventClass, = struct.unpack('>i', b'aevt')
kAEOpenApplication, = struct.unpack('>i', b'oapp')
kAEOpenDocuments, = struct.unpack('>i', b'odoc')
keyDirectObject, = struct.unpack('>i', b'----')
typeAEList, = struct.unpack('>i', b'list')
typeChar, = struct.unpack('>i', b'TEXT')
typeFSRef, = struct.unpack('>i', b'fsrf')
FALSE = b'\0'
TRUE = b'\1'
kEventClassAppleEvent, = struct.unpack('>i', b'eppc')
kEventAppleEvent = 1
@ae_callback
def open_app_handler(message, reply, refcon):
# Got a kAEOpenApplication event, which means we can
# start up. On some OSX versions this event is even
# sent when an kAEOpenDocuments or kAEOpenURLs event
# is sent later on.
#
# Therefore don't set running to false, but reduce the
# timeout to at most two seconds beyond the current time.
timeout[0] = min(timeout[0], time.time() - start + 2)
#running[0] = False
return 0
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenApplication,
open_app_handler, 0, FALSE)
@ae_callback
def open_file_handler(message, reply, refcon):
listdesc = AEDesc()
sts = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
ctypes.byref(listdesc))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
item_count = ctypes.c_long()
sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
desc = AEDesc()
for i in range(item_count.value):
sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeFSRef, 0, ctypes.byref(desc))
if sts != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
buf = ctypes.create_string_buffer(sz)
sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
if sts != 0:
print("argvemulator warning: cannot extract open document event")
continue
fsref = buf
buf = ctypes.create_string_buffer(1024)
sts = carbon.FSRefMakePath(ctypes.byref(fsref), buf, 1023)
if sts != 0:
print("argvemulator warning: cannot extract open document event")
continue
print("Adding: %s"%(repr(buf.value.decode('utf-8')),))
if sys.version_info[0] > 2:
sys.argv.append(buf.value.decode('utf-8'))
else:
sys.argv.append(buf.value)
running[0] = False
return 0
carbon.AEInstallEventHandler(kCoreEventClass, kAEOpenDocuments,
open_file_handler, 0, FALSE)
@ae_callback
def open_url_handler(message, reply, refcon):
listdesc = AEDesc()
ok = carbon.AEGetParamDesc(message, keyDirectObject, typeAEList,
ctypes.byref(listdesc))
if ok != 0:
print("argvemulator warning: cannot unpack open document event")
running[0] = False
return
item_count = ctypes.c_long()
sts = carbon.AECountItems(ctypes.byref(listdesc), ctypes.byref(item_count))
if sts != 0:
print("argvemulator warning: cannot unpack open url event")
running[0] = False
return
desc = AEDesc()
for i in range(item_count.value):
sts = carbon.AEGetNthDesc(ctypes.byref(listdesc), i+1, typeChar, 0, ctypes.byref(desc))
if sts != 0:
print("argvemulator warning: cannot unpack open URL event")
running[0] = False
return
sz = carbon.AEGetDescDataSize(ctypes.byref(desc))
buf = ctypes.create_string_buffer(sz)
sts = carbon.AEGetDescData(ctypes.byref(desc), buf, sz)
if sts != 0:
print("argvemulator warning: cannot extract open URL event")
else:
if sys.version_info[0] > 2:
sys.argv.append(buf.value.decode('utf-8'))
else:
sys.argv.append(buf.value)
running[0] = False
return 0
carbon.AEInstallEventHandler(kAEInternetSuite, kAEISGetURL,
open_url_handler, 0, FALSE)
# Remove the funny -psn_xxx_xxx argument
if len(sys.argv) > 1 and sys.argv[1][:4] == '-psn':
del sys.argv[1]
start = time.time()
now = time.time()
eventType = EventTypeSpec()
eventType.eventClass = kEventClassAppleEvent
eventType.eventKind = kEventAppleEvent
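    # Pump Carbon events until an AppleEvent handler clears running[0] or the
    # timeout expires; a non-zero status from the Carbon calls breaks the loop.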
while running[0] and now - start < timeout[0]:
event = ctypes.c_void_p()
sts = carbon.ReceiveNextEvent(1, ctypes.byref(eventType),
start + timeout[0] - now, TRUE, ctypes.byref(event))
if sts != 0:
print("argvemulator warning: fetching events failed")
break
sts = carbon.AEProcessEvent(event)
if sts != 0:
print("argvemulator warning: processing events failed")
break
carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenApplication,
open_app_handler, FALSE)
carbon.AERemoveEventHandler(kCoreEventClass, kAEOpenDocuments,
open_file_handler, FALSE)
carbon.AERemoveEventHandler(kAEInternetSuite, kAEISGetURL,
open_url_handler, FALSE)
def _argv_emulation():
import sys
# only use if started by LaunchServices
for arg in sys.argv[1:]:
if arg.startswith('-psn'):
_run_argvemulator()
break
_argv_emulation()
def _chdir_resource():
import os
os.chdir(os.environ['RESOURCEPATH'])
_chdir_resource()
def _disable_linecache():
import linecache
def fake_getline(*args, **kwargs):
return ''
linecache.orig_getline = linecache.getline
linecache.getline = fake_getline
_disable_linecache()
def _run():
global __file__
import os, sys, site
sys.frozen = 'macosx_app'
base = os.environ['RESOURCEPATH']
argv0 = os.path.basename(os.environ['ARGVZERO'])
script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT)
path = os.path.join(base, script)
sys.argv[0] = __file__ = path
with open(path, 'rU') as fp:
source = fp.read() + "\n"
exec(compile(source, path, 'exec'), globals(), globals())
DEFAULT_SCRIPT='Aggregate.py'
SCRIPT_MAP={}
_run()
| mtnman38/Aggregate | dist/Aggregate 0.8.9 for Mac.app/Contents/Resources/__boot__.py | Python | gpl-2.0 | 10,160 |
#
# Copyright 2013-2014 eNovance
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import fixture as fixture_config
from ceilometer import middleware
from ceilometer.tests import base
HTTP_REQUEST = {
u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'_context_is_admin': True,
u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'_context_quota_class': None,
u'_context_read_deleted': u'no',
u'_context_remote_address': u'10.0.2.15',
u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
u'_context_roles': [u'admin'],
u'_context_timestamp': u'2012-05-08T20:23:41.425105',
u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'event_type': u'http.request',
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz',
'HTTP_X_USER_ID': 'jd-x32',
'HTTP_X_PROJECT_ID': 'project-id',
'HTTP_X_SERVICE_NAME': 'nova'}},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
HTTP_RESPONSE = {
u'_context_auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'_context_is_admin': True,
u'_context_project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'_context_quota_class': None,
u'_context_read_deleted': u'no',
u'_context_remote_address': u'10.0.2.15',
u'_context_request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
u'_context_roles': [u'admin'],
u'_context_timestamp': u'2012-05-08T20:23:41.425105',
u'_context_user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'event_type': u'http.response',
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'payload': {u'request': {'HTTP_X_FOOBAR': 'foobaz',
'HTTP_X_USER_ID': 'jd-x32',
'HTTP_X_PROJECT_ID': 'project-id',
'HTTP_X_SERVICE_NAME': 'nova'},
u'response': {'status': '200 OK'}},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
class TestNotifications(base.BaseTestCase):
def setUp(self):
super(TestNotifications, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.setup_messaging(self.CONF)
def test_process_request_notification(self):
sample = list(middleware.HTTPRequest(mock.Mock()).process_notification(
HTTP_REQUEST
))[0]
self.assertEqual(HTTP_REQUEST['payload']['request']['HTTP_X_USER_ID'],
sample.user_id)
self.assertEqual(HTTP_REQUEST['payload']['request']
['HTTP_X_PROJECT_ID'], sample.project_id)
self.assertEqual(HTTP_REQUEST['payload']['request']
['HTTP_X_SERVICE_NAME'], sample.resource_id)
self.assertEqual(1, sample.volume)
def test_process_response_notification(self):
sample = list(middleware.HTTPResponse(
mock.Mock()).process_notification(HTTP_RESPONSE))[0]
self.assertEqual(HTTP_RESPONSE['payload']['request']['HTTP_X_USER_ID'],
sample.user_id)
self.assertEqual(HTTP_RESPONSE['payload']['request']
['HTTP_X_PROJECT_ID'], sample.project_id)
self.assertEqual(HTTP_RESPONSE['payload']['request']
['HTTP_X_SERVICE_NAME'], sample.resource_id)
self.assertEqual(1, sample.volume)
def test_targets(self):
targets = middleware.HTTPRequest(mock.Mock()).get_targets(self.CONF)
self.assertEqual(4, len(targets))
| ChinaMassClouds/copenstack-server | openstack/src/ceilometer-2014.2.2/ceilometer/tests/test_middleware.py | Python | gpl-2.0 | 4,339 |
# Copyright (c) 2013, 2015-2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2011 Advanced Micro Devices, Inc.
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2004-2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import SCons.Script
import m5.util.terminal
def ignore_style():
"""Determine whether we should ignore style checks"""
return SCons.Script.GetOption('ignore_style') or not sys.stdin.isatty()
def get_termcap():
return m5.util.terminal.get_termcap(SCons.Script.GetOption('use_colors'))
| vineodd/PIMSim | GEM5Simulation/gem5/site_scons/gem5_scons/util.py | Python | gpl-3.0 | 2,595 |
import bpy
bpy.context.camera.sensor_width = 6.16
bpy.context.camera.sensor_height = 4.62
bpy.context.camera.lens = 2.77
bpy.context.camera.sensor_fit = 'AUTO'
| Microvellum/Fluid-Designer | win64-vc/2.78/scripts/presets/camera/GoPro_Hero3_Black.py | Python | gpl-3.0 | 161 |
# (c) 2013-2014, Michael DeHaan <[email protected]>
# Stephen Fromm <[email protected]>
# Brian Coca <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import tempfile
import re
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum_s
from ansible.utils.unicode import to_str
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd,'w')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = "%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = file(fragment).read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write('\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = delimiter.decode('unicode-escape')
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != '\n':
tmp.write('\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith('\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = "skipped, this module does not support check_mode."
return result
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
if src is None or dest is None:
result['failed'] = True
result['msg'] = "src and dest are required"
return result
remote_user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
if not tmp:
tmp = self._make_tmp_path(remote_user)
self._cleanup_remote_tmp = True
if boolean(remote_src):
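            # The fragments already live on the remote host, so delegate the
            # work to the assemble module instead of building the file locally.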
result.update(self._execute_module(tmp=tmp, task_vars=task_vars, delete_remote_tmp=False))
self._remove_tmp_path(tmp)
return result
else:
try:
src = self._find_needle('files', src)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_str(e)
return result
if not os.path.isdir(src):
result['failed'] = True
result['msg'] = "Source (%s) is not a directory" % src
return result
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow, tmp=tmp)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args.update(
dict(
dest=dest,
original_basename=os.path.basename(src),
)
)
if path_checksum != dest_stat['checksum']:
if self._play_context.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(tmp, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms(tmp, remote_user, recursive=True)
new_module_args.update( dict( src=xfered,))
res = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False)
if diff:
res['diff'] = diff
result.update(res)
else:
result.update(self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars, tmp=tmp, delete_remote_tmp=False))
self._remove_tmp_path(tmp)
return result
| filipenf/ansible | lib/ansible/plugins/action/assemble.py | Python | gpl-3.0 | 6,309 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.conf.urls.defaults import handler404, handler500, \
include, patterns
from django.conf import settings
from django.conf.urls.i18n import *
from dialer_campaign.urls import urlpatterns as urlpatterns_dialer_campaign
from dialer_cdr.urls import urlpatterns as urlpatterns_dialer_cdr
from user_profile.urls import urlpatterns as urlpatterns_user_profile
from voice_app.urls import urlpatterns as urlpatterns_voice_app
from survey.urls import urlpatterns as urlpatterns_survey
from dialer_audio.urls import urlpatterns as urlpatterns_dialer_audio
from tastypie.api import Api
from api.user_api import UserResource
from api.voiceapp_api import VoiceAppResource
from api.gateway_api import GatewayResource
from api.content_type_api import ContentTypeResource
from api.phonebook_api import PhonebookResource
from api.campaign_api import CampaignResource
from api.bulk_contact_api import BulkContactResource
from api.campaign_delete_cascade_api import CampaignDeleteCascadeResource
from api.campaign_subscriber_api import CampaignSubscriberResource
from api.campaignsubscriber_per_campaign_api import \
CampaignSubscriberPerCampaignResource
from api.callrequest_api import CallrequestResource
from api.answercall_api import AnswercallResource
from api.dialcallback_api import DialCallbackResource
from api.hangupcall_api import HangupcallResource
from api.store_cdr_api import CdrResource
from survey.api.survey_api import SurveyAppResource
from survey.api.survey_question_api import SurveyQuestionResource
from survey.api.survey_response_api import SurveyResponseResource
import os
from django.contrib import admin
from dajaxice.core import dajaxice_autodiscover
dajaxice_autodiscover()
try:
admin.autodiscover()
except admin.sites.AlreadyRegistered:
# nose imports the admin.py files during tests, so
# the models have already been registered.
pass
# tastypie api
tastypie_api = Api(api_name='v1')
tastypie_api.register(UserResource())
tastypie_api.register(VoiceAppResource())
tastypie_api.register(GatewayResource())
tastypie_api.register(ContentTypeResource())
tastypie_api.register(PhonebookResource())
tastypie_api.register(CampaignResource())
tastypie_api.register(BulkContactResource())
tastypie_api.register(CampaignDeleteCascadeResource())
tastypie_api.register(CampaignSubscriberResource())
tastypie_api.register(CampaignSubscriberPerCampaignResource())
tastypie_api.register(CallrequestResource())
tastypie_api.register(AnswercallResource())
tastypie_api.register(DialCallbackResource())
tastypie_api.register(HangupcallResource())
tastypie_api.register(CdrResource())
tastypie_api.register(SurveyAppResource())
tastypie_api.register(SurveyQuestionResource())
tastypie_api.register(SurveyResponseResource())
js_info_dict = {
'domain': 'djangojs',
'packages': ('dialer_campaign',
'user_profile',
'voice_app',
'survey',
'audiofield'),
}
urlpatterns = patterns('',
(r'^logout/$', 'dialer_campaign.views.logout_view'),
(r'^admin/', include(admin.site.urls)),
(r'^api/', include(tastypie_api.urls)),
(r'^i18n/', include('django.conf.urls.i18n')),
(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
(r'^admin_tools/', include('admin_tools.urls')),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}),
(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', \
{'url': 'static/newfies/images/favicon.png'}),
#(r'^sentry/', include('sentry.web.urls')),
(r'^%s/' % settings.DAJAXICE_MEDIA_PREFIX, include('dajaxice.urls')),
)
urlpatterns += urlpatterns_dialer_campaign
urlpatterns += urlpatterns_dialer_cdr
urlpatterns += urlpatterns_user_profile
urlpatterns += urlpatterns_voice_app
urlpatterns += urlpatterns_survey
urlpatterns += urlpatterns_dialer_audio
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % settings.MEDIA_URL.strip(os.sep),
'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
handler404 = 'urls.custom_404_view'
handler500 = 'urls.custom_500_view'
def custom_404_view(request, template_name='404.html'):
"""404 error handler which includes ``request`` in the context.
Templates: `404.html`
Context: None
"""
from django.template import Context, loader
    from django.http import HttpResponseNotFound
    t = loader.get_template('404.html') # Need to create a 404.html template.
    return HttpResponseNotFound(t.render(Context({
'request': request,
})))
def custom_500_view(request, template_name='500.html'):
"""500 error handler which includes ``request`` in the context.
Templates: `500.html`
Context: None
"""
from django.template import Context, loader
from django.http import HttpResponseServerError
t = loader.get_template('500.html') # Need to create a 500.html template.
return HttpResponseServerError(t.render(Context({
'request': request,
})))
| areski/newfies-dialer | newfies/urls.py | Python | mpl-2.0 | 5,542 |
"""Django App config for course_modes"""
from django.apps import AppConfig
class CourseModesConfig(AppConfig): # lint-amnesty, pylint: disable=missing-class-docstring
name = 'common.djangoapps.course_modes'
verbose_name = "Course Modes"
def ready(self):
from . import signals # pylint: disable=unused-import
| eduNEXT/edx-platform | common/djangoapps/course_modes/apps.py | Python | agpl-3.0 | 335 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from random import randint
from odoo import api, fields, models
class EventTagCategory(models.Model):
_name = "event.tag.category"
_description = "Event Tag Category"
_order = "sequence"
name = fields.Char("Name", required=True, translate=True)
sequence = fields.Integer('Sequence', default=0)
tag_ids = fields.One2many('event.tag', 'category_id', string="Tags")
class EventTag(models.Model):
_name = "event.tag"
_description = "Event Tag"
_order = "sequence"
def _default_color(self):
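        # Pick one of the 11 palette colors at random; index 0 is reserved
        # for "no color" (see the help text on the color field below).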
return randint(1, 11)
name = fields.Char("Name", required=True, translate=True)
sequence = fields.Integer('Sequence', default=0)
category_id = fields.Many2one("event.tag.category", string="Category", required=True, ondelete='cascade')
color = fields.Integer(
string='Color Index', default=lambda self: self._default_color(),
help='Tag color. No color means no display in kanban or front-end, to distinguish internal tags from public categorization tags.')
def name_get(self):
return [(tag.id, "%s: %s" % (tag.category_id.name, tag.name)) for tag in self]
| rven/odoo | addons/event/models/event_tag.py | Python | agpl-3.0 | 1,241 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, working_dir
import spack.paths
from spack.util.executable import ProcessError, which
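# Fallback upstream URL, used when the current checkout has no usable git
# remote configuration (see get_origin_info below).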
_SPACK_UPSTREAM = 'https://github.com/spack/spack'
description = "create a new installation of spack in another prefix"
section = "admin"
level = "long"
def setup_parser(subparser):
subparser.add_argument(
'-r', '--remote', action='store', dest='remote',
help="name of the remote to clone from", default='origin')
subparser.add_argument(
'prefix',
help="name of prefix where we should install spack")
def get_origin_info(remote):
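    """Return the (origin_url, branch) the new clone should track.

    Falls back to the 'develop' branch and the official upstream URL when
    the current checkout gives no usable answer.
    """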
git_dir = os.path.join(spack.paths.prefix, '.git')
git = which('git', required=True)
try:
branch = git('symbolic-ref', '--short', 'HEAD', output=str)
except ProcessError:
branch = 'develop'
tty.warn('No branch found; using default branch: %s' % branch)
if remote == 'origin' and \
branch not in ('master', 'develop'):
branch = 'develop'
tty.warn('Unknown branch found; using default branch: %s' % branch)
try:
origin_url = git(
'--git-dir=%s' % git_dir,
'config', '--get', 'remote.%s.url' % remote,
output=str)
except ProcessError:
origin_url = _SPACK_UPSTREAM
tty.warn('No git repository found; '
'using default upstream URL: %s' % origin_url)
return (origin_url.strip(), branch.strip())
def clone(parser, args):
origin_url, branch = get_origin_info(args.remote)
prefix = args.prefix
tty.msg("Fetching spack from '%s': %s" % (args.remote, origin_url))
if os.path.isfile(prefix):
tty.die("There is already a file at %s" % prefix)
mkdirp(prefix)
if os.path.exists(os.path.join(prefix, '.git')):
tty.die("There already seems to be a git repository in %s" % prefix)
files_in_the_way = os.listdir(prefix)
if files_in_the_way:
tty.die("There are already files there! "
"Delete these files before boostrapping spack.",
*files_in_the_way)
tty.msg("Installing:",
"%s/bin/spack" % prefix,
"%s/lib/spack/..." % prefix)
with working_dir(prefix):
git = which('git', required=True)
git('init', '--shared', '-q')
git('remote', 'add', 'origin', origin_url)
git('fetch', 'origin', '%s:refs/remotes/origin/%s' % (branch, branch),
'-n', '-q')
git('reset', '--hard', 'origin/%s' % branch, '-q')
git('checkout', '-B', branch, 'origin/%s' % branch, '-q')
tty.msg("Successfully created a new spack in %s" % prefix,
"Run %s/bin/spack to use this installation." % prefix)
| LLNL/spack | lib/spack/spack/cmd/clone.py | Python | lgpl-2.1 | 2,958 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyColorlog(PythonPackage):
"""A colored formatter for the python logging module"""
homepage = "https://github.com/borntyping/python-colorlog"
pypi = "colorlog/colorlog-4.0.2.tar.gz"
version('4.0.2', sha256='3cf31b25cbc8f86ec01fef582ef3b840950dea414084ed19ab922c8b493f9b42')
version('3.1.4', sha256='418db638c9577f37f0fae4914074f395847a728158a011be2a193ac491b9779d')
depends_on('py-setuptools', type='build')
| LLNL/spack | var/spack/repos/builtin/packages/py-colorlog/package.py | Python | lgpl-2.1 | 661 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
js_to_json,
)
class StreamangoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?streamango\.com/(?:f|embed)/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://streamango.com/f/clapasobsptpkdfe/20170315_150006_mp4',
'md5': 'e992787515a182f55e38fc97588d802a',
'info_dict': {
'id': 'clapasobsptpkdfe',
'ext': 'mp4',
'title': '20170315_150006.mp4',
}
}, {
'url': 'https://streamango.com/embed/clapasobsptpkdfe/20170315_150006_mp4',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
formats = []
for format_ in re.findall(r'({[^}]*\bsrc\s*:\s*[^}]*})', webpage):
video = self._parse_json(
format_, video_id, transform_source=js_to_json, fatal=False)
if not video:
continue
src = video.get('src')
if not src:
continue
ext = determine_ext(src, default_ext=None)
if video.get('type') == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
else:
formats.append({
'url': src,
'ext': ext or 'mp4',
'width': int_or_none(video.get('width')),
'height': int_or_none(video.get('height')),
'tbr': int_or_none(video.get('bitrate')),
})
self._sort_formats(formats)
return {
'id': video_id,
'url': url,
'title': title,
'formats': formats,
}
| fluxw42/youtube-dl | youtube_dl/extractor/streamango.py | Python | unlicense | 2,004 |
#!/usr/bin/env python2
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name,missing-docstring
import json
import os
import unittest
import shutil
import tempfile
import summarize
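# Minimal stand-in for a test-result record: the clustering helpers under test
# only need the 'failure_text' field, so build just that.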
make_test = lambda t: {'failure_text': t}
class StringsTest(unittest.TestCase):
def test_normalize(self):
for src, dst in [
('0x1234 a 123.13.45.43 b 2e24e003-9ffd-4e78-852c-9dcb6cbef493-123',
'UNIQ1 a UNIQ2 b UNIQ3'),
('Mon, 12 January 2017 11:34:35 blah blah', 'TIMEblah blah'),
('123.45.68.12:345 abcd1234eeee', 'UNIQ1 UNIQ2'),
('foobarbaz ' * 500000,
'foobarbaz ' * 20000 + '\n...[truncated]...\n' + 'foobarbaz ' * 20000),
]:
self.assertEqual(summarize.normalize(src), dst)
def test_editdist(self):
for a, b, expected in [
('foob', 'food', 1),
('doot', 'dot', 1),
('foob', 'f', 3),
('foob', 'g', 4),
]:
self.assertEqual(summarize.editdist(a, b), expected, (a, b, expected))
def test_make_ngram_counts(self):
self.assertEqual(sum(summarize.make_ngram_counts('abcdefg')), 4)
self.assertEqual(sum(summarize.make_ngram_counts(u'abcdefg')), 4)
self.assertEqual(sum(summarize.make_ngram_counts(u'abcdefg\u2006')), 5)
def test_make_ngram_counts_digest(self):
# ensure stability of ngram count digest
self.assertEqual(summarize.make_ngram_counts_digest('some string'), 'eddb950347d1eb05b5d7')
def test_ngram_editdist(self):
self.assertEqual(summarize.ngram_editdist('example text', 'exampl text'), 1)
def test_common_spans(self):
for a, b, expected in [
('an exact match', 'an exact match', [14]),
('some example string', 'some other string', [5, 7, 7]),
('a problem with a common set', 'a common set', [2, 7, 1, 4, 13]),
]:
self.assertEqual(summarize.common_spans([a, b]), expected)
class ClusterTest(unittest.TestCase):
def test_cluster_test(self):
# small strings aren't equal, even with tiny differences
t1 = make_test('exit 1')
t2 = make_test('exit 2')
self.assertEqual(summarize.cluster_test([t1, t2]), {'exit 1': [t1], 'exit 2': [t2]})
t3 = make_test('long message immediately preceding exit code 1')
t4 = make_test('long message immediately preceding exit code 2')
self.assertEqual(summarize.cluster_test([t3, t4]), {t3['failure_text']: [t3, t4]})
t5 = make_test('1 2 ' * 40000)
t6 = make_test('1 2 ' * 39999 + '3 4 ')
self.assertEqual(summarize.cluster_test([t1, t5, t6]),
{t1['failure_text']: [t1], t5['failure_text']: [t5, t6]})
@staticmethod
def cluster_global(clustered, previous_clustered=None):
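        # Call the undecorated implementation via __wrapped__ so the test is
        # not affected by whatever wrapper decorates summarize.cluster_global.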
return summarize.cluster_global.__wrapped__(clustered, previous_clustered)
def test_cluster_global(self):
t1 = make_test('exit 1')
t2 = make_test('exit 1')
t3 = make_test('exit 1')
self.assertEqual(
self.cluster_global({'test a': {'exit 1': [t1, t2]}, 'test b': {'exit 1': [t3]}}),
{'exit 1': {'test a': [t1, t2], 'test b': [t3]}})
def test_cluster_global_previous(self):
        # clusters are stable when provided with previous seeds
textOld = 'some long failure message that changes occasionally foo'
textNew = textOld.replace('foo', 'bar')
t1 = make_test(textNew)
self.assertEqual(
self.cluster_global({'test a': {textNew: [t1]}}, [{'key': textOld}]),
{textOld: {'test a': [t1]}})
############ decode JSON without a bunch of unicode garbage
### http://stackoverflow.com/a/33571117
def json_load_byteified(json_text):
return _byteify(
json.load(json_text, object_hook=_byteify),
ignore_dicts=True
)
def _byteify(data, ignore_dicts=False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
################################
class IntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='summarize_test_')
os.chdir(self.tmpdir)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_main(self):
def smear(l):
"given a list of dictionary deltas, return a list of dictionaries"
cur = {}
out = []
for delta in l:
cur.update(delta)
out.append(dict(cur))
return out
json.dump(smear([
{'started': 1234, 'number': 1, 'tests_failed': 1, 'tests_run': 2,
'elapsed': 4, 'path': 'gs://logs/some-job/1', 'job': 'some-job', 'result': 'SUCCESS'},
{'number': 2, 'path': 'gs://logs/some-job/2'},
{'number': 3, 'path': 'gs://logs/some-job/3'},
{'number': 4, 'path': 'gs://logs/some-job/4'},
{'number': 5, 'path': 'gs://logs/other-job/5', 'job': 'other-job', 'elapsed': 8},
{'number': 7, 'path': 'gs://logs/other-job/7', 'result': 'FAILURE'},
]), open('builds.json', 'w'))
json.dump(smear([
{'name': 'example test', 'build': 'gs://logs/some-job/1',
'failure_text': 'some awful stack trace exit 1'},
{'build': 'gs://logs/some-job/2'},
{'build': 'gs://logs/some-job/3'},
{'build': 'gs://logs/some-job/4'},
{'name': 'another test', 'failure_text': 'some other error message'},
{'name': 'unrelated test', 'build': 'gs://logs/other-job/5'},
{}, # intentional dupe
{'build': 'gs://logs/other-job/7'},
]), open('tests.json', 'w'))
json.dump({
'node': ['example']
}, open('owners.json', 'w'))
summarize.main(summarize.parse_args(
['builds.json', 'tests.json',
'--output_slices=failure_data_PREFIX.json',
'--owners=owners.json']))
output = json_load_byteified(open('failure_data.json'))
# uncomment when output changes
# import pprint; pprint.pprint(output)
self.assertEqual(
output['builds'],
{'cols': {'elapsed': [8, 8, 4, 4, 4, 4],
'executor': [None, None, None, None, None, None],
'pr': [None, None, None, None, None, None],
'result': ['SUCCESS',
'FAILURE',
'SUCCESS',
'SUCCESS',
'SUCCESS',
'SUCCESS'],
'started': [1234, 1234, 1234, 1234, 1234, 1234],
'tests_failed': [1, 1, 1, 1, 1, 1],
'tests_run': [2, 2, 2, 2, 2, 2]},
'job_paths': {'other-job': 'gs://logs/other-job',
'some-job': 'gs://logs/some-job'},
'jobs': {'other-job': {'5': 0, '7': 1}, 'some-job': [1, 4, 2]}})
random_hash_1 = output['clustered'][0]['id']
random_hash_2 = output['clustered'][1]['id']
self.assertEqual(
output['clustered'],
[{'id': random_hash_1,
'key': 'some awful stack trace exit 1',
'tests': [{'jobs': [{'builds': [4, 3, 2, 1],
'name': 'some-job'}],
'name': 'example test'}],
'spans': [29],
'owner': 'node',
'text': 'some awful stack trace exit 1'},
{'id': random_hash_2,
'key': 'some other error message',
'tests': [{'jobs': [{'builds': [7, 5],
'name': 'other-job'}],
'name': 'unrelated test'},
{'jobs': [{'builds': [4], 'name': 'some-job'}],
'name': 'another test'}],
'spans': [24],
'owner': 'testing',
'text': 'some other error message'}]
)
slice_output = json_load_byteified(open('failure_data_%s.json' % random_hash_1[:2]))
self.assertEqual(slice_output['clustered'], [output['clustered'][0]])
self.assertEqual(slice_output['builds']['cols']['started'], [1234, 1234, 1234, 1234])
if __name__ == '__main__':
unittest.main()
| grodrigues3/test-infra | triage/summarize_test.py | Python | apache-2.0 | 9,657 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(xla_test.XLATestCase):
def testInt(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
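      # A value lands in bucket k when exactly k boundaries are <= value,
      # e.g. -5 -> 0, 0 -> 1, 3 -> 2, 8 -> 3 and 11 -> 4 here.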
self.assertAllEqual(expected_out,
sess.run(op, {p: [-5, 0, 2, 3, 5, 8, 10, 11, 12]}))
def testFloat(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(
expected_out,
sess.run(op, {p: [-5., 0., 2., 3., 5., 8., 10., 11., 12.]}))
def test2DInput(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
self.assertAllEqual(
expected_out, sess.run(op,
{p: [[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]}))
@test_util.disable_mlir_bridge("Error handling")
def testInvalidBoundariesOrder(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
sess.run(op, {p: [-5, 0]})
def testBoundariesNotList(self):
with self.session():
with self.assertRaisesRegex(TypeError, "Expected list.*"):
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
math_ops._bucketize(p, boundaries=0)
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/compiler/tests/bucketize_op_test.py | Python | apache-2.0 | 2,955 |
#!/usr/bin/env python
# Jay Smith
# [email protected]
#
########################################################################
# Copyright 2013 Mandiant
# Copyright 2014 FireEye
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
########################################################################
#
# Attempts to set types for struct members based on searching for
# like-named types in IDA's type libraries.
#
########################################################################
import re
import sys
import ctypes
import logging
from PySide import QtGui
from PySide import QtCore
from PySide.QtCore import Qt
import idc
import idaapi
import idautils
import jayutils
from struct_typer_widget import Ui_Dialog
############################################################
# Several type-related functions aren't accessible via IDAPython
# so we have to do things with ctypes
idaname = "ida64" if idc.__EA64__ else "ida"
if sys.platform == "win32":
g_dll = ctypes.windll[idaname + ".wll"]
elif sys.platform == "linux2":
g_dll = ctypes.cdll["lib" + idaname + ".so"]
elif sys.platform == "darwin":
g_dll = ctypes.cdll["lib" + idaname + ".dylib"]
############################################################
# Specifying function types for a few IDA SDK functions to keep the
# pointer-to-pointer args clear.
get_named_type = g_dll.get_named_type
get_named_type.argtypes = [
ctypes.c_void_p, #const til_t *ti,
ctypes.c_char_p, #const char *name,
ctypes.c_int, #int ntf_flags,
ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), #const type_t **type=NULL,
ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), #const p_list **fields=NULL,
ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), #const char **cmt=NULL,
ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)), #const p_list **fieldcmts=NULL,
ctypes.POINTER(ctypes.c_ulong), #sclass_t *sclass=NULL,
ctypes.POINTER(ctypes.c_ulong), #uint32 *value=NULL);
]
print_type_to_one_line = g_dll.print_type_to_one_line
print_type_to_one_line.argtypes = [
ctypes.c_char_p, #char *buf,
ctypes.c_ulong, #size_t bufsize,
ctypes.c_void_p, #const til_t *ti,
ctypes.POINTER(ctypes.c_ubyte), #const type_t *pt,
ctypes.c_char_p, #const char *name = NULL,
ctypes.POINTER(ctypes.c_ubyte), #const char *cmt = NULL,
ctypes.POINTER(ctypes.c_ubyte), #const p_list *field_names = NULL,
ctypes.POINTER(ctypes.c_ubyte), #const p_list *field_cmts = NULL);
]
############################################################
def manualTypeCopy(dest, destOff, destLen, src):
'''Copies an IDA type 'string' to the given location'''
i = 0
while (i+destOff) < destLen:
dest[i+destOff] = chr(src[i])
if (src[i] == 0) or (src[i] == '\x00'):
break
i += 1
g_NUMBERS = '0123456789'
def stripNumberedName(name):
'''Remove trailing unique ID like IDA does for same names'''
idx = len(name) -1
while idx >= 0:
if (name[idx] == '_'):
if (len(name)-1) == idx:
#last char is '_', not allowed so return name
return name
else:
#encountered a '_', strip here
return name[:idx]
if name[idx] in g_NUMBERS:
#still processing tail
pass
else:
#encountered unexpected sequence, just return name
return name
idx -= 1
return name
def loadMembers(struc, sid):
'''Returns list of tuples of (offset, memberName, member)'''
#mixing idc & idaapi, kinda annoying but need low-level idaapi for a
# type access, but cant dig into structs...
members = []
off = g_dll.get_struc_first_offset(struc)
while off >= 0:
member = g_dll.get_member(struc, ctypes.c_int(off))
if (member == 0) or (member is None):
pass #not really an error, i guess
else:
members.append( (off, idc.GetMemberName(sid, off), member) )
off = g_dll.get_struc_next_offset(struc, ctypes.c_int(off) )
members.sort(key = lambda mem: mem[0])
return members
def loadStructs():
idx = idaapi.get_first_struc_idx()
existingStructs = []
while idx != idc.BADADDR:
tid = idaapi.get_struc_by_idx(idx)
existingStructs.append(idaapi.get_struc_name(tid))
idx = idaapi.get_next_struc_idx(idx)
existingStructs.sort()
return existingStructs
############################################################
g_DefaultPrefixRegexp = r'field_.*_'
class StructTyperWidget(QtGui.QDialog):
def __init__(self, parent=None):
QtGui.QDialog.__init__(self, parent)
try:
self.logger = jayutils.getLogger('StructTyperWidget')
self.logger.debug('StructTyperWidget starting up')
self.ui=Ui_Dialog()
self.ui.setupUi(self)
self.ui.lineEdit.setText(g_DefaultPrefixRegexp)
self.ui.checkBox.setChecked(Qt.CheckState.Unchecked)
except Exception, err:
self.logger.exception('Error during init: %s', str(err))
def getActiveStruct(self):
return str(self.ui.listWidget.currentItem().data(Qt.ItemDataRole.DisplayRole))
def setStructs(self, structs):
for name in structs:
item = QtGui.QListWidgetItem(name)
self.ui.listWidget.addItem(item)
def getRegPrefix(self):
if self.ui.checkBox.isChecked():
return str(self.ui.lineEdit.text())
return ''
############################################################
class StructTypeRunner(object):
def __init__(self):
self.logger = jayutils.getLogger('SearchLauncher')
def run(self):
try:
self.logger.debug('Starting up')
dlg = StructTyperWidget()
dlg.setStructs(loadStructs())
oldTo = idaapi.set_script_timeout(0)
res = dlg.exec_()
idaapi.set_script_timeout(oldTo)
if res == QtGui.QDialog.DialogCode.Accepted:
regPrefix = dlg.getRegPrefix()
sid = None
struc = None
if dlg.ui.rb_useStackFrame.isChecked():
ea = idc.here()
sid = idc.GetFrame(ea)
struc = idaapi.get_frame(ea)
self.logger.debug('Dialog result: accepted stack frame')
if (sid is None) or (sid == idc.BADADDR):
#i should really figure out which is the correct error case
raise RuntimeError('Failed to get sid for stack frame at 0x%x' % ea)
if (struc is None) or (struc == 0) or (struc == idc.BADADDR):
raise RuntimeError('Failed to get struc_t for stack frame at 0x%x' % ea)
#need the actual pointer value, not the swig wrapped struc_t
                    struc = long(struc.this)
else:
structName = dlg.getActiveStruct()
self.logger.debug('Dialog result: accepted %s "%s"', type(structName), structName)
sid = idc.GetStrucIdByName(structName)
if (sid is None) or (sid == idc.BADADDR):
#i should really figure out which is the correct error case
raise RuntimeError('Failed to get sid for %s' % structName)
tid = idaapi.get_struc_id(structName)
if (tid is None) or (tid == 0) or (tid == idc.BADADDR):
#i should really figure out which is the correct error case
raise RuntimeError('Failed to get tid_t for %s' % structName)
struc = g_dll.get_struc(tid)
if (struc is None) or (struc == 0) or (struc == idc.BADADDR):
raise RuntimeError('Failed to get struc_t for %s' % structName)
foundMembers = self.processStruct(regPrefix, struc, sid)
if dlg.ui.rb_useStackFrame.isChecked() and (foundMembers != 0):
#reanalyze current function if we're analyzing a stack frame & found some func pointers
funcstart = idc.GetFunctionAttr(idc.here(), idc.FUNCATTR_START)
funcend = idc.GetFunctionAttr(idc.here(), idc.FUNCATTR_END)
if (funcstart != idc.BADADDR) and (funcend != idc.BADADDR):
idc.AnalyzeArea(funcstart, funcend)
elif res == QtGui.QDialog.DialogCode.Rejected:
self.logger.info('Dialog result: canceled by user')
else:
self.logger.debug('Unknown result')
raise RuntimeError('Dialog unknown result')
except Exception, err:
self.logger.exception("Exception caught: %s", str(err))
def filterName(self, regPrefix, name):
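        # Strip IDA's trailing _NN uniquifier and, if configured, the
        # user-supplied regex prefix (default r'field_.*_'), leaving a name
        # that can be looked up in the type library.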
funcname = stripNumberedName(name)
if len(regPrefix) != 0:
reg = re.compile('('+regPrefix+')(.*)')
m = reg.match(funcname)
if m is not None:
self.logger.debug('Stripping prefix: %s -> %s', name, m.group(2))
funcname = m.group(2)
else:
#if it does not match, continue to see if it can still match
pass
return funcname
def processStruct(self, regPrefix, struc, sid):
'''
Returns the number of identified struct members whose type was found
'''
til = ctypes.c_void_p.in_dll(g_dll, 'idati')
members = loadMembers(struc, sid)
foundFunctions = 0
for off, name, memb in members:
funcname = self.filterName(regPrefix, name)
typ_type = ctypes.POINTER(ctypes.c_ubyte)()
typ_fields = ctypes.POINTER(ctypes.c_ubyte)()
typ_cmt = ctypes.POINTER(ctypes.c_ubyte)()
typ_fieldcmts = ctypes.POINTER(ctypes.c_ubyte)()
typ_sclass = ctypes.c_ulong()
value = ctypes.c_ulong()
ret = get_named_type(
til,
funcname,
idaapi.NTF_SYMM,
ctypes.byref(typ_type),
ctypes.byref(typ_fields),
ctypes.byref(typ_cmt),
ctypes.byref(typ_fieldcmts),
ctypes.byref(typ_sclass),
ctypes.byref(value)
)
if ret == 0:
self.logger.debug('Could not find %s', funcname)
else:
foundFunctions += 1
if typ_type[0] != idaapi.BT_FUNC:
#not positive that the first type value has to be BT_FUNC or not...
# and whether it's important to only apply to funcs or not
self.logger.debug('Found named type, but not a function: %s', funcname)
else:
type_arr = ctypes.create_string_buffer(0x400)
type_arr[0] = chr(idaapi.BT_PTR)
manualTypeCopy(type_arr, 1, len(type_arr), typ_type)
ret = g_dll.set_member_tinfo(
til,
struc,
memb,
ctypes.c_uint(0),
type_arr,
typ_fields,
ctypes.c_uint(0),
)
name_buffer = ctypes.create_string_buffer(0x400)
print_type_to_one_line(
name_buffer,
len(name_buffer),
til,
typ_type,
funcname,
typ_cmt,
typ_fields,
typ_fieldcmts
)
if ret == 0:
self.logger.info('Failed to set_member_tinfo: %s', name_buffer.value)
else:
self.logger.info('set_member_tinfo: %s', name_buffer.value)
return foundFunctions
def main():
#logger = jayutils.configLogger('', logging.DEBUG)
logger = jayutils.configLogger('', logging.INFO)
launcher = StructTypeRunner()
launcher.run()
if __name__ == '__main__':
main()
| CaledoniaProject/flare-ida | python/flare/struct_typer.py | Python | apache-2.0 | 13,026 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset interface to the MNIST dataset.
This is cloned from
https://github.com/tensorflow/models/blob/master/official/r1/mnist/dataset.py
"""
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
| tensorflow/tensorflow | tensorflow/lite/tutorials/dataset.py | Python | apache-2.0 | 4,189 |
# pylint: disable=E1103
from warnings import catch_warnings
from numpy.random import randn
import numpy as np
import pytest
import pandas as pd
from pandas.compat import lrange
import pandas.compat as compat
from pandas.util.testing import assert_frame_equal
from pandas import DataFrame, MultiIndex, Series, Index, merge, concat
from pandas._libs import join as libjoin
import pandas.util.testing as tm
from pandas.tests.reshape.test_merge import get_test_data, N, NGROUPS
a_ = np.array
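# Short alias for np.array, used by the cython join tests below.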
class TestJoin(object):
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame({'key1': get_test_data(),
'key2': get_test_data(),
'data1': np.random.randn(N),
'data2': np.random.randn(N)})
# exclude a couple keys for fun
self.df = self.df[self.df['key2'] > 1]
self.df2 = DataFrame({'key1': get_test_data(n=N // 5),
'key2': get_test_data(ngroups=NGROUPS // 2,
n=N // 5),
'value': np.random.randn(N // 5)})
index, data = tm.getMixedTypeDict()
self.target = DataFrame(data, index=index)
# Join on string value
self.source = DataFrame({'MergedA': data['A'], 'MergedD': data['D']},
index=data['C'])
def test_cython_left_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
ls, rs = libjoin.left_outer_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8, 9, 10])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5, -1, -1])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_right_outer_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1], dtype=np.int64)
max_group = 5
rs, ls = libjoin.left_outer_join(right, left, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
# 0 1 1 1
exp_li = a_([0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5,
# 2 2 4
6, 7, 8, 6, 7, 8, -1])
exp_ri = a_([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3,
4, 4, 4, 5, 5, 5, 6])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_cython_inner_join(self):
left = a_([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.int64)
right = a_([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.int64)
max_group = 5
ls, rs = libjoin.inner_join(left, right, max_group)
exp_ls = left.argsort(kind='mergesort')
exp_rs = right.argsort(kind='mergesort')
exp_li = a_([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
6, 6, 7, 7, 8, 8])
exp_ri = a_([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3,
4, 5, 4, 5, 4, 5])
exp_ls = exp_ls.take(exp_li)
exp_ls[exp_li == -1] = -1
exp_rs = exp_rs.take(exp_ri)
exp_rs[exp_ri == -1] = -1
tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
def test_left_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='left')
joined_both = merge(self.df, self.df2)
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='left')
def test_right_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='right')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='right')
joined_both = merge(self.df, self.df2, how='right')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='right')
def test_full_outer_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='outer')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='outer')
joined_both = merge(self.df, self.df2, how='outer')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='outer')
def test_inner_join(self):
joined_key2 = merge(self.df, self.df2, on='key2', how='inner')
_check_join(self.df, self.df2, joined_key2, ['key2'], how='inner')
joined_both = merge(self.df, self.df2, how='inner')
_check_join(self.df, self.df2, joined_both, ['key1', 'key2'],
how='inner')
def test_handle_overlap(self):
joined = merge(self.df, self.df2, on='key2',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key1.bar' in joined
def test_handle_overlap_arbitrary_key(self):
joined = merge(self.df, self.df2,
left_on='key2', right_on='key1',
suffixes=['.foo', '.bar'])
assert 'key1.foo' in joined
assert 'key2.bar' in joined
def test_join_on(self):
target = self.target
source = self.source
merged = target.join(source, on='C')
tm.assert_series_equal(merged['MergedA'], target['A'],
check_names=False)
tm.assert_series_equal(merged['MergedD'], target['D'],
check_names=False)
# join with duplicates (fix regression from DataFrame/Matrix merge)
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
joined = df.join(df2, on='key')
expected = DataFrame({'key': ['a', 'a', 'b', 'b', 'c'],
'value': [0, 0, 1, 1, 2]})
assert_frame_equal(joined, expected)
# Test when some are missing
df_a = DataFrame([[1], [2], [3]], index=['a', 'b', 'c'],
columns=['one'])
df_b = DataFrame([['foo'], ['bar']], index=[1, 2],
columns=['two'])
df_c = DataFrame([[1], [2]], index=[1, 2],
columns=['three'])
joined = df_a.join(df_b, on='one')
joined = joined.join(df_c, on='one')
assert np.isnan(joined['two']['c'])
assert np.isnan(joined['three']['c'])
        # merge column not present
pytest.raises(KeyError, target.join, source, on='E')
# overlap
source_copy = source.copy()
source_copy['A'] = 0
pytest.raises(ValueError, target.join, source_copy, on='A')
def test_join_on_fails_with_different_right_index(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, left_on='a', right_index=True)
def test_join_on_fails_with_different_left_index(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)},
index=tm.makeCustomIndex(10, 2))
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)})
merge(df, df2, right_on='b', left_index=True)
def test_join_on_fails_with_different_column_counts(self):
with pytest.raises(ValueError):
df = DataFrame({'a': np.random.choice(['m', 'f'], size=3),
'b': np.random.randn(3)})
df2 = DataFrame({'a': np.random.choice(['m', 'f'], size=10),
'b': np.random.randn(10)},
index=tm.makeCustomIndex(10, 2))
merge(df, df2, right_on='a', left_on=['a', 'b'])
def test_join_on_fails_with_wrong_object_type(self):
# GH12081
wrongly_typed = [Series([0, 1]), 2, 'str', None, np.array([0, 1])]
df = DataFrame({'a': [1, 1]})
for obj in wrongly_typed:
with tm.assert_raises_regex(ValueError, str(type(obj))):
merge(obj, df, left_on='a', right_on='a')
with tm.assert_raises_regex(ValueError, str(type(obj))):
merge(df, obj, left_on='a', right_on='a')
def test_join_on_pass_vector(self):
expected = self.target.join(self.source, on='C')
del expected['C']
join_col = self.target.pop('C')
result = self.target.join(self.source, on=join_col)
assert_frame_equal(result, expected)
def test_join_with_len0(self):
# nothing to merge
merged = self.target.join(self.source.reindex([]), on='C')
for col in self.source:
assert col in merged
assert merged[col].isna().all()
merged2 = self.target.join(self.source.reindex([]), on='C',
how='inner')
tm.assert_index_equal(merged2.columns, merged.columns)
assert len(merged2) == 0
def test_join_on_inner(self):
df = DataFrame({'key': ['a', 'a', 'd', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1]}, index=['a', 'b'])
joined = df.join(df2, on='key', how='inner')
expected = df.join(df2, on='key')
expected = expected[expected['value'].notna()]
tm.assert_series_equal(joined['key'], expected['key'],
check_dtype=False)
tm.assert_series_equal(joined['value'], expected['value'],
check_dtype=False)
tm.assert_index_equal(joined.index, expected.index)
def test_join_on_singlekey_list(self):
df = DataFrame({'key': ['a', 'a', 'b', 'b', 'c']})
df2 = DataFrame({'value': [0, 1, 2]}, index=['a', 'b', 'c'])
# corner cases
joined = df.join(df2, on=['key'])
expected = df.join(df2, on='key')
assert_frame_equal(joined, expected)
def test_join_on_series(self):
result = self.target.join(self.source['MergedA'], on='C')
expected = self.target.join(self.source[['MergedA']], on='C')
assert_frame_equal(result, expected)
def test_join_on_series_buglet(self):
# GH #638
df = DataFrame({'a': [1, 1]})
ds = Series([2], index=[1], name='b')
result = df.join(ds, on='a')
expected = DataFrame({'a': [1, 1],
'b': [2, 2]}, index=df.index)
tm.assert_frame_equal(result, expected)
def test_join_index_mixed(self):
df1 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(10),
columns=['A', 'B', 'C', 'D'])
assert df1['B'].dtype == np.int64
assert df1['D'].dtype == np.bool_
df2 = DataFrame({'A': 1., 'B': 2, 'C': 'foo', 'D': True},
index=np.arange(0, 10, 2),
columns=['A', 'B', 'C', 'D'])
# overlap
joined = df1.join(df2, lsuffix='_one', rsuffix='_two')
expected_columns = ['A_one', 'B_one', 'C_one', 'D_one',
'A_two', 'B_two', 'C_two', 'D_two']
df1.columns = expected_columns[:4]
df2.columns = expected_columns[4:]
expected = _join_by_hand(df1, df2)
assert_frame_equal(joined, expected)
# no overlapping blocks
df1 = DataFrame(index=np.arange(10))
df1['bool'] = True
df1['string'] = 'foo'
df2 = DataFrame(index=np.arange(5, 15))
df2['int'] = 1
df2['float'] = 1.
for kind in ['inner', 'outer', 'left', 'right']:
joined = df1.join(df2, how=kind)
expected = _join_by_hand(df1, df2, how=kind)
assert_frame_equal(joined, expected)
joined = df2.join(df1, how=kind)
expected = _join_by_hand(df2, df1, how=kind)
assert_frame_equal(joined, expected)
def test_join_empty_bug(self):
# generated an exception in 0.4.3
x = DataFrame()
x.join(DataFrame([3], index=[0], columns=['A']), how='outer')
def test_join_unconsolidated(self):
# GH #331
a = DataFrame(randn(30, 2), columns=['a', 'b'])
c = Series(randn(30))
a['c'] = c
d = DataFrame(randn(30, 1), columns=['q'])
# it works!
a.join(d)
d.join(a)
def test_join_multiindex(self):
index1 = MultiIndex.from_arrays([['a', 'a', 'a', 'b', 'b', 'b'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
index2 = MultiIndex.from_arrays([['b', 'b', 'b', 'c', 'c', 'c'],
[1, 2, 3, 1, 2, 3]],
names=['first', 'second'])
df1 = DataFrame(data=np.random.randn(6), index=index1,
columns=['var X'])
df2 = DataFrame(data=np.random.randn(6), index=index2,
columns=['var Y'])
df1 = df1.sort_index(level=0)
df2 = df2.sort_index(level=0)
joined = df1.join(df2, how='outer')
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
df1 = df1.sort_index(level=1)
df2 = df2.sort_index(level=1)
joined = df1.join(df2, how='outer').sort_index(level=0)
ex_index = Index(index1.values).union(Index(index2.values))
expected = df1.reindex(ex_index).join(df2.reindex(ex_index))
expected.index.names = index1.names
assert_frame_equal(joined, expected)
assert joined.index.names == index1.names
def test_join_inner_multiindex(self):
key1 = ['bar', 'bar', 'bar', 'foo', 'foo', 'baz', 'baz', 'qux',
'qux', 'snap']
key2 = ['two', 'one', 'three', 'one', 'two', 'one', 'two', 'two',
'three', 'one']
data = np.random.randn(len(key1))
data = DataFrame({'key1': key1, 'key2': key2,
'data': data})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
to_join = DataFrame(np.random.randn(10, 3), index=index,
columns=['j_one', 'j_two', 'j_three'])
joined = data.join(to_join, on=['key1', 'key2'], how='inner')
expected = merge(data, to_join.reset_index(),
left_on=['key1', 'key2'],
right_on=['first', 'second'], how='inner',
sort=False)
expected2 = merge(to_join, data,
right_on=['key1', 'key2'], left_index=True,
how='inner', sort=False)
assert_frame_equal(joined, expected2.reindex_like(joined))
expected2 = merge(to_join, data, right_on=['key1', 'key2'],
left_index=True, how='inner', sort=False)
expected = expected.drop(['first', 'second'], axis=1)
expected.index = joined.index
assert joined.index.is_monotonic
assert_frame_equal(joined, expected)
# _assert_same_contents(expected, expected2.loc[:, expected.columns])
def test_join_hierarchical_mixed(self):
# GH 2024
df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=['a', 'b', 'c'])
new_df = df.groupby(['a']).agg({'b': [np.mean, np.sum]})
other_df = DataFrame(
[(1, 2, 3), (7, 10, 6)], columns=['a', 'b', 'd'])
other_df.set_index('a', inplace=True)
# GH 9455, 12219
with tm.assert_produces_warning(UserWarning):
result = merge(new_df, other_df, left_index=True, right_index=True)
assert ('b', 'mean') in result
assert 'b' in result
def test_join_float64_float32(self):
a = DataFrame(randn(10, 2), columns=['a', 'b'], dtype=np.float64)
b = DataFrame(randn(10, 1), columns=['c'], dtype=np.float32)
joined = a.join(b)
assert joined.dtypes['a'] == 'float64'
assert joined.dtypes['b'] == 'float64'
assert joined.dtypes['c'] == 'float32'
a = np.random.randint(0, 5, 100).astype('int64')
b = np.random.random(100).astype('float64')
c = np.random.random(100).astype('float32')
df = DataFrame({'a': a, 'b': b, 'c': c})
xpdf = DataFrame({'a': a, 'b': b, 'c': c})
s = DataFrame(np.random.random(5).astype('float32'), columns=['md'])
rs = df.merge(s, left_on='a', right_index=True)
assert rs.dtypes['a'] == 'int64'
assert rs.dtypes['b'] == 'float64'
assert rs.dtypes['c'] == 'float32'
assert rs.dtypes['md'] == 'float32'
xp = xpdf.merge(s, left_on='a', right_index=True)
assert_frame_equal(rs, xp)
def test_join_many_non_unique_index(self):
df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]})
df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]})
df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='outer')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='outer')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='outer')
result = result.reset_index()
expected = expected[result.columns]
expected['a'] = expected.a.astype('int64')
expected['b'] = expected.b.astype('int64')
assert_frame_equal(result, expected)
df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]})
df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]})
df3 = DataFrame(
{"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]})
idf1 = df1.set_index(["a", "b"])
idf2 = df2.set_index(["a", "b"])
idf3 = df3.set_index(["a", "b"])
result = idf1.join([idf2, idf3], how='inner')
df_partially_merged = merge(df1, df2, on=['a', 'b'], how='inner')
expected = merge(df_partially_merged, df3, on=['a', 'b'], how='inner')
result = result.reset_index()
assert_frame_equal(result, expected.loc[:, result.columns])
# GH 11519
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
s = Series(np.repeat(np.arange(8), 2),
index=np.repeat(np.arange(8), 2), name='TEST')
inner = df.join(s, how='inner')
outer = df.join(s, how='outer')
left = df.join(s, how='left')
right = df.join(s, how='right')
assert_frame_equal(inner, outer)
assert_frame_equal(inner, left)
assert_frame_equal(inner, right)
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
right = DataFrame({'value2': ['a', 'b', 'c']},
index=['bar', 'baz', 'foo'])
joined = left.join(right, on='key', sort=True)
expected = DataFrame({'key': ['bar', 'baz', 'foo', 'foo'],
'value': [2, 3, 1, 4],
'value2': ['a', 'b', 'c', 'c']},
index=[1, 2, 0, 3])
assert_frame_equal(joined, expected)
# smoke test
joined = left.join(right, on='key', sort=False)
tm.assert_index_equal(joined.index, pd.Index(lrange(4)))
def test_join_mixed_non_unique_index(self):
# GH 12814, unorderable types in py3 with a non-unique index
df1 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 3, 'a'])
df2 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 3, 3, 4])
result = df1.join(df2)
expected = DataFrame({'a': [1, 2, 3, 3, 4],
'b': [5, np.nan, 6, 7, np.nan]},
index=[1, 2, 3, 3, 'a'])
tm.assert_frame_equal(result, expected)
df3 = DataFrame({'a': [1, 2, 3, 4]}, index=[1, 2, 2, 'a'])
df4 = DataFrame({'b': [5, 6, 7, 8]}, index=[1, 2, 3, 4])
result = df3.join(df4)
expected = DataFrame({'a': [1, 2, 3, 4], 'b': [5, 6, 6, np.nan]},
index=[1, 2, 2, 'a'])
tm.assert_frame_equal(result, expected)
def test_join_non_unique_period_index(self):
# GH #16871
index = pd.period_range('2016-01-01', periods=16, freq='M')
df = DataFrame([i for i in range(len(index))],
index=index, columns=['pnum'])
df2 = concat([df, df])
result = df.join(df2, how='inner', rsuffix='_df2')
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=['pnum', 'pnum_df2'], index=df2.sort_index().index)
tm.assert_frame_equal(result, expected)
def test_mixed_type_join_with_suffix(self):
# GH #916
df = DataFrame(np.random.randn(20, 6),
columns=['a', 'b', 'c', 'd', 'e', 'f'])
df.insert(0, 'id', 0)
df.insert(5, 'dt', 'foo')
grouped = df.groupby('id')
mn = grouped.mean()
cn = grouped.count()
# it works!
mn.join(cn, rsuffix='_right')
def test_join_many(self):
df = DataFrame(np.random.randn(10, 6), columns=list('abcdef'))
df_list = [df[['a', 'b']], df[['c', 'd']], df[['e', 'f']]]
joined = df_list[0].join(df_list[1:])
tm.assert_frame_equal(joined, df)
df_list = [df[['a', 'b']][:-2],
df[['c', 'd']][2:], df[['e', 'f']][1:9]]
def _check_diff_index(df_list, result, exp_index):
reindexed = [x.reindex(exp_index) for x in df_list]
expected = reindexed[0].join(reindexed[1:])
tm.assert_frame_equal(result, expected)
# different join types
joined = df_list[0].join(df_list[1:], how='outer')
_check_diff_index(df_list, joined, df.index)
joined = df_list[0].join(df_list[1:])
_check_diff_index(df_list, joined, df_list[0].index)
joined = df_list[0].join(df_list[1:], how='inner')
_check_diff_index(df_list, joined, df.index[2:8])
pytest.raises(ValueError, df_list[0].join, df_list[1:], on='a')
def test_join_many_mixed(self):
df = DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
df['key'] = ['foo', 'bar'] * 4
df1 = df.loc[:, ['A', 'B']]
df2 = df.loc[:, ['C', 'D']]
df3 = df.loc[:, ['key']]
result = df1.join([df2, df3])
assert_frame_equal(result, df)
def test_join_dups(self):
# joining dups
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
expected = concat([df, df], axis=1)
result = df.join(df, rsuffix='_2')
result.columns = expected.columns
assert_frame_equal(result, expected)
# GH 4975, invalid join on dups
w = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
x = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
y = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
z = DataFrame(np.random.randn(4, 2), columns=["x", "y"])
dta = x.merge(y, left_index=True, right_index=True).merge(
z, left_index=True, right_index=True, how="outer")
dta = dta.merge(w, left_index=True, right_index=True)
expected = concat([x, y, z, w], axis=1)
expected.columns = ['x_x', 'y_x', 'x_y',
'y_y', 'x_x', 'y_x', 'x_y', 'y_y']
assert_frame_equal(dta, expected)
def test_panel_join(self):
with catch_warnings(record=True):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.iloc[:2, :10, :3]
p2 = panel.iloc[2:, 5:, 2:]
# left join
result = p1.join(p2)
expected = p1.copy()
expected['ItemC'] = p2['ItemC']
tm.assert_panel_equal(result, expected)
# right join
result = p1.join(p2, how='right')
expected = p2.copy()
expected['ItemA'] = p1['ItemA']
expected['ItemB'] = p1['ItemB']
expected = expected.reindex(items=['ItemA', 'ItemB', 'ItemC'])
tm.assert_panel_equal(result, expected)
# inner join
result = p1.join(p2, how='inner')
expected = panel.iloc[:, 5:10, 2:3]
tm.assert_panel_equal(result, expected)
# outer join
result = p1.join(p2, how='outer')
expected = p1.reindex(major=panel.major_axis,
minor=panel.minor_axis)
expected = expected.join(p2.reindex(major=panel.major_axis,
minor=panel.minor_axis))
tm.assert_panel_equal(result, expected)
def test_panel_join_overlap(self):
with catch_warnings(record=True):
panel = tm.makePanel()
tm.add_nans(panel)
p1 = panel.loc[['ItemA', 'ItemB', 'ItemC']]
p2 = panel.loc[['ItemB', 'ItemC']]
# Expected index is
#
# ItemA, ItemB_p1, ItemC_p1, ItemB_p2, ItemC_p2
joined = p1.join(p2, lsuffix='_p1', rsuffix='_p2')
p1_suf = p1.loc[['ItemB', 'ItemC']].add_suffix('_p1')
p2_suf = p2.loc[['ItemB', 'ItemC']].add_suffix('_p2')
no_overlap = panel.loc[['ItemA']]
expected = no_overlap.join(p1_suf.join(p2_suf))
tm.assert_panel_equal(joined, expected)
def test_panel_join_many(self):
with catch_warnings(record=True):
tm.K = 10
panel = tm.makePanel()
tm.K = 4
panels = [panel.iloc[:2], panel.iloc[2:6], panel.iloc[6:]]
joined = panels[0].join(panels[1:])
tm.assert_panel_equal(joined, panel)
panels = [panel.iloc[:2, :-5],
panel.iloc[2:6, 2:],
panel.iloc[6:, 5:-7]]
data_dict = {}
for p in panels:
data_dict.update(p.iteritems())
joined = panels[0].join(panels[1:], how='inner')
expected = pd.Panel.from_dict(data_dict, intersect=True)
tm.assert_panel_equal(joined, expected)
joined = panels[0].join(panels[1:], how='outer')
expected = pd.Panel.from_dict(data_dict, intersect=False)
tm.assert_panel_equal(joined, expected)
# edge cases
pytest.raises(ValueError, panels[0].join, panels[1:],
how='outer', lsuffix='foo', rsuffix='bar')
pytest.raises(ValueError, panels[0].join, panels[1:],
how='right')
def _check_join(left, right, result, join_col, how='left',
lsuffix='_x', rsuffix='_y'):
# some smoke tests
for c in join_col:
assert(result[c].notna().all())
left_grouped = left.groupby(join_col)
right_grouped = right.groupby(join_col)
for group_key, group in result.groupby(join_col):
l_joined = _restrict_to_columns(group, left.columns, lsuffix)
r_joined = _restrict_to_columns(group, right.columns, rsuffix)
try:
lgroup = left_grouped.get_group(group_key)
except KeyError:
if how in ('left', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(l_joined, left.columns, join_col)
else:
_assert_same_contents(l_joined, lgroup)
try:
rgroup = right_grouped.get_group(group_key)
except KeyError:
if how in ('right', 'inner'):
raise AssertionError('key %s should not have been in the join'
% str(group_key))
_assert_all_na(r_joined, right.columns, join_col)
else:
_assert_same_contents(r_joined, rgroup)
def _restrict_to_columns(group, columns, suffix):
found = [c for c in group.columns
if c in columns or c.replace(suffix, '') in columns]
# filter
group = group.loc[:, found]
# get rid of suffixes, if any
group = group.rename(columns=lambda x: x.replace(suffix, ''))
# put in the right order...
group = group.loc[:, columns]
return group
def _assert_same_contents(join_chunk, source):
NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly...
jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values
svalues = source.fillna(NA_SENTINEL).drop_duplicates().values
rows = set(tuple(row) for row in jvalues)
assert(len(rows) == len(source))
assert(all(tuple(row) in rows for row in svalues))
def _assert_all_na(join_chunk, source_columns, join_col):
for c in source_columns:
if c in join_col:
continue
assert(join_chunk[c].isna().all())
def _join_by_hand(a, b, how='left'):
join_index = a.index.join(b.index, how=how)
a_re = a.reindex(join_index)
b_re = b.reindex(join_index)
result_columns = a.columns.append(b.columns)
for col, s in compat.iteritems(b_re):
a_re[col] = s
return a_re.reindex(columns=result_columns)
| NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/reshape/test_join.py | Python | apache-2.0 | 31,344 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for functions used to extract and analyze stacks."""
import traceback
from tensorflow.python.platform import test
from tensorflow.python.util import tf_stack
class TFStackTest(test.TestCase):
def testFormatStackSelfConsistency(self):
# Both defined on the same line to produce identical stacks.
stacks = tf_stack.extract_stack(), traceback.extract_stack()
self.assertEqual(
traceback.format_list(stacks[0]), traceback.format_list(stacks[1]))
def testFrameSummaryEquality(self):
frames1 = tf_stack.extract_stack()
frames2 = tf_stack.extract_stack()
self.assertNotEqual(frames1[0], frames1[1])
self.assertEqual(frames1[0], frames1[0])
self.assertEqual(frames1[0], frames2[0])
def testFrameSummaryEqualityAndHash(self):
# Both defined on the same line to produce identical stacks.
frame1, frame2 = tf_stack.extract_stack(), tf_stack.extract_stack()
self.assertEqual(len(frame1), len(frame2))
for f1, f2 in zip(frame1, frame2):
self.assertEqual(f1, f2)
self.assertEqual(hash(f1), hash(f1))
self.assertEqual(hash(f1), hash(f2))
self.assertEqual(frame1, frame2)
self.assertEqual(hash(tuple(frame1)), hash(tuple(frame2)))
def testLastUserFrame(self):
trace = tf_stack.extract_stack() # COMMENT
frame = trace.last_user_frame()
self.assertRegex(frame.line, "# COMMENT")
def testGetUserFrames(self):
def func():
trace = tf_stack.extract_stack() # COMMENT
frames = list(trace.get_user_frames())
return frames
frames = func() # CALLSITE
self.assertRegex(frames[-1].line, "# COMMENT")
self.assertRegex(frames[-2].line, "# CALLSITE")
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/python/util/tf_stack_test.py | Python | apache-2.0 | 2,415 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_log import helpers
from oslotest import base as test_base
class LogHelpersTestCase(test_base.BaseTestCase):
def test_log_decorator(self):
'''Test that LOG.debug is called with proper arguments.'''
class test_class(object):
@helpers.log_method_call
def test_method(self, arg1, arg2, arg3, *args, **kwargs):
pass
@classmethod
@helpers.log_method_call
def test_classmethod(cls, arg1, arg2, arg3, *args, **kwargs):
pass
args = tuple(range(6))
kwargs = {'kwarg1': 6, 'kwarg2': 7}
obj = test_class()
for method_name in ('test_method', 'test_classmethod'):
data = {'class_name': helpers._get_full_class_name(test_class),
'method_name': method_name,
'args': args,
'kwargs': kwargs}
method = getattr(obj, method_name)
with mock.patch('logging.Logger.debug') as debug:
method(*args, **kwargs)
debug.assert_called_with(mock.ANY, data)
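# Illustrative application-side use of the decorator exercised above (not part
# of the test suite); class and method names are placeholders:
#
#     from oslo_log import helpers
#
#     class Worker(object):
#         @helpers.log_method_call
#         def run(self, job_id):
#             return job_id
#
# Calling Worker().run(42) then emits a DEBUG record carrying the class name,
# method name and arguments, which is exactly what the assertion above checks.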
| akash1808/oslo.log | oslo_log/tests/unit/test_helpers.py | Python | apache-2.0 | 1,699 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import os
import time
import Axon
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Chassis.PAR import PAR
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.UI.Pygame.Button import Button
from Kamaelia.UI.Pygame.Image import Image
from Kamaelia.UI.Pygame.Ticker import Ticker
from Kamaelia.UI.Pygame.Text import TextDisplayer, Textbox
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
from Kamaelia.UI.Pygame.VideoSurface import VideoSurface
from Kamaelia.Util.Chooser import Chooser
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.Video.PixFormatConversion import ToRGB_interleaved
class timedShutdown(Axon.ThreadedComponent.threadedcomponent):
TTL = 1
def main(self):
time.sleep(self.TTL)
self.send(Axon.Ipc.shutdownMicroprocess(), "signal")
path = "Slides"
extn = ".gif"
allfiles = os.listdir(path)
files = list()
for fname in allfiles:
if fname[-len(extn):]==extn:
files.append(os.path.join(path,fname))
files.sort()
file = "/data/dirac-video/snowboard-jum-352x288x75.dirac.drc"
framerate = 3
Pipeline(
timedShutdown(TTL=15),
PAR(
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(position=(260,48), size=(200,300)),
),
Pipeline( ReadFileAdaptor(file, readmode="bitrate", bitrate = 2280960*8),
DiracDecoder(),
# MessageRateLimit(framerate),
ToRGB_interleaved(),
VideoSurface(size=(200, 300), position=(600,48)),
),
Pipeline(
PAR(
Button(caption="Next", msg="NEXT", position=(72,8)),
Button(caption="Previous", msg="PREV", position=(8,8)),
Button(caption="First", msg="FIRST" ,position=(256,8)),
Button(caption="Last", msg="LAST", position=(320,8)),
),
Chooser(items = files),
Image(size=(200,300), position=(8,48), maxpect=(200,300)),
),
Pipeline(
Textbox(size=(200,300), position=(8,360)),
TextDisplayer(size=(200,300), position=(228,360)),
),
Ticker(size=(200,300), position=(450,360)),
),
).run()
| sparkslabs/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/UsingChassis/PAR/par_shutdown.py | Python | apache-2.0 | 3,471 |
# -*- coding: utf-8 -*-
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Looks for overlapping exceptions."""
import astroid
from pylint import interfaces
from pylint import checkers
from pylint.checkers import utils
from pylint.checkers.exceptions import _annotated_unpack_infer
class OverlappingExceptionsChecker(checkers.BaseChecker):
"""Checks for two or more exceptions in the same exception handler
clause that are identical or parts of the same inheritance hierarchy
(i.e. overlapping)."""
__implements__ = interfaces.IAstroidChecker
name = 'overlap-except'
msgs = {'W0714': ('Overlapping exceptions (%s)',
'overlapping-except',
'Used when exceptions in handler overlap or are identical')}
priority = -2
options = ()
@utils.check_messages('overlapping-except')
def visit_tryexcept(self, node):
"""check for empty except"""
for handler in node.handlers:
if handler.type is None:
continue
if isinstance(handler.type, astroid.BoolOp):
continue
try:
excs = list(_annotated_unpack_infer(handler.type))
except astroid.InferenceError:
continue
handled_in_clause = []
for part, exc in excs:
if exc is astroid.YES:
continue
if (isinstance(exc, astroid.Instance) and
utils.inherit_from_std_ex(exc)):
# pylint: disable=protected-access
exc = exc._proxied
if not isinstance(exc, astroid.ClassDef):
continue
exc_ancestors = [anc for anc in exc.ancestors()
if isinstance(anc, astroid.ClassDef)]
for prev_part, prev_exc in handled_in_clause:
prev_exc_ancestors = [anc for anc in prev_exc.ancestors()
if isinstance(anc, astroid.ClassDef)]
if exc == prev_exc:
self.add_message('overlapping-except',
node=handler.type,
args='%s and %s are the same' %
(prev_part.as_string(),
part.as_string()))
elif (prev_exc in exc_ancestors or
exc in prev_exc_ancestors):
ancestor = part if exc in prev_exc_ancestors else prev_part
descendant = part if prev_exc in exc_ancestors else prev_part
self.add_message('overlapping-except',
node=handler.type,
args='%s is an ancestor class of %s' %
(ancestor.as_string(), descendant.as_string()))
handled_in_clause += [(part, exc)]
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(OverlappingExceptionsChecker(linter))
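# Illustrative code (not part of the checker) that this message targets:
# ValueError derives from Exception, so listing both in one handler overlaps
# and W0714 ("Exception is an ancestor class of ValueError") is emitted.
#
#     try:
#         risky()
#     except (Exception, ValueError):
#         pass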
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/pylint/extensions/overlapping_exceptions.py | Python | apache-2.0 | 3,292 |
##################################
# Laboratory Server configuration #
##################################
from __future__ import print_function, unicode_literals
laboratory_assigned_experiments = {
'exp1:dummy1@Dummy experiments':
{
'coord_address': 'myexperiment:myprocess@myhost',
'checkers': ()
},
}
| morelab/weblabdeusto | server/src/test/deployments/webclient_dummy/lab_config.py | Python | bsd-2-clause | 374 |
# relay skeleton & normal socks relay
import logging
import time
from gevent import socket
from gevent import select
from utils import request_fail, basic_handshake_server, read_request, \
sock_addr_info, request_success, pipe_tcp, bind_local_udp, addr_info, \
bind_local_sock_by_addr, pipe_udp
from msg import CMD_NOT_SUPPORTED, CONNECT, BIND, UDP_ASSOCIATE, \
GENERAL_SOCKS_SERVER_FAILURE, UDPRequest
log = logging.getLogger(__name__)
class RelaySessionError(Exception): pass
class RelayFactory(object):
def create_relay_session(self, socksconn, clientaddr):
raise NotImplementedError
class RelaySession(object):
def __init__(self, socksconn):
self.socksconn = socksconn
self.timeout = self.socksconn.gettimeout()
self.allsocks = [self.socksconn]
def track_sock(self, sock):
# track all sockets so we know what to clean
self.allsocks.append(sock)
def cmd_bind(self, req):
request_fail(self.socksconn, req, CMD_NOT_SUPPORTED)
def proc_tcp_request(self, req):
raise NotImplementedError
def relay_tcp(self):
raise NotImplementedError
def cmd_connect(self, req):
        # TCP relaying takes two steps: process the request, then pipe the data.
self.proc_tcp_request(req)
self.relay_tcp()
def cmd_udp_associate(self, req):
# UDP is more specific
raise NotImplementedError
def process(self):
try:
if not basic_handshake_server(self.socksconn):
self.clean()
return
req = read_request(self.socksconn)
{
CONNECT: self.cmd_connect,
BIND: self.cmd_bind,
UDP_ASSOCIATE : self.cmd_udp_associate
}[req.cmd](req)
self.clean()
        except Exception as e:
log.error("[Exception][RelaySession]: %s" % str(e))
self.clean()
def clean(self):
for sock in self.allsocks:
if sock:
sock.close()
class SocksSession(RelaySession):
def __init__(self, socksconn):
super(SocksSession, self).__init__(socksconn)
self.remoteconn = None
self.client_associate = None
self.last_clientaddr = None
self.client2local_udpsock = None
self.local2remote_udpsock = None
def proc_tcp_request(self, req):
dst = (req.dstaddr, req.dstport)
log.info("TCP request address: (%s:%d)" % dst)
self.remoteconn = socket.create_connection(dst, self.timeout)
self.track_sock(self.remoteconn)
addrtype, bndaddr, bndport = sock_addr_info(self.remoteconn)
request_success(self.socksconn, addrtype, bndaddr, bndport)
def relay_tcp(self):
pipe_tcp(self.socksconn, self.remoteconn, self.timeout, self.timeout)
def proc_udp_request(self, req):
self.client_associate = (req.dstaddr, req.dstport)
log.info("UDP client adress: (%s:%d)" % self.client_associate)
self.last_clientaddr = self.client_associate
self.client2local_udpsock = bind_local_udp(self.socksconn)
if not self.client2local_udpsock:
request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
return False
self.track_sock(self.client2local_udpsock)
bndtype, bndaddr, bndport = sock_addr_info(self.client2local_udpsock)
log.info("UDP ACCOSIATE: (%s:%d)" % (bndaddr, bndport))
request_success(self.socksconn, bndtype, bndaddr, bndport)
return True
def wait_for_first_udp(self):
        # Wait until the first valid packet arrives.
start = time.time()
timeout = self.timeout
while True:
readable, _, _ = select.select([self.socksconn, self.client2local_udpsock], [], [], timeout)
if not readable:
raise socket.timeout("timeout") # @UndefinedVariable
if self.socksconn in readable:
raise RelaySessionError("unexcepted read-event from tcp socket in UDP session")
timeout -= (time.time() - start)
if timeout <= 0:
raise socket.timeout("timeout") # @UndefinedVariable
data, addr = self.client2local_udpsock.recvfrom(65536)
try:
udpreq = UDPRequest(data)
if udpreq.frag == '\x00':
return data, addr
except:
pass
def relay_udp(self, firstdata, firstaddr):
def addrchecker():
def _(ip, port):
if self.client_associate[0] == "0.0.0.0" or \
self.client_associate[0] == "::":
return True
if self.client_associate == (ip, port):
return True
log.info("UDP packet dropped for invalid address.")
return False
return _
def c2r():
def _(data, addr):
self.last_clientaddr = addr
try:
udpreq = UDPRequest(data)
if udpreq.frag != '\x00':
return None, None
return udpreq.data, (udpreq.dstaddr, udpreq.dstport)
                except Exception as e:
log.error("[relay_udp][c2r] Exception: %s", str(e))
return None, None
return _
def r2c():
def _(data, addr):
addrtype, dstaddr, dstport = addr_info(addr)
udpreq = UDPRequest(addrtype=addrtype, dstaddr=dstaddr, dstport=dstport, data=data)
return udpreq.pack(), self.last_clientaddr
return _
data, dst = c2r()(firstdata, firstaddr)
self.local2remote_udpsock = bind_local_sock_by_addr(dst)
self.track_sock(self.local2remote_udpsock)
self.local2remote_udpsock.send(data)
pipe_udp([self.socksconn],
self.client2local_udpsock, self.local2remote_udpsock,
self.timeout, self.timeout,
addrchecker(), c2r(), r2c())
def cmd_udp_associate(self, req):
if self.proc_udp_request(req):
firstdata, firstaddr = self.wait_for_first_udp()
self.relay_udp(firstdata, firstaddr)
class SocksRelayFactory(RelayFactory):
def create_relay_session(self, socksconn, clientaddr):
log.info("New socks connection from %s" % str(clientaddr))
return SocksSession(socksconn)
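# Minimal usage sketch: serve plain SOCKS5 with gevent's StreamServer. The
# address, port and timeout below are illustrative assumptions, not part of
# this module's contract.
if __name__ == "__main__":
    from gevent.server import StreamServer
    def _handle(sock, clientaddr):
        sock.settimeout(60.0)
        session = SocksRelayFactory().create_relay_session(sock, clientaddr)
        session.process()
    StreamServer(("127.0.0.1", 1080), _handle).serve_forever()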
| yinghuocho/gsocks | relay.py | Python | bsd-2-clause | 6,590 |
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(train_size=train_size, n_splits=250,
random_state=1))
grid.fit(X, y)
        scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| toastedcornflakes/scikit-learn | examples/svm/plot_svm_scale_c.py | Python | bsd-3-clause | 5,404 |
###
# Copyright (c) 2014, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""Restricted equivalent to six."""
from __future__ import division
import sys
import warnings
if sys.version_info[0] >= 3:
PY2 = False
PY3 = True
intern = sys.intern
integer_types = (int,)
string_types = (str,)
long = int
import io
import pickle
import queue
u = lambda x:x
L = lambda x:x
def make_datetime_utc(dt):
import datetime
return dt.replace(tzinfo=datetime.timezone.utc)
def timedelta__totalseconds(td):
return td.total_seconds()
if sys.version_info >= (3, 3):
def datetime__timestamp(dt):
return dt.timestamp()
else:
def datetime__timestamp(dt):
import datetime
td = dt - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
return timedelta__totalseconds(td)
else:
PY2 = True
PY3 = False
if isinstance(__builtins__, dict):
intern = __builtins__['intern']
else:
intern = __builtins__.intern
integer_types = (int, long)
string_types = (basestring,)
long = long
class io:
# cStringIO is buggy with Python 2.6 (
# see http://paste.progval.net/show/227/ )
# and it does not handle unicode objects in Python 2.x
from StringIO import StringIO
from cStringIO import StringIO as BytesIO
import cPickle as pickle
import Queue as queue
u = lambda x:x.decode('utf8')
L = lambda x:long(x)
def make_datetime_utc(dt):
warnings.warn('Timezones are not available on this version of '
'Python and may lead to incorrect results. You should '
'consider upgrading to Python 3.')
return dt.replace(tzinfo=None)
if sys.version_info >= (2, 7):
def timedelta__totalseconds(td):
return td.total_seconds()
else:
def timedelta__totalseconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def datetime__timestamp(dt):
import datetime
warnings.warn('Timezones are not available on this version of '
'Python and may lead to incorrect results. You should '
'consider upgrading to Python 3.')
return timedelta__totalseconds(dt - datetime.datetime(1970, 1, 1))
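# Illustrative consumer code (not part of this module): callers that need a
# byte buffer or a total-seconds value can lean on the shims above and stay
# agnostic of the running Python version. The import path is an assumption
# based on this file living under the package's utils directory.
#
#     from supybot.utils import minisix
#     buf = minisix.io.BytesIO(b'payload')
#     seconds = minisix.timedelta__totalseconds(some_timedelta)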
| ProgVal/Limnoria-test | src/utils/minisix.py | Python | bsd-3-clause | 3,909 |
from django import forms
from django.forms.widgets import HiddenInput
from django.utils.translation import ugettext_lazy as _
from .models import ICON_CHOICES, Tidbit, Feedback, TagSuggestion
from suggestions.forms import InstanceCreateSuggestionForm
class SearchForm(forms.Form):
q = forms.CharField()
class TidbitSuggestionForm(InstanceCreateSuggestionForm):
title = forms.CharField(label=_('Title'), max_length=40,
initial=_('Did you know ?'))
icon = forms.ChoiceField(label=_('Icon'), choices=ICON_CHOICES)
content = forms.CharField(label=_('Content'),
widget=forms.Textarea(attrs={'rows': 3}))
button_text = forms.CharField(label=_('Button text'), max_length=100)
button_link = forms.CharField(label=_('Button link'), max_length=255)
class Meta:
model = Tidbit
caption = _('Suggest Tidbit')
def get_data(self, request):
"Add suggested_by for the tidbit to the action data"
data = super(TidbitSuggestionForm, self).get_data(request)
data['suggested_by'] = request.user
return data
class TagSuggestionForm(InstanceCreateSuggestionForm):
name = forms.CharField(label=_('Name'))
app_label = forms.CharField(widget=HiddenInput)
object_type = forms.CharField(widget=HiddenInput)
object_id = forms.CharField(widget=HiddenInput)
class Meta:
model = TagSuggestion
caption = _('Suggest Tag')
def __init__(self, *args, **kwargs):
super(TagSuggestionForm, self).__init__(*args, **kwargs)
self.helper.form_action = 'suggest-tag-post'
def get_data(self, request):
data = super(TagSuggestionForm, self).get_data(request)
data['suggested_by'] = request.user
return data
class FeedbackSuggestionForm(InstanceCreateSuggestionForm):
content = forms.CharField(label=_('Content'),
widget=forms.Textarea(attrs={'rows': 3}))
url = forms.CharField(widget=forms.HiddenInput, max_length=400)
class Meta:
model = Feedback
caption = _('Send Feedback')
def __init__(self, *args, **kwargs):
super(FeedbackSuggestionForm, self).__init__(*args, **kwargs)
self.helper.form_action = 'feedback-post'
def get_data(self, request):
"Add suggested_by for the tidbit to the action data"
data = super(FeedbackSuggestionForm, self).get_data(request)
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
data.update({
'suggested_by': request.user,
'ip_address': ip,
'user_agent': request.META.get('HTTP_USER_AGENT'),
})
return data
| DanaOshri/Open-Knesset | auxiliary/forms.py | Python | bsd-3-clause | 2,849 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent86140B import *
class agilent86146B(agilent86140B):
"Agilent 86146B OSA driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '86146B')
super(agilent86146B, self).__init__(*args, **kwargs)
| elopezga/ErrorRate | ivi/agilent/agilent86146B.py | Python | mit | 1,413 |
"""Test MailChimp OAuth2 v3 Flow."""
from allauth.socialaccount.tests import OAuth2TestsMixin
from allauth.tests import MockedResponse, TestCase
from .provider import MailChimpProvider
class MailChimpTests(OAuth2TestsMixin, TestCase):
"""Test Class for MailChimp OAuth2 v3."""
provider_id = MailChimpProvider.id
def get_mocked_response(self):
"""Test authentication with an non-null avatar."""
return MockedResponse(200, """{
"dc": "usX",
"role": "owner",
"accountname": "Name can have spaces",
"user_id": "99999999",
"login": {
"email": "[email protected]",
"avatar": "http://gallery.mailchimp.com/1a1a/avatar/2a2a.png",
"login_id": "88888888",
"login_name": "[email protected]",
"login_email": "[email protected]"
},
"login_url": "https://login.mailchimp.com",
"api_endpoint": "https://usX.api.mailchimp.com"
}""")
| joshowen/django-allauth | allauth/socialaccount/providers/mailchimp/tests.py | Python | mit | 1,032 |
#####################################################################
#
# Utilities for pretty printing of table data
# Author: A.Tsaregorodtsev
#
#####################################################################
__RCSID__ = '$Id$'
import StringIO
def int_with_commas(i):
  # Keep the sign out of the grouping loop so that negative values do not
  # pick up a stray "-," group.
  s = str(i)
  sign = ''
  if s.startswith('-'):
    sign = '-'
    s = s[1:]
  news = ''
  while len(s) > 0:
    news = s[-3:]+","+news
    s = s[:-3]
  return sign + news[:-1]
def printTable( fields, records, sortField='', numbering=True,
printOut = True, columnSeparator = ' ' ):
""" Utility to pretty print tabular data
"""
stringBuffer = StringIO.StringIO()
if not records:
if printOut:
print "No output"
return "No output"
records = list( records )
nFields = len(fields)
if nFields != len(records[0]):
out = "Incorrect data structure to print, nFields %d, nRecords %d" % ( nFields, len(records[0]) )
if printOut:
print out
return out
if sortField:
    records.sort( key=lambda x: x[fields.index( sortField )] )
lengths = []
for i in range(nFields):
lengths.append(len(fields[i]))
for r in records:
if len(r[i]) > lengths[i]:
lengths[i] = len(r[i])
numberWidth = len( str( len( records ) ) ) + 1
separatorWidth = len( columnSeparator )
totalLength = 0
for i in lengths:
totalLength += i
totalLength += separatorWidth
if numbering:
totalLength += (numberWidth + separatorWidth)
if numbering:
stringBuffer.write( ' '*(numberWidth+separatorWidth) )
for i in range(nFields):
stringBuffer.write( fields[i].ljust(lengths[i]+separatorWidth) )
stringBuffer.write( '\n' )
stringBuffer.write( '='*totalLength + '\n' )
count = 1
for r in records:
if numbering:
if count == len(records) and records[-1][0] == "Total":
stringBuffer.write( " "*(numberWidth+separatorWidth) )
else:
stringBuffer.write( str(count).rjust(numberWidth)+columnSeparator )
for i in range(nFields):
stringBuffer.write( r[i].ljust(lengths[i])+columnSeparator )
stringBuffer.write( '\n' )
if count == len(records)-1 and records[-1][0] == "Total":
stringBuffer.write( '-'*totalLength + '\n' )
count += 1
output = stringBuffer.getvalue()
if printOut:
print output
  return output
| Sbalbp/DIRAC | Core/Utilities/PrettyPrint.py | Python | gpl-3.0 | 2,481 |
from datetime import date, datetime, time
from warnings import warn
from django.db import models
from django.db.models import fields
from south.db import generic
from south.db.generic import delete_column_constraints, invalidate_table_constraints, copy_column_constraints
from south.exceptions import ConstraintDropped
from django.utils.encoding import smart_unicode
class DatabaseOperations(generic.DatabaseOperations):
"""
django-pyodbc (sql_server.pyodbc) implementation of database operations.
"""
backend_name = "pyodbc"
add_column_string = 'ALTER TABLE %s ADD %s;'
alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
alter_string_set_null = 'ALTER COLUMN %(column)s %(type)s NULL'
alter_string_drop_null = 'ALTER COLUMN %(column)s %(type)s NOT NULL'
allows_combined_alters = False
drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
drop_constraint_string = 'ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s'
#create_check_constraint_sql = "ALTER TABLE %(table)s " + \
# generic.DatabaseOperations.add_check_constraint_fragment
create_foreign_key_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s " + \
"FOREIGN KEY (%(column)s) REFERENCES %(target)s"
create_unique_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s UNIQUE (%(columns)s)"
default_schema_name = "dbo"
has_booleans = False
@delete_column_constraints
def delete_column(self, table_name, name):
q_table_name, q_name = (self.quote_name(table_name), self.quote_name(name))
# Zap the indexes
for ind in self._find_indexes_for_column(table_name,name):
params = {'table_name':q_table_name, 'index_name': ind}
sql = self.drop_index_string % params
self.execute(sql, [])
# Zap the constraints
for const in self._find_constraints_for_column(table_name,name):
params = {'table_name':q_table_name, 'constraint_name': const}
sql = self.drop_constraint_string % params
self.execute(sql, [])
# Zap default if exists
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sql = "ALTER TABLE [%s] %s" % (table_name, drop_default)
self.execute(sql, [])
# Finally zap the column itself
self.execute(self.delete_column_string % (q_table_name, q_name), [])
def _find_indexes_for_column(self, table_name, name):
"Find the indexes that apply to a column, needed when deleting"
sql = """
SELECT si.name, si.id, sik.colid, sc.name
FROM dbo.sysindexes SI WITH (NOLOCK)
INNER JOIN dbo.sysindexkeys SIK WITH (NOLOCK)
ON SIK.id = Si.id
AND SIK.indid = SI.indid
INNER JOIN dbo.syscolumns SC WITH (NOLOCK)
ON SI.id = SC.id
AND SIK.colid = SC.colid
WHERE SI.indid !=0
AND Si.id = OBJECT_ID('%s')
AND SC.name = '%s'
"""
idx = self.execute(sql % (table_name, name), [])
return [i[0] for i in idx]
def _find_constraints_for_column(self, table_name, name, just_names=True):
"""
Find the constraints that apply to a column, needed when deleting. Defaults not included.
This is more general than the parent _constraints_affecting_columns, as on MSSQL this
includes PK and FK constraints.
"""
sql = """
SELECT CC.[CONSTRAINT_NAME]
,TC.[CONSTRAINT_TYPE]
,CHK.[CHECK_CLAUSE]
,RFD.TABLE_SCHEMA
,RFD.TABLE_NAME
,RFD.COLUMN_NAME
-- used for normalized names
,CC.TABLE_NAME
,CC.COLUMN_NAME
FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] TC
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE CC
ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS CHK
ON CHK.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND CHK.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND CHK.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'CHECK' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS REF
ON REF.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND REF.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND REF.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'FOREIGN KEY' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
ON RFD.CONSTRAINT_CATALOG = REF.UNIQUE_CONSTRAINT_CATALOG
AND RFD.CONSTRAINT_SCHEMA = REF.UNIQUE_CONSTRAINT_SCHEMA
AND RFD.CONSTRAINT_NAME = REF.UNIQUE_CONSTRAINT_NAME
WHERE CC.CONSTRAINT_CATALOG = CC.TABLE_CATALOG
AND CC.CONSTRAINT_SCHEMA = CC.TABLE_SCHEMA
AND CC.TABLE_CATALOG = %s
AND CC.TABLE_SCHEMA = %s
AND CC.TABLE_NAME = %s
AND CC.COLUMN_NAME = %s
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
table = self.execute(sql, [db_name, schema_name, table_name, name])
if just_names:
return [r[0] for r in table]
all = {}
for r in table:
cons_name, type = r[:2]
if type=='PRIMARY KEY' or type=='UNIQUE':
cons = all.setdefault(cons_name, (type,[]))
cons[1].append(r[7])
elif type=='CHECK':
cons = (type, r[2])
elif type=='FOREIGN KEY':
if cons_name in all:
raise NotImplementedError("Multiple-column foreign keys are not supported")
else:
cons = (type, r[3:6])
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
all[cons_name] = cons
return all
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
        Will not automatically add _id by default; to have this behaviour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
self._fix_field_definition(field)
if not ignore_constraints:
qn = self.quote_name
sch = qn(self._get_schema_name())
tab = qn(table_name)
table = ".".join([sch, tab])
constraints = self._find_constraints_for_column(table_name, name, False)
for constraint in constraints.keys():
params = dict(table_name = table,
constraint_name = qn(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
ret_val = super(DatabaseOperations, self).alter_column(table_name, name, field, explicit_name, ignore_constraints=True)
if not ignore_constraints:
for cname, (ctype,args) in constraints.items():
params = dict(table = table,
constraint = qn(cname))
if ctype=='UNIQUE':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_unique_sql % params
elif ctype=='PRIMARY KEY':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_primary_key_string % params
elif ctype=='FOREIGN KEY':
continue
# Foreign keys taken care of below
#target = "%s.%s(%s)" % tuple(map(qn,args))
#params.update(column = qn(name), target = target)
#sql = self.create_foreign_key_sql % params
elif ctype=='CHECK':
warn(ConstraintDropped("CHECK "+ args, table_name, name))
continue
#TODO: Some check constraints should be restored; but not before the generic
# backend restores them.
#params['check'] = args
#sql = self.create_check_constraint_sql % params
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
self.execute(sql, [])
# Create foreign key if necessary
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
return ret_val
def _alter_set_defaults(self, field, name, params, sqls):
"Subcommand of alter_column that sets default values (overrideable)"
# First drop the current default if one exists
table_name = self.quote_name(params['table_name'])
drop_default = self.drop_column_default_sql(table_name, name)
if drop_default:
sqls.append((drop_default, []))
# Next, set any default
if field.has_default():
default = field.get_default()
literal = self._value_to_unquoted_literal(field, default)
sqls.append(('ADD DEFAULT %s for %s' % (self._quote_string(literal), self.quote_name(name),), []))
def _value_to_unquoted_literal(self, field, value):
# Start with the field's own translation
conn = self._get_connection()
value = field.get_db_prep_save(value, connection=conn)
# This is still a Python object -- nobody expects to need a literal.
if isinstance(value, basestring):
return smart_unicode(value)
elif isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
#TODO: Anybody else needs special translations?
return str(value)
def _default_value_workaround(self, value):
if isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _quote_string(self, s):
return "'" + s.replace("'","''") + "'"
def drop_column_default_sql(self, table_name, name, q_name=None):
"MSSQL specific drop default, which is a pain"
sql = """
SELECT object_name(cdefault)
FROM syscolumns
WHERE id = object_id('%s')
AND name = '%s'
"""
cons = self.execute(sql % (table_name, name), [])
if cons and cons[0] and cons[0][0]:
return "DROP CONSTRAINT %s" % cons[0][0]
return None
def _fix_field_definition(self, field):
if isinstance(field, (fields.BooleanField, fields.NullBooleanField)):
if field.default == True:
field.default = 1
if field.default == False:
field.default = 0
# This is copied from South's generic add_column, with two modifications:
# 1) The sql-server-specific call to _fix_field_definition
# 2) Removing a default, when needed, by calling drop_default and not the more general alter_column
@invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=True):
"""
Adds the column 'name' to the table 'table_name'.
        Uses the 'field' parameter, a django.db.models.fields.Field instance,
to generate the necessary sql
@param table_name: The name of the table to add the column to
@param name: The name of the column to add
@param field: The field to use
"""
self._fix_field_definition(field)
sql = self.column_sql(table_name, name, field)
if sql:
params = (
self.quote_name(table_name),
sql,
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if not keep_default and field.default is not None:
field.default = fields.NOT_PROVIDED
#self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
self.drop_default(table_name, name, field)
@invalidate_table_constraints
def drop_default(self, table_name, name, field):
fragment = self.drop_column_default_sql(table_name, name)
if fragment:
table_name = self.quote_name(table_name)
sql = " ".join(["ALTER TABLE", table_name, fragment])
self.execute(sql)
@invalidate_table_constraints
def create_table(self, table_name, field_defs):
# Tweak stuff as needed
for _, f in field_defs:
self._fix_field_definition(f)
# Run
generic.DatabaseOperations.create_table(self, table_name, field_defs)
def _find_referencing_fks(self, table_name):
"MSSQL does not support cascading FKs when dropping tables, we need to implement."
# FK -- Foreign Keys
# UCTU -- Unique Constraints Table Usage
# FKTU -- Foreign Key Table Usage
# (last two are both really CONSTRAINT_TABLE_USAGE, different join conditions)
sql = """
SELECT FKTU.TABLE_SCHEMA as REFING_TABLE_SCHEMA,
FKTU.TABLE_NAME as REFING_TABLE_NAME,
FK.[CONSTRAINT_NAME] as FK_NAME
FROM [INFORMATION_SCHEMA].[REFERENTIAL_CONSTRAINTS] FK
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] UCTU
ON FK.UNIQUE_CONSTRAINT_CATALOG = UCTU.CONSTRAINT_CATALOG and
FK.UNIQUE_CONSTRAINT_NAME = UCTU.CONSTRAINT_NAME and
FK.UNIQUE_CONSTRAINT_SCHEMA = UCTU.CONSTRAINT_SCHEMA
JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] FKTU
ON FK.CONSTRAINT_CATALOG = FKTU.CONSTRAINT_CATALOG and
FK.CONSTRAINT_NAME = FKTU.CONSTRAINT_NAME and
FK.CONSTRAINT_SCHEMA = FKTU.CONSTRAINT_SCHEMA
WHERE FK.CONSTRAINT_CATALOG = %s
AND UCTU.TABLE_SCHEMA = %s -- REFD_TABLE_SCHEMA
AND UCTU.TABLE_NAME = %s -- REFD_TABLE_NAME
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
return self.execute(sql, [db_name, schema_name, table_name])
@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
"""
Deletes the table 'table_name'.
"""
if cascade:
refing = self._find_referencing_fks(table_name)
            for schema, table, constraint in refing:
                table = ".".join(map(self.quote_name, [schema, table]))
params = dict(table_name = table,
constraint_name = self.quote_name(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
cascade = False
super(DatabaseOperations, self).delete_table(table_name, cascade)
@copy_column_constraints
@delete_column_constraints
def rename_column(self, table_name, old, new):
"""
Renames the column of 'table_name' from 'old' to 'new'.
WARNING - This isn't transactional on MSSQL!
"""
if old == new:
# No Operation
return
# Examples on the MS site show the table name not being quoted...
params = (table_name, self.quote_name(old), self.quote_name(new))
self.execute("EXEC sp_rename '%s.%s', %s, 'COLUMN'" % params)
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
"""
Renames the table 'old_table_name' to 'table_name'.
WARNING - This isn't transactional on MSSQL!
"""
if old_table_name == table_name:
# No Operation
return
params = (self.quote_name(old_table_name), self.quote_name(table_name))
self.execute('EXEC sp_rename %s, %s' % params)
def _db_type_for_alter_column(self, field):
return self._db_positive_type_for_alter_column(DatabaseOperations, field)
def _alter_add_column_mods(self, field, name, params, sqls):
return self._alter_add_positive_check(DatabaseOperations, field, name, params, sqls)
@invalidate_table_constraints
def delete_foreign_key(self, table_name, column):
super(DatabaseOperations, self).delete_foreign_key(table_name, column)
# A FK also implies a non-unique index
find_index_sql = """
SELECT i.name -- s.name, t.name, c.name
FROM sys.tables t
INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
INNER JOIN sys.indexes i ON i.object_id = t.object_id
INNER JOIN sys.index_columns ic ON ic.object_id = t.object_id
INNER JOIN sys.columns c ON c.object_id = t.object_id
AND ic.column_id = c.column_id
WHERE i.is_unique=0 AND i.is_primary_key=0 AND i.is_unique_constraint=0
AND s.name = %s
AND t.name = %s
AND c.name = %s
"""
schema = self._get_schema_name()
indexes = self.execute(find_index_sql, [schema, table_name, column])
qn = self.quote_name
for index in (i[0] for i in indexes):
self.execute("DROP INDEX %s on %s.%s" % (qn(index), qn(schema), qn(table_name) ))
| peterbe/peekaboo | vendor-local/lib/python/south/db/sql_server/pyodbc.py | Python | mpl-2.0 | 18,409 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id$ '''
#modification of users/robin/ttflist.py.
__doc__="""This provides some general-purpose tools for finding fonts.
The FontFinder object can search for font files. It aims to build
a catalogue of fonts which our framework can work with. It may be useful
if you are building GUIs or design-time interfaces and want to present users
with a choice of fonts.
There are 3 steps to using it
1. create FontFinder and set options and directories
2. search
3. query
>>> import fontfinder
>>> ff = fontfinder.FontFinder()
>>> ff.addDirectories([dir1, dir2, dir3])
>>> ff.search()
>>> ff.getFamilyNames() #or whichever queries you want...
Because the disk search takes some time to find and parse hundreds of fonts,
it can use a cache to store a file with all fonts found. The cache file name
is derived from the set of directories searched.
For each font found, it creates a structure with
- the short font name
- the long font name
- the principal file (.pfb for type 1 fonts), and the metrics file if appropriate
- the time modified (unix time stamp)
- a type code ('ttf')
- the family name
- bold and italic attributes
One common use is to display families in a dialog for end users;
then select regular, bold and italic variants of the font. To get
the initial list, use getFamilyNames; these will be in alpha order.
>>> ff.getFamilyNames()
['Bitstream Vera Sans', 'Century Schoolbook L', 'Dingbats', 'LettErrorRobot',
'MS Gothic', 'MS Mincho', 'Nimbus Mono L', 'Nimbus Roman No9 L',
'Nimbus Sans L', 'Vera', 'Standard Symbols L',
'URW Bookman L', 'URW Chancery L', 'URW Gothic L', 'URW Palladio L']
One can then obtain a specific font as follows
>>> f = ff.getFont('Bitstream Vera Sans', bold=False, italic=True)
>>> f.fullName
'Bitstream Vera Sans'
>>> f.fileName
'C:\\code\\reportlab\\fonts\\Vera.ttf'
>>>
It can also produce an XML report of fonts found by family, for the benefit
of non-Python applications.
Future plans might include using this to auto-register fonts; and making it
update itself smartly on repeated instantiation.
"""
import sys, time, os, pickle, tempfile
from xml.sax.saxutils import quoteattr
try:
from hashlib import md5
except ImportError:
from md5 import md5
EXTENSIONS = ['.ttf','.ttc','.otf','.pfb','.pfa']
# PDF font flags (see PDF Reference Guide table 5.19)
FF_FIXED = 1 << 1-1
FF_SERIF = 1 << 2-1
FF_SYMBOLIC = 1 << 3-1
FF_SCRIPT = 1 << 4-1
FF_NONSYMBOLIC = 1 << 6-1
FF_ITALIC = 1 << 7-1
FF_ALLCAP = 1 << 17-1
FF_SMALLCAP = 1 << 18-1
FF_FORCEBOLD = 1 << 19-1
class FontDescriptor:
"""This is a short descriptive record about a font.
typeCode should be a file extension e.g. ['ttf','ttc','otf','pfb','pfa']
"""
def __init__(self):
self.name = None
self.fullName = None
self.familyName = None
self.styleName = None
self.isBold = False #true if it's somehow bold
self.isItalic = False #true if it's italic or oblique or somehow slanty
self.isFixedPitch = False
self.isSymbolic = False #false for Dingbats, Symbols etc.
self.typeCode = None #normally the extension minus the dot
self.fileName = None #full path to where we found it.
self.metricsFileName = None #defined only for type='type1pc', or 'type1mac'
self.timeModified = 0
def __repr__(self):
return "FontDescriptor(%s)" % self.name
def getTag(self):
"Return an XML tag representation"
attrs = []
for k, v in self.__dict__.items():
if k not in ['timeModified']:
if v:
attrs.append('%s=%s' % (k, quoteattr(str(v))))
return '<font ' + ' '.join(attrs) + '/>'
from reportlab.lib.utils import rl_isdir, rl_isfile, rl_listdir, rl_getmtime
class FontFinder:
def __init__(self, dirs=[], useCache=True, validate=False):
self.useCache = useCache
self.validate = validate
self._dirs = set(dirs)
self._fonts = []
self._skippedFiles = [] #list of filenames we did not handle
self._badFiles = [] #list of filenames we rejected
self._fontsByName = {}
self._fontsByFamily = {}
self._fontsByFamilyBoldItalic = {} #indexed by bold, italic
def addDirectory(self, dirName):
#aesthetics - if there are 2 copies of a font, should the first or last
#be picked up? might need reversing
if rl_isdir(dirName):
self._dirs.add(dirName)
def addDirectories(self, dirNames):
for dirName in dirNames:
self.addDirectory(dirName)
def getFamilyNames(self):
"Returns a list of the distinct font families found"
if not self._fontsByFamily:
fonts = self._fonts
for font in fonts:
fam = font.familyName
if fam in self._fontsByFamily:
self._fontsByFamily[fam].append(font)
else:
self._fontsByFamily[fam] = [font]
names = list(self._fontsByFamily.keys())
names.sort()
return names
def getFontsInFamily(self, familyName):
"Return list of all font objects with this family name"
return self._fontsByFamily.get(familyName,[])
def getFamilyXmlReport(self):
"""Reports on all families found as XML.
"""
lines = []
lines.append('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>')
lines.append("<font_families>")
for dirName in self._dirs:
lines.append(" <directory name=%s/>" % quoteattr(dirName))
for familyName in self.getFamilyNames():
if familyName: #skip null case
lines.append(' <family name=%s>' % quoteattr(familyName))
for font in self.getFontsInFamily(familyName):
lines.append(' ' + font.getTag())
lines.append(' </family>')
lines.append("</font_families>")
return '\n'.join(lines)
def getFontsWithAttributes(self, **kwds):
"""This is a general lightweight search."""
selected = []
for font in self._fonts:
OK = True
for k, v in kwds.items():
if getattr(font, k, None) != v:
OK = False
if OK:
selected.append(font)
return selected
def getFont(self, familyName, bold=False, italic=False):
"""Try to find a font matching the spec"""
for font in self._fonts:
if font.familyName == familyName:
if font.isBold == bold:
if font.isItalic == italic:
return font
raise KeyError("Cannot find font %s with bold=%s, italic=%s" % (familyName, bold, italic))
def _getCacheFileName(self):
"""Base this on the directories...same set of directories
should give same cache"""
        hash = md5(''.join(sorted(self._dirs))).hexdigest()  # sorted so the same set of dirs yields the same cache
from reportlab.lib.utils import get_rl_tempfile
fn = get_rl_tempfile('fonts_%s.dat' % hash)
return fn
def save(self, fileName):
f = open(fileName, 'w')
pickle.dump(self, f)
f.close()
def load(self, fileName):
f = open(fileName, 'r')
finder2 = pickle.load(f)
f.close()
self.__dict__.update(finder2.__dict__)
def search(self):
started = time.clock()
if not self._dirs:
raise ValueError("Font search path is empty! Please specify search directories using addDirectory or addDirectories")
if self.useCache:
cfn = self._getCacheFileName()
if rl_isfile(cfn):
try:
self.load(cfn)
#print "loaded cached file with %d fonts (%s)" % (len(self._fonts), cfn)
return
except:
pass #pickle load failed. Ho hum, maybe it's an old pickle. Better rebuild it.
from stat import ST_MTIME
for dirName in self._dirs:
fileNames = rl_listdir(dirName)
for fileName in fileNames:
root, ext = os.path.splitext(fileName)
if ext.lower() in EXTENSIONS:
#it's a font
f = FontDescriptor()
f.fileName = os.path.normpath(os.path.join(dirName, fileName))
f.timeModified = rl_getmtime(f.fileName)
ext = ext.lower()
if ext[0] == '.':
ext = ext[1:]
f.typeCode = ext #strip the dot
#what to do depends on type. We only accept .pfb if we
#have .afm to go with it, and don't handle .otf now.
if ext in ('otf', 'pfa'):
self._skippedFiles.append(fileName)
elif ext in ('ttf','ttc'):
#parsing should check it for us
from reportlab.pdfbase.ttfonts import TTFontFile, TTFError
try:
font = TTFontFile(fileName,validate=self.validate)
except TTFError:
self._badFiles.append(fileName)
continue
f.name = font.name
f.fullName = font.fullName
f.styleName = font.styleName
f.familyName = font.familyName
f.isBold = (FF_FORCEBOLD == FF_FORCEBOLD & font.flags)
f.isItalic = (FF_ITALIC == FF_ITALIC & font.flags)
elif ext == 'pfb':
# type 1; we need an AFM file or have to skip.
if rl_isfile(os.path.join(dirName, root + '.afm')):
f.metricsFileName = os.path.normpath(os.path.join(dirName, root + '.afm'))
elif rl_isfile(os.path.join(dirName, root + '.AFM')):
f.metricsFileName = os.path.normpath(os.path.join(dirName, root + '.AFM'))
else:
self._skippedFiles.append(fileName)
continue
from reportlab.pdfbase.pdfmetrics import parseAFMFile
(info, glyphs) = parseAFMFile(f.metricsFileName)
f.name = info['FontName']
f.fullName = info.get('FullName', f.name)
f.familyName = info.get('FamilyName', None)
f.isItalic = (float(info.get('ItalicAngle', 0)) > 0.0)
#if the weight has the word bold, deem it bold
f.isBold = ('bold' in info.get('Weight','').lower())
self._fonts.append(f)
if self.useCache:
self.save(cfn)
finished = time.clock()
## print "found %d fonts; skipped %d; bad %d. Took %0.2f seconds" % (
## len(self._fonts), len(self._skippedFiles), len(self._badFiles),
## finished - started
## )
def test():
#windows-centric test maybe
from reportlab import rl_config
ff = FontFinder()
ff.useCache = True
ff.validate = True
import reportlab
ff.addDirectory('C:\\windows\\fonts')
rlFontDir = os.path.join(os.path.dirname(reportlab.__file__), 'fonts')
ff.addDirectory(rlFontDir)
ff.search()
print('cache file name...')
print(ff._getCacheFileName())
print('families...')
for familyName in ff.getFamilyNames():
print('\t%s' % familyName)
print()
outw = sys.stdout.write
outw('fonts called Vera:')
for font in ff.getFontsInFamily('Bitstream Vera Sans'):
outw(' %s' % font.name)
print()
outw('Bold fonts\n\t')
for font in ff.getFontsWithAttributes(isBold=True, isItalic=False):
outw(font.fullName+' ')
print()
print('family report')
print(ff.getFamilyXmlReport())
if __name__=='__main__':
test()
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/reportlab/lib/fontfinder.py | Python | agpl-3.0 | 12,203 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AssessmentWorkflow'
db.create_table('workflow_assessmentworkflow', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
('status', self.gf('model_utils.fields.StatusField')(default='peer', max_length=100, no_check_for_status=True)),
('status_changed', self.gf('model_utils.fields.MonitorField')(default=datetime.datetime.now, monitor=u'status')),
('submission_uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=36, db_index=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
))
db.send_create_signal('workflow', ['AssessmentWorkflow'])
def backwards(self, orm):
# Deleting model 'AssessmentWorkflow'
db.delete_table('workflow_assessmentworkflow')
models = {
'workflow.assessmentworkflow': {
'Meta': {'ordering': "['-created']", 'object_name': 'AssessmentWorkflow'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('model_utils.fields.StatusField', [], {'default': "'peer'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submission_uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
}
}
    complete_apps = ['workflow']
| EDUlib/edx-ora2 | openassessment/workflow/migrations/0001_initial.py | Python | agpl-3.0 | 2,344 |
# -*- coding: utf-8 -*-
# (c) 2015 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
class TestSaleOrderType(common.TransactionCase):
def setUp(self):
super(TestSaleOrderType, self).setUp()
self.sale_type_model = self.env['sale.order.type']
self.sale_order_model = self.env['sale.order']
self.stock_picking_model = self.env['stock.picking']
self.picking_type_out = self.env.ref('stock.picking_type_out')
self.sale_line_model = self.env['sale.order.line']
self.picking_model = self.env['stock.picking']
self.partner = self.env.ref('base.res_partner_1')
self.sequence = self.env['ir.sequence'].create({
'name': 'Test Sales Order',
'code': 'sale.order',
'prefix': 'TSO',
'padding': 3,
})
self.journal = self.env.ref('account.sales_journal')
self.refund_journal = self.env.ref('account.refund_sales_journal')
self.warehouse = self.env.ref('stock.stock_warehouse_shop0')
self.product = self.env.ref('product.product_product_4')
self.sale_type = self.sale_type_model.create({
'name': 'Test Sale Order Type',
'sequence_id': self.sequence.id,
'journal_id': self.journal.id,
'refund_journal_id': self.refund_journal.id,
'warehouse_id': self.warehouse.id,
'picking_policy': 'one',
'order_policy': 'picking',
'invoice_state': '2binvoiced',
})
self.partner.sale_type = self.sale_type
def test_sale_order_onchange_partner(self):
onchange_partner = self.sale_order_model.onchange_partner_id(
self.partner.id)
self.assertEqual(self.sale_type.id,
onchange_partner['value']['type_id'])
def test_sale_order_onchange_type(self):
sale_order = self.sale_order_model.new({'type_id': self.sale_type.id})
sale_order.onchange_type_id()
self.assertEqual(self.sale_type.warehouse_id,
sale_order.warehouse_id)
self.assertEqual(self.sale_type.picking_policy,
sale_order.picking_policy)
self.assertEqual(self.sale_type.order_policy, sale_order.order_policy)
def test_sale_order_confirm(self):
sale_order_dict = self.sale_order_model.onchange_partner_id(
self.partner.id)['value']
sale_order_dict['partner_id'] = self.partner.id
sale_line_dict = {
'product_id': self.product.id,
'name': self.product.name,
'product_uom_qty': 1.0,
'price_unit': self.product.lst_price,
}
sale_order_dict['order_line'] = [(0, 0, sale_line_dict)]
sale_order = self.sale_order_model.create(sale_order_dict)
sale_order.onchange_type_id()
sale_order.action_button_confirm()
for picking in sale_order.picking_ids:
self.assertEqual(self.sale_type.invoice_state,
picking.invoice_state)
def test_stock_picking_create(self):
self.picking_out = self.picking_model.create({
'partner_id': self.partner.id,
'picking_type_id': self.picking_type_out.id
})
self.assertTrue(self.picking_out.id)
| luistorresm/sale-workflow | sale_order_type/tests/test_sale_order_type.py | Python | agpl-3.0 | 3,383 |
"""clicksend_tts platform for notify component."""
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY, CONF_RECIPIENT, CONF_USERNAME, CONTENT_TYPE_JSON)
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (PLATFORM_SCHEMA,
BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
BASE_API_URL = 'https://rest.clicksend.com/v3'
HEADERS = {CONTENT_TYPE: CONTENT_TYPE_JSON}
CONF_LANGUAGE = 'language'
CONF_VOICE = 'voice'
CONF_CALLER = 'caller'
DEFAULT_LANGUAGE = 'en-us'
DEFAULT_VOICE = 'female'
TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_RECIPIENT): cv.string,
vol.Optional(CONF_LANGUAGE, default=DEFAULT_LANGUAGE): cv.string,
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): cv.string,
vol.Optional(CONF_CALLER): cv.string,
})
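# Illustrative configuration.yaml entry for this platform, sketched from
# PLATFORM_SCHEMA above rather than taken from documentation; the credential
# and phone-number values are placeholders:
#
#   notify:
#     - platform: clicksend_tts
#       username: YOUR_CLICKSEND_USERNAME
#       api_key: YOUR_CLICKSEND_API_KEY
#       recipient: "+15551234567"
#       language: en-us
#       voice: female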
def get_service(hass, config, discovery_info=None):
"""Get the ClickSend notification service."""
if not _authenticate(config):
_LOGGER.error("You are not authorized to access ClickSend")
return None
return ClicksendNotificationService(config)
class ClicksendNotificationService(BaseNotificationService):
"""Implementation of a notification service for the ClickSend service."""
def __init__(self, config):
"""Initialize the service."""
self.username = config.get(CONF_USERNAME)
self.api_key = config.get(CONF_API_KEY)
self.recipient = config.get(CONF_RECIPIENT)
self.language = config.get(CONF_LANGUAGE)
self.voice = config.get(CONF_VOICE)
self.caller = config.get(CONF_CALLER)
if self.caller is None:
self.caller = self.recipient
def send_message(self, message="", **kwargs):
"""Send a voice call to a user."""
data = ({'messages': [{'source': 'hass.notify', 'from': self.caller,
'to': self.recipient, 'body': message,
'lang': self.language, 'voice': self.voice}]})
api_url = "{}/voice/send".format(BASE_API_URL)
resp = requests.post(api_url,
data=json.dumps(data),
headers=HEADERS,
auth=(self.username, self.api_key),
timeout=TIMEOUT)
if resp.status_code == 200:
return
obj = json.loads(resp.text)
response_msg = obj['response_msg']
response_code = obj['response_code']
_LOGGER.error("Error %s : %s (Code %s)", resp.status_code,
response_msg, response_code)
def _authenticate(config):
"""Authenticate with ClickSend."""
api_url = '{}/account'.format(BASE_API_URL)
resp = requests.get(api_url, headers=HEADERS,
auth=(config.get(CONF_USERNAME),
config.get(CONF_API_KEY)), timeout=TIMEOUT)
if resp.status_code != 200:
return False
return True
| jnewland/home-assistant | homeassistant/components/clicksend_tts/notify.py | Python | apache-2.0 | 3,208 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.apache.spark.operators.spark_jdbc`."""
import warnings
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator, SparkSubmitOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.spark.operators.spark_jdbc`.",
DeprecationWarning,
stacklevel=2,
)
| apache/incubator-airflow | airflow/contrib/operators/spark_jdbc_operator.py | Python | apache-2.0 | 1,180 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author Mark McClain (DreamHost)
import sys
import mock
from quantum.db import migration
from quantum.db.migration import cli
from quantum.tests import base
class TestDbMigration(base.BaseTestCase):
def test_should_run_plugin_in_list(self):
self.assertTrue(migration.should_run('foo', ['foo', 'bar']))
self.assertFalse(migration.should_run('foo', ['bar']))
def test_should_run_plugin_wildcard(self):
self.assertTrue(migration.should_run('foo', ['*']))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.addCleanup(self.do_alembic_cmd_p.stop)
self.addCleanup(cli.CONF.reset)
def _main_test_helper(self, argv, func_name, exp_args=(), exp_kwargs={}):
with mock.patch.object(sys, 'argv', argv):
cli.main()
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, *exp_args, **exp_kwargs)]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
('foo',),
{'sql': False}
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
('foo',),
{'sql': True}
)
def test_current(self):
self._main_test_helper(['prog', 'current'], 'current')
def test_history(self):
self._main_test_helper(['prog', 'history'], 'history')
def test_check_migration(self):
self._main_test_helper(['prog', 'check_migration'], 'branches')
def test_database_sync_revision(self):
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': False, 'autogenerate': True}
)
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
(),
{'message': 'message', 'sql': True, 'autogenerate': False}
)
def test_upgrade(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
('head',),
{'sql': True}
)
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
('+3',),
{'sql': False}
)
def test_downgrade(self):
self._main_test_helper(
['prog', 'downgrade', '--sql', 'folsom'],
'downgrade',
('folsom',),
{'sql': True}
)
self._main_test_helper(
['prog', 'downgrade', '--delta', '2'],
'downgrade',
('-2',),
{'sql': False}
)
| wallnerryan/quantum_migrate | quantum/tests/unit/test_db_migration.py | Python | apache-2.0 | 3,637 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SignatureDef utility functions implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import utils
from tensorflow.python.util.tf_export import tf_export
@tf_export('saved_model.signature_def_utils.build_signature_def')
def build_signature_def(inputs=None, outputs=None, method_name=None):
"""Utility function to build a SignatureDef protocol buffer.
Args:
inputs: Inputs of the SignatureDef defined as a proto map of string to
tensor info.
outputs: Outputs of the SignatureDef defined as a proto map of string to
tensor info.
method_name: Method name of the SignatureDef as a string.
Returns:
A SignatureDef protocol buffer constructed based on the supplied arguments.
"""
signature_def = meta_graph_pb2.SignatureDef()
if inputs is not None:
for item in inputs:
signature_def.inputs[item].CopyFrom(inputs[item])
if outputs is not None:
for item in outputs:
signature_def.outputs[item].CopyFrom(outputs[item])
if method_name is not None:
signature_def.method_name = method_name
return signature_def
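# Minimal usage sketch for build_signature_def (the tensors `x` and `y` are
# placeholders supplied by the caller, not part of this module):
#
#   inputs = {'x': utils.build_tensor_info(x)}
#   outputs = {'y': utils.build_tensor_info(y)}
#   signature = build_signature_def(
#       inputs, outputs, signature_constants.PREDICT_METHOD_NAME)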
@tf_export('saved_model.signature_def_utils.regression_signature_def')
def regression_signature_def(examples, predictions):
"""Creates regression signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Regress API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
predictions: A float `Tensor`.
Returns:
A regression-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Regression examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Regression examples must be a string Tensor.')
if predictions is None:
raise ValueError('Regression predictions cannot be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Regression examples must be a string Tensor.')
signature_inputs = {signature_constants.REGRESS_INPUTS: input_tensor_info}
output_tensor_info = utils.build_tensor_info(predictions)
if output_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Regression output must be a float Tensor.')
signature_outputs = {signature_constants.REGRESS_OUTPUTS: output_tensor_info}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.REGRESS_METHOD_NAME)
return signature_def
@tf_export('saved_model.signature_def_utils.classification_signature_def')
def classification_signature_def(examples, classes, scores):
"""Creates classification signature from given examples and predictions.
This function produces signatures intended for use with the TensorFlow Serving
Classify API (tensorflow_serving/apis/prediction_service.proto), and so
constrains the input and output types to those allowed by TensorFlow Serving.
Args:
examples: A string `Tensor`, expected to accept serialized tf.Examples.
classes: A string `Tensor`. Note that the ClassificationResponse message
requires that class labels are strings, not integers or anything else.
    scores: A float `Tensor`.
Returns:
A classification-flavored signature_def.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('Classification examples cannot be None.')
if not isinstance(examples, ops.Tensor):
raise ValueError('Classification examples must be a string Tensor.')
if classes is None and scores is None:
raise ValueError('Classification classes and scores cannot both be None.')
input_tensor_info = utils.build_tensor_info(examples)
if input_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification examples must be a string Tensor.')
signature_inputs = {signature_constants.CLASSIFY_INPUTS: input_tensor_info}
signature_outputs = {}
if classes is not None:
classes_tensor_info = utils.build_tensor_info(classes)
if classes_tensor_info.dtype != types_pb2.DT_STRING:
raise ValueError('Classification classes must be a string Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES] = (
classes_tensor_info)
if scores is not None:
scores_tensor_info = utils.build_tensor_info(scores)
if scores_tensor_info.dtype != types_pb2.DT_FLOAT:
raise ValueError('Classification scores must be a float Tensor.')
signature_outputs[signature_constants.CLASSIFY_OUTPUT_SCORES] = (
scores_tensor_info)
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.CLASSIFY_METHOD_NAME)
return signature_def
@tf_export('saved_model.signature_def_utils.predict_signature_def')
def predict_signature_def(inputs, outputs):
"""Creates prediction signature from given inputs and outputs.
This function produces signatures intended for use with the TensorFlow Serving
Predict API (tensorflow_serving/apis/prediction_service.proto). This API
imposes no constraints on the input and output types.
Args:
inputs: dict of string to `Tensor`.
outputs: dict of string to `Tensor`.
Returns:
A prediction-flavored signature_def.
Raises:
ValueError: If inputs or outputs is `None`.
"""
if inputs is None or not inputs:
raise ValueError('Prediction inputs cannot be None or empty.')
if outputs is None or not outputs:
raise ValueError('Prediction outputs cannot be None or empty.')
signature_inputs = {key: utils.build_tensor_info(tensor)
for key, tensor in inputs.items()}
signature_outputs = {key: utils.build_tensor_info(tensor)
for key, tensor in outputs.items()}
signature_def = build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
@tf_export('saved_model.signature_def_utils.is_valid_signature')
def is_valid_signature(signature_def):
"""Determine whether a SignatureDef can be served by TensorFlow Serving."""
if signature_def is None:
return False
return (_is_valid_classification_signature(signature_def) or
_is_valid_regression_signature(signature_def) or
_is_valid_predict_signature(signature_def))
def _is_valid_predict_signature(signature_def):
"""Determine whether the argument is a servable 'predict' SignatureDef."""
if signature_def.method_name != signature_constants.PREDICT_METHOD_NAME:
return False
if not signature_def.inputs.keys():
return False
if not signature_def.outputs.keys():
return False
return True
def _is_valid_regression_signature(signature_def):
"""Determine whether the argument is a servable 'regress' SignatureDef."""
if signature_def.method_name != signature_constants.REGRESS_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.REGRESS_INPUTS])):
return False
if (signature_def.inputs[signature_constants.REGRESS_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
if (set(signature_def.outputs.keys())
!= set([signature_constants.REGRESS_OUTPUTS])):
return False
if (signature_def.outputs[signature_constants.REGRESS_OUTPUTS].dtype !=
types_pb2.DT_FLOAT):
return False
return True
def _is_valid_classification_signature(signature_def):
"""Determine whether the argument is a servable 'classify' SignatureDef."""
if signature_def.method_name != signature_constants.CLASSIFY_METHOD_NAME:
return False
if (set(signature_def.inputs.keys())
!= set([signature_constants.CLASSIFY_INPUTS])):
return False
if (signature_def.inputs[signature_constants.CLASSIFY_INPUTS].dtype !=
types_pb2.DT_STRING):
return False
allowed_outputs = set([signature_constants.CLASSIFY_OUTPUT_CLASSES,
signature_constants.CLASSIFY_OUTPUT_SCORES])
if not signature_def.outputs.keys():
return False
if set(signature_def.outputs.keys()) - allowed_outputs:
return False
if (signature_constants.CLASSIFY_OUTPUT_CLASSES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_CLASSES].dtype
!= types_pb2.DT_STRING):
return False
if (signature_constants.CLASSIFY_OUTPUT_SCORES in signature_def.outputs
and
signature_def.outputs[signature_constants.CLASSIFY_OUTPUT_SCORES].dtype !=
types_pb2.DT_FLOAT):
return False
return True
def _get_shapes_from_tensor_info_dict(tensor_info_dict):
"""Returns a map of keys to TensorShape objects.
Args:
tensor_info_dict: map with TensorInfo proto as values.
Returns:
Map with corresponding TensorShape objects as values.
"""
return {
key: tensor_shape.TensorShape(tensor_info.tensor_shape)
for key, tensor_info in tensor_info_dict.items()
}
def _get_types_from_tensor_info_dict(tensor_info_dict):
"""Returns a map of keys to DType objects.
Args:
tensor_info_dict: map with TensorInfo proto as values.
Returns:
Map with corresponding DType objects as values.
"""
return {
key: dtypes.DType(tensor_info.dtype)
for key, tensor_info in tensor_info_dict.items()
}
def get_signature_def_input_shapes(signature):
"""Returns map of parameter names to their shapes.
Args:
signature: SignatureDef proto.
Returns:
Map from string to TensorShape objects.
"""
return _get_shapes_from_tensor_info_dict(signature.inputs)
def get_signature_def_input_types(signature):
"""Returns map of output names to their types.
Args:
signature: SignatureDef proto.
Returns:
Map from string to DType objects.
"""
return _get_types_from_tensor_info_dict(signature.inputs)
def get_signature_def_output_shapes(signature):
"""Returns map of output names to their shapes.
Args:
signature: SignatureDef proto.
Returns:
Map from string to TensorShape objects.
"""
return _get_shapes_from_tensor_info_dict(signature.outputs)
def get_signature_def_output_types(signature):
"""Returns map of output names to their types.
Args:
signature: SignatureDef proto.
Returns:
Map from string to DType objects.
"""
return _get_types_from_tensor_info_dict(signature.outputs)
| allenlavoie/tensorflow | tensorflow/python/saved_model/signature_def_utils_impl.py | Python | apache-2.0 | 11,620 |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Defines infrastructure for resource resolution. This is used to find
various dependencies/assets/etc that WA objects rely on in a flexible way.
"""
import logging
from collections import defaultdict
# Note: this is the modified louie library in wlauto/external.
# prioritylist does not exist in vanilla louie.
from louie.prioritylist import PriorityList # pylint: disable=E0611,F0401
from wlauto.exceptions import ResourceError
class ResourceResolver(object):
"""
Discovers and registers getters, and then handles requests for
resources using registered getters.
"""
def __init__(self, config):
self.logger = logging.getLogger(self.__class__.__name__)
self.getters = defaultdict(PriorityList)
self.config = config
def load(self):
"""
        Discover and register resource getters exposed by the extension loader
        of the supplied configuration.
"""
for rescls in self.config.ext_loader.list_resource_getters():
getter = self.config.get_extension(rescls.name, self)
getter.register()
def get(self, resource, strict=True, *args, **kwargs):
"""
Uses registered getters to attempt to discover a resource of the specified
kind and matching the specified criteria. Returns path to the resource that
has been discovered. If a resource has not been discovered, this will raise
a ``ResourceError`` or, if ``strict`` has been set to ``False``, will return
``None``.
"""
self.logger.debug('Resolving {}'.format(resource))
for getter in self.getters[resource.name]:
self.logger.debug('Trying {}'.format(getter))
result = getter.get(resource, *args, **kwargs)
if result is not None:
self.logger.debug('Resource {} found using {}:'.format(resource, getter))
self.logger.debug('\t{}'.format(result))
return result
if strict:
if kwargs:
criteria = ', '.join(['{}:{}'.format(k, v) for k, v in kwargs.iteritems()])
raise ResourceError('{} ({}) could not be found'.format(resource, criteria))
else:
raise ResourceError('{} could not be found'.format(resource))
self.logger.debug('Resource {} not found.'.format(resource))
return None
def register(self, getter, kind, priority=0):
"""
Register the specified resource getter as being able to discover a resource
of the specified kind with the specified priority.
This method would typically be invoked by a getter inside its __init__.
The idea being that getters register themselves for resources they know
they can discover.
*priorities*
getters that are registered with the highest priority will be invoked first. If
multiple getters are registered under the same priority, they will be invoked
in the order they were registered (i.e. in the order they were discovered). This is
essentially non-deterministic.
Generally getters that are more likely to find a resource, or would find a
"better" version of the resource should register with higher (positive) priorities.
Fall-back getters that should only be invoked if a resource is not found by usual
means should register with lower (negative) priorities.
"""
self.logger.debug('Registering {}'.format(getter.name))
self.getters[kind].add(getter, priority)
def unregister(self, getter, kind):
"""
Unregister a getter that has been registered earlier.
"""
self.logger.debug('Unregistering {}'.format(getter.name))
try:
self.getters[kind].remove(getter)
except ValueError:
raise ValueError('Resource getter {} is not installed.'.format(getter.name))
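# Typical flow, as a sketch (``config`` stands for a WA configuration object
# exposing ``ext_loader``/``get_extension``, and ``resource`` for a Resource
# instance; both are defined elsewhere in wlauto):
#
#   resolver = ResourceResolver(config)
#   resolver.load()                              # discover and register getters
#   path = resolver.get(resource)                # raises ResourceError if not found
#   path = resolver.get(resource, strict=False)  # returns None instead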
| Sticklyman1936/workload-automation | wlauto/core/resolver.py | Python | apache-2.0 | 4,535 |
# encoding: utf-8
"""Path utility functions."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Derived from IPython.utils.path, which is
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import tempfile
pjoin = os.path.join
def get_home_dir():
"""Get the real path of the home directory"""
homedir = os.path.expanduser('~')
# Next line will make things work even when /home/ is a symlink to
# /usr/home as it is on FreeBSD, for example
homedir = os.path.realpath(homedir)
return homedir
_dtemps = {}
def _mkdtemp_once(name):
"""Make or reuse a temporary directory.
If this is called with the same name in the same process, it will return
the same directory.
"""
try:
return _dtemps[name]
except KeyError:
d = _dtemps[name] = tempfile.mkdtemp(prefix=name + '-')
return d
def jupyter_config_dir():
"""Get the Jupyter config directory for this platform and user.
Returns JUPYTER_CONFIG_DIR if defined, else ~/.jupyter
"""
env = os.environ
home_dir = get_home_dir()
if env.get('JUPYTER_NO_CONFIG'):
return _mkdtemp_once('jupyter-clean-cfg')
if env.get('JUPYTER_CONFIG_DIR'):
return env['JUPYTER_CONFIG_DIR']
return pjoin(home_dir, '.jupyter')
def jupyter_data_dir():
"""Get the config directory for Jupyter data files.
These are non-transient, non-configuration files.
Returns JUPYTER_DATA_DIR if defined, else a platform-appropriate path.
"""
env = os.environ
if env.get('JUPYTER_DATA_DIR'):
return env['JUPYTER_DATA_DIR']
home = get_home_dir()
if sys.platform == 'darwin':
return os.path.join(home, 'Library', 'Jupyter')
elif os.name == 'nt':
appdata = os.environ.get('APPDATA', None)
if appdata:
return pjoin(appdata, 'jupyter')
else:
return pjoin(jupyter_config_dir(), 'data')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = env.get("XDG_DATA_HOME", None)
if not xdg:
xdg = pjoin(home, '.local', 'share')
return pjoin(xdg, 'jupyter')
def jupyter_runtime_dir():
"""Return the runtime dir for transient jupyter files.
Returns JUPYTER_RUNTIME_DIR if defined.
Respects XDG_RUNTIME_DIR on non-OS X, non-Windows,
falls back on data_dir/runtime otherwise.
"""
env = os.environ
if env.get('JUPYTER_RUNTIME_DIR'):
return env['JUPYTER_RUNTIME_DIR']
if sys.platform == 'darwin':
return pjoin(jupyter_data_dir(), 'runtime')
elif os.name == 'nt':
return pjoin(jupyter_data_dir(), 'runtime')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = env.get("XDG_RUNTIME_DIR", None)
if xdg:
return pjoin(xdg, 'jupyter')
return pjoin(jupyter_data_dir(), 'runtime')
if os.name == 'nt':
programdata = os.environ.get('PROGRAMDATA', None)
if programdata:
SYSTEM_JUPYTER_PATH = [pjoin(programdata, 'jupyter')]
else: # PROGRAMDATA is not defined by default on XP.
SYSTEM_JUPYTER_PATH = [os.path.join(sys.prefix, 'share', 'jupyter')]
else:
SYSTEM_JUPYTER_PATH = [
"/usr/local/share/jupyter",
"/usr/share/jupyter",
]
ENV_JUPYTER_PATH = [os.path.join(sys.prefix, 'share', 'jupyter')]
def jupyter_path(*subdirs):
"""Return a list of directories to search for data files
JUPYTER_PATH environment variable has highest priority.
If *subdirs are given, that subdirectory will be added to each element.
Examples:
>>> jupyter_path()
['~/.local/jupyter', '/usr/local/share/jupyter']
>>> jupyter_path('kernels')
['~/.local/jupyter/kernels', '/usr/local/share/jupyter/kernels']
"""
paths = []
# highest priority is env
if os.environ.get('JUPYTER_PATH'):
paths.extend(
p.rstrip(os.sep)
for p in os.environ['JUPYTER_PATH'].split(os.pathsep)
)
# then user dir
paths.append(jupyter_data_dir())
# then sys.prefix
for p in ENV_JUPYTER_PATH:
if p not in SYSTEM_JUPYTER_PATH:
paths.append(p)
# finally, system
paths.extend(SYSTEM_JUPYTER_PATH)
# add subdir, if requested
if subdirs:
paths = [ pjoin(p, *subdirs) for p in paths ]
return paths
if os.name == 'nt':
programdata = os.environ.get('PROGRAMDATA', None)
if programdata:
SYSTEM_CONFIG_PATH = [os.path.join(programdata, 'jupyter')]
else: # PROGRAMDATA is not defined by default on XP.
SYSTEM_CONFIG_PATH = []
else:
SYSTEM_CONFIG_PATH = [
"/usr/local/etc/jupyter",
"/etc/jupyter",
]
ENV_CONFIG_PATH = [os.path.join(sys.prefix, 'etc', 'jupyter')]
def jupyter_config_path():
"""Return the search path for Jupyter config files as a list."""
paths = [jupyter_config_dir()]
if os.environ.get('JUPYTER_NO_CONFIG'):
return paths
for p in ENV_CONFIG_PATH:
if p not in SYSTEM_CONFIG_PATH:
paths.append(p)
paths.extend(SYSTEM_CONFIG_PATH)
return paths
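# Illustrative return value on a Linux system with default settings (the exact
# entries depend on the user's home directory, sys.prefix and the environment):
#
#   >>> jupyter_config_path()
#   ['/home/user/.jupyter', '/opt/venv/etc/jupyter',
#    '/usr/local/etc/jupyter', '/etc/jupyter']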
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/jupyter_core/paths.py | Python | bsd-2-clause | 5,264 |
"""
MultiCall - a class which inherits its methods from a Tkinter widget (Text, for
example), but enables multiple calls of functions per virtual event - all
matching events will be called, not only the most specific one. This is done
by wrapping the event functions - event_add, event_delete and event_info.
MultiCall recognizes only a subset of legal event sequences. Sequences which
are not recognized are treated by the original Tk handling mechanism. A
more-specific event will be called before a less-specific event.
The recognized sequences are complete one-event sequences (no emacs-style
Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events.
Key/Button Press/Release events can have modifiers.
The recognized modifiers are Shift, Control, Option and Command for Mac, and
Control, Alt, Shift, Meta/M for other platforms.
For all events which were handled by MultiCall, a new member is added to the
event instance passed to the bound functions - mc_type. This is one of the
event type constants defined in this module (such as MC_KEYPRESS).
For Key/Button events (which are handled by MultiCall and may receive
modifiers), another member is added - mc_state. This member gives the state
of the recognized modifiers, as a combination of the modifier constants
also defined in this module (for example, MC_SHIFT).
Using these members is absolutely portable.
The order by which events are called is defined by these rules:
1. A more-specific event will be called before a less-specific event.
2. A recently-bound event will be called before a previously-bound event,
unless this conflicts with the first rule.
Each function will be called at most once for each event.
"""
import re
import sys
import tkinter
# the event type constants, which define the meaning of mc_type
MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3;
MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7;
MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12;
MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17;
MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22;
# the modifier state constants, which define the meaning of mc_state
MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5
MC_OPTION = 1<<6; MC_COMMAND = 1<<7
# define the list of modifiers, to be used in complex event types.
if sys.platform == "darwin":
_modifiers = (("Shift",), ("Control",), ("Option",), ("Command",))
_modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND)
else:
_modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M"))
_modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META)
# a dictionary to map a modifier name into its number
_modifier_names = dict([(name, number)
for number in range(len(_modifiers))
for name in _modifiers[number]])
# In 3.4, if no shell window is ever open, the underlying Tk widget is
# destroyed before .__del__ methods here are called. The following
# is used to selectively ignore shutdown exceptions to avoid
# 'Exception ignored' messages. See http://bugs.python.org/issue20167
APPLICATION_GONE = "application has been destroyed"
# A binder is a class which binds functions to one type of event. It has two
# methods: bind and unbind, which get a function and a parsed sequence, as
# returned by _parse_sequence(). There are two types of binders:
# _SimpleBinder handles event types with no modifiers and no detail.
# No Python functions are called when no events are bound.
# _ComplexBinder handles event types with modifiers and a detail.
# A Python function is called each time an event is generated.
class _SimpleBinder:
def __init__(self, type, widget, widgetinst):
self.type = type
self.sequence = '<'+_types[type][0]+'>'
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = []
self.handlerid = None
def bind(self, triplet, func):
if not self.handlerid:
def handler(event, l = self.bindedfuncs, mc_type = self.type):
event.mc_type = mc_type
wascalled = {}
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = func(event)
if r:
return r
self.handlerid = self.widget.bind(self.widgetinst,
self.sequence, handler)
self.bindedfuncs.append(func)
def unbind(self, triplet, func):
self.bindedfuncs.remove(func)
if not self.bindedfuncs:
self.widget.unbind(self.widgetinst, self.sequence, self.handlerid)
self.handlerid = None
def __del__(self):
if self.handlerid:
try:
self.widget.unbind(self.widgetinst, self.sequence,
self.handlerid)
except tkinter.TclError as e:
if not APPLICATION_GONE in e.args[0]:
raise
# An int in range(1 << len(_modifiers)) represents a combination of modifiers
# (if the least significant bit is on, _modifiers[0] is on, and so on).
# _state_subsets gives for each combination of modifiers, or *state*,
# a list of the states which are a subset of it. This list is ordered by the
# number of modifiers in the state - the most specific state comes first.
_states = range(1 << len(_modifiers))
_state_names = [''.join(m[0]+'-'
for i, m in enumerate(_modifiers)
if (1 << i) & s)
for s in _states]
def expand_substates(states):
'''For each item of states return a list containing all combinations of
that item with individual bits reset, sorted by the number of set bits.
'''
def nbits(n):
"number of bits set in n base 2"
nb = 0
while n:
n, rem = divmod(n, 2)
nb += rem
return nb
statelist = []
for state in states:
substates = list(set(state & x for x in states))
substates.sort(key=nbits, reverse=True)
statelist.append(substates)
return statelist
_state_subsets = expand_substates(_states)
# _state_codes gives for each state, the portable code to be passed as mc_state
_state_codes = []
for s in _states:
r = 0
for i in range(len(_modifiers)):
if (1 << i) & s:
r |= _modifier_masks[i]
_state_codes.append(r)
class _ComplexBinder:
# This class binds many functions, and only unbinds them when it is deleted.
    # self.handlerids is the list of seqs and ids of bound handler functions.
    # The bound functions sit in a dictionary of lists of lists, which maps
    # a detail (or None) and a state into a list of functions.
    # When a new detail is discovered, handlers for all the possible states
    # are bound.
def __create_handler(self, lists, mc_type, mc_state):
def handler(event, lists = lists,
mc_type = mc_type, mc_state = mc_state,
ishandlerrunning = self.ishandlerrunning,
doafterhandler = self.doafterhandler):
ishandlerrunning[:] = [True]
event.mc_type = mc_type
event.mc_state = mc_state
wascalled = {}
r = None
for l in lists:
for i in range(len(l)-1, -1, -1):
func = l[i]
if func not in wascalled:
wascalled[func] = True
r = l[i](event)
if r:
break
if r:
break
ishandlerrunning[:] = []
# Call all functions in doafterhandler and remove them from list
for f in doafterhandler:
f()
doafterhandler[:] = []
if r:
return r
return handler
def __init__(self, type, widget, widgetinst):
self.type = type
self.typename = _types[type][0]
self.widget = widget
self.widgetinst = widgetinst
self.bindedfuncs = {None: [[] for s in _states]}
self.handlerids = []
# we don't want to change the lists of functions while a handler is
# running - it will mess up the loop and anyway, we usually want the
# change to happen from the next event. So we have a list of functions
        # for the handler to run after it finishes calling the bound functions.
# It calls them only once.
# ishandlerrunning is a list. An empty one means no, otherwise - yes.
# this is done so that it would be mutable.
self.ishandlerrunning = []
self.doafterhandler = []
for s in _states:
lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]]
handler = self.__create_handler(lists, type, _state_codes[s])
seq = '<'+_state_names[s]+self.typename+'>'
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
def bind(self, triplet, func):
if triplet[2] not in self.bindedfuncs:
self.bindedfuncs[triplet[2]] = [[] for s in _states]
for s in _states:
lists = [ self.bindedfuncs[detail][i]
for detail in (triplet[2], None)
for i in _state_subsets[s] ]
handler = self.__create_handler(lists, self.type,
_state_codes[s])
seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2])
self.handlerids.append((seq, self.widget.bind(self.widgetinst,
seq, handler)))
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def unbind(self, triplet, func):
doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func)
if not self.ishandlerrunning:
doit()
else:
self.doafterhandler.append(doit)
def __del__(self):
for seq, id in self.handlerids:
try:
self.widget.unbind(self.widgetinst, seq, id)
except tkinter.TclError as e:
if not APPLICATION_GONE in e.args[0]:
raise
# define the list of event types to be handled by MultiCall. The order is
# compatible with the definition of event type constants.
_types = (
("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"),
("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",),
("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",),
("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",),
("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",),
("Visibility",),
)
# which binder should be used for every event type?
_binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4)
# A dictionary to map a type name into its number
_type_names = dict([(name, number)
for number in range(len(_types))
for name in _types[number]])
_keysym_re = re.compile(r"^\w+$")
_button_re = re.compile(r"^[1-5]$")
def _parse_sequence(sequence):
"""Get a string which should describe an event sequence. If it is
successfully parsed as one, return a tuple containing the state (as an int),
the event type (as an index of _types), and the detail - None if none, or a
string if there is one. If the parsing is unsuccessful, return None.
"""
if not sequence or sequence[0] != '<' or sequence[-1] != '>':
return None
words = sequence[1:-1].split('-')
modifiers = 0
while words and words[0] in _modifier_names:
modifiers |= 1 << _modifier_names[words[0]]
del words[0]
if words and words[0] in _type_names:
type = _type_names[words[0]]
del words[0]
else:
return None
if _binder_classes[type] is _SimpleBinder:
if modifiers or words:
return None
else:
detail = None
else:
# _ComplexBinder
if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]:
type_re = _keysym_re
else:
type_re = _button_re
if not words:
detail = None
elif len(words) == 1 and type_re.match(words[0]):
detail = words[0]
else:
return None
return modifiers, type, detail
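# For example, on a non-mac platform _parse_sequence("<Control-Key-a>") returns
# (1, 0, 'a'): the Control modifier bit, the KeyPress type index and the 'a'
# detail. An unrecognized string such as "<Control-x><Control-s>" returns None.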
def _triplet_to_sequence(triplet):
if triplet[2]:
return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'-'+ \
triplet[2]+'>'
else:
return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'>'
_multicall_dict = {}
def MultiCallCreator(widget):
"""Return a MultiCall class which inherits its methods from the
given widget class (for example, Tkinter.Text). This is used
instead of a templating mechanism.
"""
if widget in _multicall_dict:
return _multicall_dict[widget]
class MultiCall (widget):
assert issubclass(widget, tkinter.Misc)
def __init__(self, *args, **kwargs):
widget.__init__(self, *args, **kwargs)
            # a dictionary which maps a virtual event to a list with:
            # 0. the bound function
            # 1. a list of triplets - the sequences it is bound to
self.__eventinfo = {}
self.__binders = [_binder_classes[i](i, widget, self)
for i in range(len(_types))]
def bind(self, sequence=None, func=None, add=None):
#print("bind(%s, %s, %s)" % (sequence, func, add),
# file=sys.__stderr__)
if type(sequence) is str and len(sequence) > 2 and \
sequence[:2] == "<<" and sequence[-2:] == ">>":
if sequence in self.__eventinfo:
ei = self.__eventinfo[sequence]
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].unbind(triplet, ei[0])
ei[0] = func
if ei[0] is not None:
for triplet in ei[1]:
self.__binders[triplet[1]].bind(triplet, func)
else:
self.__eventinfo[sequence] = [func, []]
return widget.bind(self, sequence, func, add)
def unbind(self, sequence, funcid=None):
if type(sequence) is str and len(sequence) > 2 and \
sequence[:2] == "<<" and sequence[-2:] == ">>" and \
sequence in self.__eventinfo:
func, triplets = self.__eventinfo[sequence]
if func is not None:
for triplet in triplets:
self.__binders[triplet[1]].unbind(triplet, func)
self.__eventinfo[sequence][0] = None
return widget.unbind(self, sequence, funcid)
def event_add(self, virtual, *sequences):
#print("event_add(%s, %s)" % (repr(virtual), repr(sequences)),
# file=sys.__stderr__)
if virtual not in self.__eventinfo:
self.__eventinfo[virtual] = [None, []]
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
#print("Tkinter event_add(%s)" % seq, file=sys.__stderr__)
widget.event_add(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].bind(triplet, func)
triplets.append(triplet)
def event_delete(self, virtual, *sequences):
if virtual not in self.__eventinfo:
return
func, triplets = self.__eventinfo[virtual]
for seq in sequences:
triplet = _parse_sequence(seq)
if triplet is None:
#print("Tkinter event_delete: %s" % seq, file=sys.__stderr__)
widget.event_delete(self, virtual, seq)
else:
if func is not None:
self.__binders[triplet[1]].unbind(triplet, func)
triplets.remove(triplet)
def event_info(self, virtual=None):
if virtual is None or virtual not in self.__eventinfo:
return widget.event_info(self, virtual)
else:
return tuple(map(_triplet_to_sequence,
self.__eventinfo[virtual][1])) + \
widget.event_info(self, virtual)
def __del__(self):
for virtual in self.__eventinfo:
func, triplets = self.__eventinfo[virtual]
if func:
for triplet in triplets:
try:
self.__binders[triplet[1]].unbind(triplet, func)
except tkinter.TclError as e:
if not APPLICATION_GONE in e.args[0]:
raise
_multicall_dict[widget] = MultiCall
return MultiCall
def _multi_call(parent): # htest #
top = tkinter.Toplevel(parent)
top.title("Test MultiCall")
x, y = map(int, parent.geometry().split('+')[1:])
top.geometry("+%d+%d" % (x, y + 175))
text = MultiCallCreator(tkinter.Text)(top)
text.pack()
def bindseq(seq, n=[0]):
def handler(event):
print(seq)
text.bind("<<handler%d>>"%n[0], handler)
text.event_add("<<handler%d>>"%n[0], seq)
n[0] += 1
bindseq("<Key>")
bindseq("<Control-Key>")
bindseq("<Alt-Key-a>")
bindseq("<Control-Key-a>")
bindseq("<Alt-Control-Key-a>")
bindseq("<Key-b>")
bindseq("<Control-Button-1>")
bindseq("<Button-2>")
bindseq("<Alt-Button-1>")
bindseq("<FocusOut>")
bindseq("<Enter>")
bindseq("<Leave>")
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(_multi_call)
| yotchang4s/cafebabepy | src/main/python/idlelib/MultiCall.py | Python | bsd-3-clause | 18,548 |
from django.utils.translation import ugettext as _
from wagtail.admin.forms.search import SearchForm
from wagtail.search.backends import get_search_backend
from wagtail.search.index import class_is_indexed
class SearchableListMixin:
search_box_placeholder = _("Search")
search_fields = None
def get_search_form(self):
return SearchForm(self.request.GET if self.request.GET.get('q') else None, placeholder=self.search_box_placeholder)
def get_queryset(self):
queryset = super().get_queryset()
search_form = self.get_search_form()
if search_form.is_valid():
q = search_form.cleaned_data['q']
if class_is_indexed(queryset.model):
search_backend = get_search_backend()
queryset = search_backend.search(q, queryset, fields=self.search_fields)
else:
filters = {
field + '__icontains': q
for field in self.search_fields or []
}
queryset = queryset.filter(**filters)
return queryset
def get_context_data(self, **kwargs):
if 'search_form' not in kwargs:
kwargs['search_form'] = self.get_search_form()
kwargs['is_searching'] = bool(self.request.GET.get('q'))
return super().get_context_data(**kwargs)
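# A sketch of how a view might opt in (IndexView, MyModel and the field names
# are hypothetical, not part of wagtail.admin):
#
#   from django.views import generic
#
#   class IndexView(SearchableListMixin, generic.ListView):
#       model = MyModel
#       search_fields = ['title', 'description']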
| mikedingjan/wagtail | wagtail/admin/views/mixins.py | Python | bsd-3-clause | 1,358 |
"""
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.inspect import func_supports_parameter
from .base import Loader as BaseLoader
class Loader(BaseLoader):
def __init__(self, engine, loaders):
self.template_cache = {}
self.find_template_cache = {} # RemovedInDjango20Warning
self.get_template_cache = {}
self.loaders = engine.get_template_loaders(loaders)
super(Loader, self).__init__(engine)
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template(self, template_name, template_dirs=None, skip=None):
key = self.cache_key(template_name, template_dirs, skip)
cached = self.get_template_cache.get(key)
if cached:
if isinstance(cached, TemplateDoesNotExist):
raise cached
return cached
try:
template = super(Loader, self).get_template(
template_name, template_dirs, skip,
)
except TemplateDoesNotExist as e:
self.get_template_cache[key] = e
raise
else:
self.get_template_cache[key] = template
return template
def get_template_sources(self, template_name, template_dirs=None):
for loader in self.loaders:
args = [template_name]
# RemovedInDjango20Warning: Add template_dirs for compatibility
# with old loaders
if func_supports_parameter(loader.get_template_sources, 'template_dirs'):
args.append(template_dirs)
for origin in loader.get_template_sources(*args):
yield origin
def cache_key(self, template_name, template_dirs, skip=None):
"""
Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a
"""
dirs_prefix = ''
skip_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if template_dirs:
dirs_prefix = self.generate_hash(template_dirs)
return ("%s-%s-%s" % (template_name, skip_prefix, dirs_prefix)).strip('-')
def generate_hash(self, values):
return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()
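    # For instance, cache_key('index.html', None) is simply 'index.html', while
    # a lookup that skips already-seen origins named 'index.html' yields
    # 'index.html-<sha1 of the skipped origin names>' (a sketch of the logic
    # implemented above, not an additional code path).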
@property
def supports_recursion(self):
"""
RemovedInDjango20Warning: This is an internal property used by the
ExtendsNode during the deprecation of non-recursive loaders.
"""
return all(hasattr(loader, 'get_contents') for loader in self.loaders)
def find_template(self, name, dirs=None):
"""
RemovedInDjango20Warning: An internal method to lookup the template
name in all the configured loaders.
"""
key = self.cache_key(name, dirs)
try:
result = self.find_template_cache[key]
except KeyError:
result = None
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
except TemplateDoesNotExist:
pass
else:
origin = Origin(
name=display_name,
template_name=name,
loader=loader,
)
result = template, origin
break
self.find_template_cache[key] = result
if result:
return result
else:
self.template_cache[key] = TemplateDoesNotExist
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
warnings.warn(
'The load_template() method is deprecated. Use get_template() '
'instead.', RemovedInDjango20Warning,
)
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
# A cached previous failure:
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist(template_name)
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = Template(template, origin, template_name, self.engine)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
def reset(self):
"Empty the template cache."
self.template_cache.clear()
self.find_template_cache.clear() # RemovedInDjango20Warning
self.get_template_cache.clear()
| samabhi/pstHealth | venv/lib/python2.7/site-packages/django/template/loaders/cached.py | Python | mit | 5,775 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
try:
from crypt import crypt
except ImportError:
from thirdparty.fcrypt.fcrypt import crypt
_multiprocessing = None
try:
import multiprocessing
# problems on FreeBSD (Reference: http://www.eggheadcafe.com/microsoft/Python/35880259/multiprocessing-on-freebsd.aspx)
_ = multiprocessing.Queue()
except (ImportError, OSError):
pass
else:
_multiprocessing = multiprocessing
import gc
import os
import re
import tempfile
import time
from hashlib import md5
from hashlib import sha1
from hashlib import sha224
from hashlib import sha384
from hashlib import sha512
from Queue import Queue
from lib.core.common import Backend
from lib.core.common import checkFile
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getFileItems
from lib.core.common import getPublicTypeMembers
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import normalizeUnicode
from lib.core.common import paths
from lib.core.common import readInput
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.convert import hexdecode
from lib.core.convert import hexencode
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import HASH
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import COMMON_PASSWORD_SUFFIXES
from lib.core.settings import COMMON_USER_COLUMNS
from lib.core.settings import DUMMY_USER_PREFIX
from lib.core.settings import HASH_MOD_ITEM_DISPLAY
from lib.core.settings import HASH_RECOGNITION_QUIT_THRESHOLD
from lib.core.settings import IS_WIN
from lib.core.settings import ITOA64
from lib.core.settings import ML
from lib.core.settings import NULL
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import ROTATING_CHARS
from lib.core.wordlist import Wordlist
from thirdparty.pydes.pyDes import des
from thirdparty.pydes.pyDes import CBC
def mysql_passwd(password, uppercase=True):
"""
Reference(s):
http://csl.sublevel3.org/mysql-password-function/
>>> mysql_passwd(password='testpass', uppercase=True)
'*00E247AC5F9AF26AE0194B41E1E769DEE1429A29'
"""
retVal = "*%s" % sha1(sha1(password).digest()).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def mysql_old_passwd(password, uppercase=True): # prior to version '4.1'
"""
Reference(s):
http://www.sfr-fresh.com/unix/privat/tpop3d-1.5.5.tar.gz:a/tpop3d-1.5.5/password.c
http://voidnetwork.org/5ynL0rd/darkc0de/python_script/darkMySQLi.html
>>> mysql_old_passwd(password='testpass', uppercase=True)
'7DCDA0D57290B453'
"""
a, b, c = 1345345333, 7, 0x12345671
for d in password:
if d == ' ' or d == '\t':
continue
e = ord(d)
a ^= (((a & 63) + b) * e) + (a << 8)
c += (c << 8) ^ a
b += e
retVal = "%08lx%08lx" % (a & ((1 << 31) - 1), c & ((1 << 31) - 1))
return retVal.upper() if uppercase else retVal.lower()
def postgres_passwd(password, username, uppercase=False):
"""
Reference(s):
http://pentestmonkey.net/blog/cracking-postgres-hashes/
>>> postgres_passwd(password='testpass', username='testuser', uppercase=False)
'md599e5ea7a6f7c3269995cba3927fd0093'
"""
retVal = "md5%s" % md5(password + username).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def mssql_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://www.leidecker.info/projects/phrasendrescher/mssql.c
https://www.evilfingers.com/tools/GSAuditor.php
>>> mssql_passwd(password='testpass', salt='4086ceb6', uppercase=False)
'0x01004086ceb60c90646a8ab9889fe3ed8e5c150b5460ece8425a'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0100%s%s" % (salt, sha1(unistr + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def mssql_old_passwd(password, salt, uppercase=True): # prior to version '2005'
"""
Reference(s):
www.exploit-db.com/download_pdf/15537/
http://www.leidecker.info/projects/phrasendrescher/mssql.c
https://www.evilfingers.com/tools/GSAuditor.php
>>> mssql_old_passwd(password='testpass', salt='4086ceb6', uppercase=True)
'0x01004086CEB60C90646A8AB9889FE3ED8E5C150B5460ECE8425AC7BB7255C0C81D79AA5D0E93D4BB077FB9A51DA0'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0100%s%s%s" % (salt, sha1(unistr + binsalt).hexdigest(), sha1(unistr.upper() + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def mssql_new_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://hashcat.net/forum/thread-1474.html
>>> mssql_new_passwd(password='testpass', salt='4086ceb6', uppercase=False)
'0x02004086ceb6eb051cdbc5bdae68ffc66c918d4977e592f6bdfc2b444a7214f71fa31c35902c5b7ae773ed5f4c50676d329120ace32ee6bc81c24f70711eb0fc6400e85ebf25'
"""
binsalt = hexdecode(salt)
unistr = "".join(map(lambda c: ("%s\0" if ord(c) < 256 else "%s") % utf8encode(c), password))
retVal = "0200%s%s" % (salt, sha512(unistr + binsalt).hexdigest())
return "0x%s" % (retVal.upper() if uppercase else retVal.lower())
def oracle_passwd(password, salt, uppercase=True):
"""
Reference(s):
https://www.evilfingers.com/tools/GSAuditor.php
http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/
http://seclists.org/bugtraq/2007/Sep/304
>>> oracle_passwd(password='SHAlala', salt='1B7B5F82B7235E9E182C', uppercase=True)
'S:2BFCFDF5895014EE9BB2B9BA067B01E0389BB5711B7B5F82B7235E9E182C'
"""
binsalt = hexdecode(salt)
retVal = "s:%s%s" % (sha1(utf8encode(password) + binsalt).hexdigest(), salt)
return retVal.upper() if uppercase else retVal.lower()
def oracle_old_passwd(password, username, uppercase=True): # prior to version '11g'
"""
Reference(s):
http://www.notesbit.com/index.php/scripts-oracle/oracle-11g-new-password-algorithm-is-revealed-by-seclistsorg/
>>> oracle_old_passwd(password='tiger', username='scott', uppercase=True)
'F894844C34402B67'
"""
IV, pad = "\0" * 8, "\0"
if isinstance(username, unicode):
username = unicode.encode(username, UNICODE_ENCODING) # pyDes has issues with unicode strings
unistr = "".join("\0%s" % c for c in (username + password).upper())
cipher = des(hexdecode("0123456789ABCDEF"), CBC, IV, pad)
encrypted = cipher.encrypt(unistr)
cipher = des(encrypted[-8:], CBC, IV, pad)
encrypted = cipher.encrypt(unistr)
retVal = hexencode(encrypted[-8:])
return retVal.upper() if uppercase else retVal.lower()
def md5_generic_passwd(password, uppercase=False):
"""
>>> md5_generic_passwd(password='testpass', uppercase=False)
'179ad45c6ce2cb97cf1029e212046e81'
"""
retVal = md5(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha1_generic_passwd(password, uppercase=False):
"""
>>> sha1_generic_passwd(password='testpass', uppercase=False)
'206c80413b9a96c1312cc346b7d2517b84463edd'
"""
retVal = sha1(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha224_generic_passwd(password, uppercase=False):
"""
>>> sha224_generic_passwd(password='testpass', uppercase=False)
'648db6019764b598f75ab6b7616d2e82563a00eb1531680e19ac4c6f'
"""
retVal = sha224(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha384_generic_passwd(password, uppercase=False):
"""
>>> sha384_generic_passwd(password='testpass', uppercase=False)
'6823546e56adf46849343be991d4b1be9b432e42ed1b4bb90635a0e4b930e49b9ca007bc3e04bf0a4e0df6f1f82769bf'
"""
retVal = sha384(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def sha512_generic_passwd(password, uppercase=False):
"""
>>> sha512_generic_passwd(password='testpass', uppercase=False)
'78ddc8555bb1677ff5af75ba5fc02cb30bb592b0610277ae15055e189b77fe3fda496e5027a3d99ec85d54941adee1cc174b50438fdc21d82d0a79f85b58cf44'
"""
retVal = sha512(password).hexdigest()
return retVal.upper() if uppercase else retVal.lower()
def crypt_generic_passwd(password, salt, uppercase=False):
"""
Reference(s):
http://docs.python.org/library/crypt.html
http://helpful.knobs-dials.com/index.php/Hashing_notes
http://php.net/manual/en/function.crypt.php
http://carey.geek.nz/code/python-fcrypt/
>>> crypt_generic_passwd(password='rasmuslerdorf', salt='rl', uppercase=False)
'rl.3StKT.4T8M'
"""
retVal = crypt(password, salt)
return retVal.upper() if uppercase else retVal
def wordpress_passwd(password, salt, count, prefix, uppercase=False):
"""
Reference(s):
http://packetstormsecurity.org/files/74448/phpassbrute.py.txt
http://scriptserver.mainframe8.com/wordpress_password_hasher.php
>>> wordpress_passwd(password='testpass', salt='aD9ZLmkp', count=2048, prefix='$P$9aD9ZLmkp', uppercase=False)
'$P$9aD9ZLmkpsN4A83G8MefaaP888gVKX0'
"""
def _encode64(input_, count):
output = ''
i = 0
while i < count:
value = ord(input_[i])
i += 1
output = output + ITOA64[value & 0x3f]
if i < count:
value = value | (ord(input_[i]) << 8)
output = output + ITOA64[(value >> 6) & 0x3f]
i += 1
if i >= count:
break
if i < count:
value = value | (ord(input_[i]) << 16)
output = output + ITOA64[(value >> 12) & 0x3f]
i += 1
if i >= count:
break
output = output + ITOA64[(value >> 18) & 0x3f]
return output
cipher = md5(salt)
cipher.update(password)
hash_ = cipher.digest()
for i in xrange(count):
_ = md5(hash_)
_.update(password)
hash_ = _.digest()
retVal = prefix + _encode64(hash_, 16)
return retVal.upper() if uppercase else retVal
__functions__ = {
HASH.MYSQL: mysql_passwd,
HASH.MYSQL_OLD: mysql_old_passwd,
HASH.POSTGRES: postgres_passwd,
HASH.MSSQL: mssql_passwd,
HASH.MSSQL_OLD: mssql_old_passwd,
HASH.MSSQL_NEW: mssql_new_passwd,
HASH.ORACLE: oracle_passwd,
HASH.ORACLE_OLD: oracle_old_passwd,
HASH.MD5_GENERIC: md5_generic_passwd,
HASH.SHA1_GENERIC: sha1_generic_passwd,
HASH.SHA224_GENERIC: sha224_generic_passwd,
HASH.SHA384_GENERIC: sha384_generic_passwd,
HASH.SHA512_GENERIC: sha512_generic_passwd,
HASH.CRYPT_GENERIC: crypt_generic_passwd,
HASH.WORDPRESS: wordpress_passwd,
}
def storeHashesToFile(attack_dict):
if not attack_dict:
return
if kb.storeHashesChoice is None:
message = "do you want to store hashes to a temporary file "
message += "for eventual further processing with other tools [y/N] "
test = readInput(message, default="N")
kb.storeHashesChoice = test[0] in ("y", "Y")
if not kb.storeHashesChoice:
return
handle, filename = tempfile.mkstemp(prefix="sqlmaphashes-", suffix=".txt")
os.close(handle)
infoMsg = "writing hashes to a temporary file '%s' " % filename
logger.info(infoMsg)
items = set()
with open(filename, "w+") as f:
for user, hashes in attack_dict.items():
for hash_ in hashes:
hash_ = hash_.split()[0] if hash_ and hash_.strip() else hash_
if hash_ and hash_ != NULL and hashRecognition(hash_):
item = None
if user and not user.startswith(DUMMY_USER_PREFIX):
item = "%s:%s\n" % (user.encode(UNICODE_ENCODING), hash_.encode(UNICODE_ENCODING))
else:
item = "%s\n" % hash_.encode(UNICODE_ENCODING)
if item and item not in items:
f.write(item)
items.add(item)
def attackCachedUsersPasswords():
if kb.data.cachedUsersPasswords:
results = dictionaryAttack(kb.data.cachedUsersPasswords)
lut = {}
for (_, hash_, password) in results:
lut[hash_.lower()] = password
for user in kb.data.cachedUsersPasswords.keys():
for i in xrange(len(kb.data.cachedUsersPasswords[user])):
value = kb.data.cachedUsersPasswords[user][i].lower().split()[0]
if value in lut:
kb.data.cachedUsersPasswords[user][i] += "%s clear-text password: %s" % ('\n' if kb.data.cachedUsersPasswords[user][i][-1] != '\n' else '', lut[value])
def attackDumpedTable():
if kb.data.dumpedTable:
table = kb.data.dumpedTable
columns = table.keys()
count = table["__infos__"]["count"]
if not count:
return
infoMsg = "analyzing table dump for possible password hashes"
logger.info(infoMsg)
found = False
col_user = ''
col_passwords = set()
attack_dict = {}
for column in columns:
if column and column.lower() in COMMON_USER_COLUMNS:
col_user = column
break
for i in xrange(count):
if not found and i > HASH_RECOGNITION_QUIT_THRESHOLD:
break
for column in columns:
if column == col_user or column == '__infos__':
continue
if len(table[column]['values']) <= i:
continue
value = table[column]['values'][i]
if hashRecognition(value):
found = True
if col_user and i < len(table[col_user]['values']):
if table[col_user]['values'][i] not in attack_dict:
attack_dict[table[col_user]['values'][i]] = []
attack_dict[table[col_user]['values'][i]].append(value)
else:
attack_dict['%s%d' % (DUMMY_USER_PREFIX, i)] = [value]
col_passwords.add(column)
if attack_dict:
infoMsg = "recognized possible password hashes in column%s " % ("s" if len(col_passwords) > 1 else "")
infoMsg += "'%s'" % ", ".join(col for col in col_passwords)
logger.info(infoMsg)
storeHashesToFile(attack_dict)
message = "do you want to crack them via a dictionary-based attack? %s" % ("[y/N/q]" if conf.multipleTargets else "[Y/n/q]")
test = readInput(message, default="N" if conf.multipleTargets else "Y")
if test[0] in ("n", "N"):
return
elif test[0] in ("q", "Q"):
raise SqlmapUserQuitException
results = dictionaryAttack(attack_dict)
lut = dict()
for (_, hash_, password) in results:
if hash_:
lut[hash_.lower()] = password
infoMsg = "postprocessing table dump"
logger.info(infoMsg)
for i in xrange(count):
for column in columns:
if not (column == col_user or column == '__infos__' or len(table[column]['values']) <= i):
value = table[column]['values'][i]
if value and value.lower() in lut:
table[column]['values'][i] += " (%s)" % lut[value.lower()]
table[column]['length'] = max(table[column]['length'], len(table[column]['values'][i]))
def hashRecognition(value):
retVal = None
isOracle, isMySQL = Backend.isDbms(DBMS.ORACLE), Backend.isDbms(DBMS.MYSQL)
if isinstance(value, basestring):
for name, regex in getPublicTypeMembers(HASH):
# Hashes for Oracle and old MySQL look the same hence these checks
if isOracle and regex == HASH.MYSQL_OLD:
continue
elif isMySQL and regex == HASH.ORACLE_OLD:
continue
elif regex == HASH.CRYPT_GENERIC:
if any((value.lower() == value, value.upper() == value)):
continue
elif re.match(regex, value):
retVal = regex
break
return retVal
def _bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, proc_id, proc_count, wordlists, custom_wordlist):
count = 0
rotator = 0
hashes = set([item[0][1] for item in attack_info])
wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist)
try:
for word in wordlist:
if not attack_info:
break
if not isinstance(word, basestring):
continue
if suffix:
word = word + suffix
try:
current = __functions__[hash_regex](password=word, uppercase=False)
count += 1
if current in hashes:
for item in attack_info[:]:
((user, hash_), _) = item
if hash_ == current:
retVal.put((user, hash_, word))
clearConsoleLine()
infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'\n" % user
else:
infoMsg += " for hash '%s'\n" % hash_
dataToStdout(infoMsg, True)
attack_info.remove(item)
elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0 or hash_regex == HASH.ORACLE_OLD or hash_regex == HASH.CRYPT_GENERIC and IS_WIN:
rotator += 1
if rotator >= len(ROTATING_CHARS):
rotator = 0
status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator])
if not hasattr(conf, "api"):
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status))
except KeyboardInterrupt:
raise
except (UnicodeEncodeError, UnicodeDecodeError):
pass # ignore possible encoding problems caused by some words in custom dictionaries
except Exception, e:
warnMsg = "there was a problem while hashing entry: %s (%s). " % (repr(word), e)
warnMsg += "Please report by e-mail to %s" % ML
logger.critical(warnMsg)
except KeyboardInterrupt:
pass
finally:
if hasattr(proc_count, "value"):
with proc_count.get_lock():
proc_count.value -= 1
def _bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found, proc_id, proc_count, wordlists, custom_wordlist):
count = 0
rotator = 0
wordlist = Wordlist(wordlists, proc_id, getattr(proc_count, "value", 0), custom_wordlist)
try:
for word in wordlist:
if found.value:
break
current = __functions__[hash_regex](password=word, uppercase=False, **kwargs)
count += 1
if not isinstance(word, basestring):
continue
if suffix:
word = word + suffix
try:
if hash_ == current:
if hash_regex == HASH.ORACLE_OLD: # only for cosmetic purposes
word = word.upper()
retVal.put((user, hash_, word))
clearConsoleLine()
infoMsg = "\r[%s] [INFO] cracked password '%s'" % (time.strftime("%X"), word)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'\n" % user
else:
infoMsg += " for hash '%s'\n" % hash_
dataToStdout(infoMsg, True)
found.value = True
elif (proc_id == 0 or getattr(proc_count, "value", 0) == 1) and count % HASH_MOD_ITEM_DISPLAY == 0 or hash_regex == HASH.ORACLE_OLD or hash_regex == HASH.CRYPT_GENERIC and IS_WIN:
rotator += 1
if rotator >= len(ROTATING_CHARS):
rotator = 0
status = 'current status: %s... %s' % (word.ljust(5)[:5], ROTATING_CHARS[rotator])
if not user.startswith(DUMMY_USER_PREFIX):
status += ' (user: %s)' % user
if not hasattr(conf, "api"):
dataToStdout("\r[%s] [INFO] %s" % (time.strftime("%X"), status))
except KeyboardInterrupt:
raise
except (UnicodeEncodeError, UnicodeDecodeError):
pass # ignore possible encoding problems caused by some words in custom dictionaries
except Exception, e:
warnMsg = "there was a problem while hashing entry: %s (%s). " % (repr(word), e)
warnMsg += "Please report by e-mail to %s" % ML
logger.critical(warnMsg)
except KeyboardInterrupt:
pass
finally:
if hasattr(proc_count, "value"):
with proc_count.get_lock():
proc_count.value -= 1
def dictionaryAttack(attack_dict):
suffix_list = [""]
custom_wordlist = []
hash_regexes = []
results = []
resumes = []
processException = False
user_hash = []
for (_, hashes) in attack_dict.items():
for hash_ in hashes:
if not hash_:
continue
hash_ = hash_.split()[0]
regex = hashRecognition(hash_)
if regex and regex not in hash_regexes:
hash_regexes.append(regex)
infoMsg = "using hash method '%s'" % __functions__[regex].func_name
logger.info(infoMsg)
for hash_regex in hash_regexes:
keys = set()
attack_info = []
for (user, hashes) in attack_dict.items():
for hash_ in hashes:
if not hash_:
continue
hash_ = hash_.split()[0]
if re.match(hash_regex, hash_):
item = None
if hash_regex not in (HASH.CRYPT_GENERIC, HASH.WORDPRESS):
hash_ = hash_.lower()
if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
item = [(user, hash_), {}]
elif hash_regex in (HASH.ORACLE_OLD, HASH.POSTGRES):
item = [(user, hash_), {'username': user}]
elif hash_regex in (HASH.ORACLE):
item = [(user, hash_), {'salt': hash_[-20:]}]
elif hash_regex in (HASH.MSSQL, HASH.MSSQL_OLD, HASH.MSSQL_NEW):
item = [(user, hash_), {'salt': hash_[6:14]}]
elif hash_regex in (HASH.CRYPT_GENERIC):
item = [(user, hash_), {'salt': hash_[0:2]}]
elif hash_regex in (HASH.WORDPRESS):
item = [(user, hash_), {'salt': hash_[4:12], 'count': 1 << ITOA64.index(hash_[3]), 'prefix': hash_[:12]}]
if item and hash_ not in keys:
resumed = hashDBRetrieve(hash_)
if not resumed:
attack_info.append(item)
user_hash.append(item[0])
else:
infoMsg = "resuming password '%s' for hash '%s'" % (resumed, hash_)
if user and not user.startswith(DUMMY_USER_PREFIX):
infoMsg += " for user '%s'" % user
logger.info(infoMsg)
resumes.append((user, hash_, resumed))
keys.add(hash_)
if not attack_info:
continue
if not kb.wordlists:
while not kb.wordlists:
                # ORACLE_OLD and WORDPRESS are the slowest methods to hash, hence the smaller default dictionary
if hash_regex in (HASH.ORACLE_OLD, HASH.WORDPRESS):
dictPaths = [paths.SMALL_DICT]
else:
dictPaths = [paths.WORDLIST]
message = "what dictionary do you want to use?\n"
message += "[1] default dictionary file '%s' (press Enter)\n" % dictPaths[0]
message += "[2] custom dictionary file\n"
message += "[3] file with list of dictionary files"
choice = readInput(message, default="1")
try:
if choice == "2":
message = "what's the custom dictionary's location?\n"
dictPaths = [readInput(message)]
logger.info("using custom dictionary")
elif choice == "3":
message = "what's the list file location?\n"
listPath = readInput(message)
checkFile(listPath)
dictPaths = getFileItems(listPath)
logger.info("using custom list of dictionaries")
else:
logger.info("using default dictionary")
for dictPath in dictPaths:
checkFile(dictPath)
kb.wordlists = dictPaths
except SqlmapFilePathException, msg:
warnMsg = "there was a problem while loading dictionaries"
warnMsg += " ('%s')" % msg
logger.critical(warnMsg)
message = "do you want to use common password suffixes? (slow!) [y/N] "
test = readInput(message, default="N")
if test[0] in ("y", "Y"):
suffix_list += COMMON_PASSWORD_SUFFIXES
infoMsg = "starting dictionary-based cracking (%s)" % __functions__[hash_regex].func_name
logger.info(infoMsg)
for item in attack_info:
((user, _), _) = item
if user and not user.startswith(DUMMY_USER_PREFIX):
custom_wordlist.append(normalizeUnicode(user))
if hash_regex in (HASH.MYSQL, HASH.MYSQL_OLD, HASH.MD5_GENERIC, HASH.SHA1_GENERIC):
for suffix in suffix_list:
if not attack_info or processException:
break
if suffix:
clearConsoleLine()
infoMsg = "using suffix '%s'" % suffix
logger.info(infoMsg)
retVal = None
processes = []
try:
if _multiprocessing:
if _multiprocessing.cpu_count() > 1:
infoMsg = "starting %d processes " % _multiprocessing.cpu_count()
singleTimeLogMessage(infoMsg)
gc.disable()
retVal = _multiprocessing.Queue()
count = _multiprocessing.Value('i', _multiprocessing.cpu_count())
for i in xrange(_multiprocessing.cpu_count()):
p = _multiprocessing.Process(target=_bruteProcessVariantA, args=(attack_info, hash_regex, suffix, retVal, i, count, kb.wordlists, custom_wordlist))
processes.append(p)
for p in processes:
p.daemon = True
p.start()
while count.value > 0:
time.sleep(0.5)
else:
warnMsg = "multiprocessing hash cracking is currently "
warnMsg += "not supported on this platform"
singleTimeWarnMessage(warnMsg)
retVal = Queue()
_bruteProcessVariantA(attack_info, hash_regex, suffix, retVal, 0, 1, kb.wordlists, custom_wordlist)
except KeyboardInterrupt:
print
processException = True
warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
logger.warn(warnMsg)
for process in processes:
try:
process.terminate()
process.join()
except OSError:
pass
finally:
if _multiprocessing:
gc.enable()
if retVal:
conf.hashDB.beginTransaction()
while not retVal.empty():
user, hash_, word = item = retVal.get(block=False)
attack_info = filter(lambda _: _[0][0] != user or _[0][1] != hash_, attack_info)
hashDBWrite(hash_, word)
results.append(item)
conf.hashDB.endTransaction()
clearConsoleLine()
else:
for ((user, hash_), kwargs) in attack_info:
if processException:
break
if any(_[0] == user and _[1] == hash_ for _ in results):
continue
count = 0
found = False
for suffix in suffix_list:
if found or processException:
break
if suffix:
clearConsoleLine()
infoMsg = "using suffix '%s'" % suffix
logger.info(infoMsg)
retVal = None
processes = []
try:
if _multiprocessing:
if _multiprocessing.cpu_count() > 1:
infoMsg = "starting %d processes " % _multiprocessing.cpu_count()
singleTimeLogMessage(infoMsg)
gc.disable()
retVal = _multiprocessing.Queue()
found_ = _multiprocessing.Value('i', False)
count = _multiprocessing.Value('i', _multiprocessing.cpu_count())
for i in xrange(_multiprocessing.cpu_count()):
p = _multiprocessing.Process(target=_bruteProcessVariantB, args=(user, hash_, kwargs, hash_regex, suffix, retVal, found_, i, count, kb.wordlists, custom_wordlist))
processes.append(p)
for p in processes:
p.daemon = True
p.start()
while count.value > 0:
time.sleep(0.5)
found = found_.value != 0
else:
warnMsg = "multiprocessing hash cracking is currently "
warnMsg += "not supported on this platform"
singleTimeWarnMessage(warnMsg)
class Value():
pass
retVal = Queue()
found_ = Value()
found_.value = False
_bruteProcessVariantB(user, hash_, kwargs, hash_regex, suffix, retVal, found_, 0, 1, kb.wordlists, custom_wordlist)
found = found_.value
except KeyboardInterrupt:
print
processException = True
warnMsg = "user aborted during dictionary-based attack phase (Ctrl+C was pressed)"
logger.warn(warnMsg)
for process in processes:
try:
process.terminate()
process.join()
except OSError:
pass
finally:
if _multiprocessing:
gc.enable()
if retVal:
conf.hashDB.beginTransaction()
while not retVal.empty():
user, hash_, word = item = retVal.get(block=False)
hashDBWrite(hash_, word)
results.append(item)
conf.hashDB.endTransaction()
clearConsoleLine()
results.extend(resumes)
if len(hash_regexes) == 0:
warnMsg = "unknown hash format. "
warnMsg += "Please report by e-mail to %s" % ML
logger.warn(warnMsg)
if len(results) == 0:
warnMsg = "no clear password(s) found"
logger.warn(warnMsg)
return results
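# A hedged usage sketch, not part of the original sqlmap module: the generic
# *_passwd helpers above all share the (password, uppercase=False) signature and
# are dispatched through __functions__ according to the HASH type returned by
# hashRecognition(). The expected values below are taken from the doctests
# earlier in this file.
if __name__ == "__main__":
    assert md5_generic_passwd(password="testpass") == "179ad45c6ce2cb97cf1029e212046e81"
    assert sha1_generic_passwd(password="testpass") == "206c80413b9a96c1312cc346b7d2517b84463edd"
    print("generic hash helpers OK")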
| golismero/golismero | tools/sqlmap/lib/utils/hash.py | Python | gpl-2.0 | 34,425 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Customer References',
'category': 'Website',
'website': 'https://www.odoo.com/page/website-builder',
'summary': 'Publish Your Customer References',
'version': '1.0',
'description': """
Odoo Customer References
===========================
""",
'depends': [
'crm_partner_assign',
'website_partner',
'website_google_map',
],
'demo': [
'website_customer_demo.xml',
],
'data': [
'views/website_customer.xml',
'views/website_customer_view.xml',
'security/ir.model.access.csv',
'security/ir_rule.xml',
],
'qweb': [],
'installable': True,
}
| ghandiosm/Test | addons/website_customer/__openerp__.py | Python | gpl-3.0 | 764 |
"""
System Logging Handler
"""
__RCSID__ = "$Id$"
import logging
import Queue
import threading
from DIRAC.Core.Utilities import Network
class ServerHandler(logging.Handler, threading.Thread):
"""
  ServerHandler is a custom logging handler.
  It has no equivalent in the standard logging library because it is tightly coupled to DIRAC.
  Like the StreamHandler sends log messages to a stream and the FileHandler to a file,
  this handler sends them to a DIRAC service: SystemLogging, which stores log messages in a database.
  Only log messages of level WARN or above are sent. The handler runs in a thread and sends
  the queued messages every 'sleepTime' seconds.
  When a message must be emitted, it is added to a queue before being sent.
"""
def __init__(self, sleepTime, interactive, site):
"""
Initialization of the ServerHandler.
    The queue and the hostname are initialized and the sending thread is started.
    :params sleepTime: integer, interval in seconds between two sends of the queued messages.
:params interactive: not used at the moment.
:params site: the site where the log messages come from.
"""
super(ServerHandler, self).__init__()
threading.Thread.__init__(self)
self.__logQueue = Queue.Queue()
self.__sleepTime = sleepTime
self.__interactive = interactive
self.__site = site
self.__transactions = []
self.__hostname = Network.getFQDN()
self.__alive = True
self.__maxBundledLogs = 20
self.setDaemon(True)
self.start()
def emit(self, record):
"""
Add the record to the queue.
:params record: log record object
"""
self.__logQueue.put(record)
def run(self):
import time
while self.__alive:
self.__bundleLogs()
time.sleep(float(self.__sleepTime))
def __bundleLogs(self):
"""
    Prepare the logs for sending.
    This method creates a tuple from each record and adds it to the bundle to be sent.
    A tuple is necessary because the service handles messages in this form.
"""
while not self.__logQueue.empty():
bundle = []
while (len(bundle) < self.__maxBundledLogs) and (not self.__logQueue.empty()):
record = self.__logQueue.get()
self.format(record)
logTuple = (record.componentname, record.levelname, record.created, record.getMessage(), record.varmessage,
record.pathname + ":" + str(record.lineno), record.name)
bundle.append(logTuple)
if bundle:
self.__sendLogToServer(bundle)
if self.__transactions:
self.__sendLogToServer()
def __sendLogToServer(self, logBundle=None):
"""
Send log to the SystemLogging service.
    :params logBundle: list of logs ready to be sent to the service
"""
from DIRAC.Core.DISET.RPCClient import RPCClient
if logBundle:
self.__transactions.append(logBundle)
transactionsLength = len(self.__transactions)
if transactionsLength > 100:
del self.__transactions[:transactionsLength - 100]
transactionsLength = 100
try:
oSock = RPCClient("Framework/SystemLogging")
except Exception:
return False
while transactionsLength:
result = oSock.addMessages(self.__transactions[0], self.__site, self.__hostname)
if result['OK']:
transactionsLength = transactionsLength - 1
self.__transactions.pop(0)
else:
return False
return True
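# A hedged usage sketch, not part of the original DIRAC module: wiring the handler
# into a standard logger. The component and site names are made-up examples, and the
# DIRAC-specific record attributes (componentname, varmessage) are passed via 'extra'
# because __bundleLogs() expects them; inside DIRAC they are added by the logging
# backend itself.
if __name__ == "__main__":
  import logging
  serverHandler = ServerHandler(sleepTime=1, interactive=False, site="LCG.Example.org")
  log = logging.getLogger("dirac.example")
  log.addHandler(serverHandler)
  log.warning("something looks wrong",
              extra={"componentname": "Framework/Example", "varmessage": ""})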
| andresailer/DIRAC | FrameworkSystem/private/standardLogging/Handler/ServerHandler.py | Python | gpl-3.0 | 3,460 |
# -*- coding: utf-8 -*-
import itertools
from sqlparse import sql
from sqlparse import tokens as T
try:
next
except NameError: # Python < 2.6
next = lambda i: i.next()
def _group_left_right(tlist, ttype, value, cls,
check_right=lambda t: True,
check_left=lambda t: True,
include_semicolon=False):
[_group_left_right(sgroup, ttype, value, cls, check_right, check_left,
include_semicolon) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, cls)]
idx = 0
token = tlist.token_next_match(idx, ttype, value)
while token:
right = tlist.token_next(tlist.token_index(token))
left = tlist.token_prev(tlist.token_index(token))
if right is None or not check_right(right):
token = tlist.token_next_match(tlist.token_index(token) + 1,
ttype, value)
elif left is None or not check_left(left):
token = tlist.token_next_match(tlist.token_index(token) + 1,
ttype, value)
else:
if include_semicolon:
sright = tlist.token_next_match(tlist.token_index(right),
T.Punctuation, ';')
if sright is not None:
# only overwrite "right" if a semicolon is actually
# present.
right = sright
tokens = tlist.tokens_between(left, right)[1:]
if not isinstance(left, cls):
new = cls([left])
new_idx = tlist.token_index(left)
tlist.tokens.remove(left)
tlist.tokens.insert(new_idx, new)
left = new
left.tokens.extend(tokens)
for t in tokens:
tlist.tokens.remove(t)
token = tlist.token_next_match(tlist.token_index(left) + 1,
ttype, value)
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon=False, recurse=False):
def _find_matching(i, tl, stt, sva, ett, eva):
depth = 1
for n in xrange(i, len(tl.tokens)):
t = tl.tokens[n]
if t.match(stt, sva):
depth += 1
elif t.match(ett, eva):
depth -= 1
if depth == 1:
return t
return None
[_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
cls, include_semicolon) for sgroup in tlist.get_sublists()
if recurse]
if isinstance(tlist, cls):
idx = 1
else:
idx = 0
token = tlist.token_next_match(idx, start_ttype, start_value)
while token:
tidx = tlist.token_index(token)
end = _find_matching(tidx, tlist, start_ttype, start_value,
end_ttype, end_value)
if end is None:
idx = tidx + 1
else:
if include_semicolon:
next_ = tlist.token_next(tlist.token_index(end))
if next_ and next_.match(T.Punctuation, ';'):
end = next_
group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
_group_matching(group, start_ttype, start_value,
end_ttype, end_value, cls, include_semicolon)
idx = tlist.token_index(group) + 1
token = tlist.token_next_match(idx, start_ttype, start_value)
def group_if(tlist):
_group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True)
def group_for(tlist):
_group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP',
sql.For, True)
def group_as(tlist):
def _right_valid(token):
        # Currently limited to DML/DDL. Maybe more non-SQL reserved
        # keywords should appear here (see issue8).
return not token.ttype in (T.DML, T.DDL)
def _left_valid(token):
return token.ttype is not T.Keyword
_group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
check_right=_right_valid,
check_left=_left_valid)
def group_assignment(tlist):
_group_left_right(tlist, T.Assignment, ':=', sql.Assignment,
include_semicolon=True)
def group_comparison(tlist):
def _parts_valid(token):
return (token.ttype in (T.String.Symbol, T.Name, T.Number,
T.Number.Integer, T.Literal,
T.Literal.Number.Integer)
or isinstance(token, (sql.Identifier,)))
_group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
check_left=_parts_valid, check_right=_parts_valid)
def group_case(tlist):
_group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case,
include_semicolon=True, recurse=True)
def group_identifier(tlist):
def _consume_cycle(tl, i):
        # TODO: Usage of the Wildcard token is ambiguous here.
x = itertools.cycle((
lambda y: (y.match(T.Punctuation, '.')
or y.ttype is T.Operator
or y.ttype is T.Wildcard),
lambda y: (y.ttype in (T.String.Symbol,
T.String.Single,
T.Name,
T.Wildcard,
T.Literal.Number.Integer))))
for t in tl.tokens[i:]:
# Don't take whitespaces into account.
if t.ttype is T.Whitespace:
yield t
continue
if next(x)(t):
yield t
else:
raise StopIteration
def _next_token(tl, i):
# chooses the next token. if two tokens are found then the
# first is returned.
t1 = tl.token_next_by_type(
i, (T.String.Symbol, T.String.Single, T.Name))
t2 = tl.token_next_by_instance(i, sql.Function)
if t1 and t2:
i1 = tl.token_index(t1)
i2 = tl.token_index(t2)
if i1 > i2:
return t2
else:
return t1
elif t1:
return t1
else:
return t2
# bottom up approach: group subgroups first
[group_identifier(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Identifier)]
# real processing
idx = 0
token = _next_token(tlist, idx)
while token:
identifier_tokens = [token] + list(
_consume_cycle(tlist,
tlist.token_index(token) + 1))
# remove trailing whitespace
if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
identifier_tokens = identifier_tokens[:-1]
if not (len(identifier_tokens) == 1
and isinstance(identifier_tokens[0], sql.Function)):
group = tlist.group_tokens(sql.Identifier, identifier_tokens)
idx = tlist.token_index(group) + 1
else:
idx += 1
token = _next_token(tlist, idx)
def group_identifier_list(tlist):
[group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.IdentifierList)]
idx = 0
# Allowed list items
fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
sql.Case)),
lambda t: t.is_whitespace(),
lambda t: t.ttype == T.Name,
lambda t: t.ttype == T.Wildcard,
lambda t: t.match(T.Keyword, 'null'),
lambda t: t.match(T.Keyword, 'role'),
lambda t: t.ttype == T.Number.Integer,
lambda t: t.ttype == T.String.Single,
lambda t: t.ttype == T.Name.Placeholder,
lambda t: t.ttype == T.Keyword,
lambda t: isinstance(t, sql.Comparison),
lambda t: isinstance(t, sql.Comment),
]
tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
start = None
while tcomma is not None:
before = tlist.token_prev(tcomma)
after = tlist.token_next(tcomma)
# Check if the tokens around tcomma belong to a list
bpassed = apassed = False
for func in fend1_funcs:
if before is not None and func(before):
bpassed = True
if after is not None and func(after):
apassed = True
if not bpassed or not apassed:
# Something's wrong here, skip ahead to next ","
start = None
tcomma = tlist.token_next_match(tlist.token_index(tcomma) + 1,
T.Punctuation, ',')
else:
if start is None:
start = before
next_ = tlist.token_next(after)
if next_ is None or not next_.match(T.Punctuation, ','):
# Reached the end of the list
tokens = tlist.tokens_between(start, after)
group = tlist.group_tokens(sql.IdentifierList, tokens)
start = None
tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
T.Punctuation, ',')
else:
tcomma = next_
def group_parenthesis(tlist):
_group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')',
sql.Parenthesis)
def group_comments(tlist):
[group_comments(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Comment)]
idx = 0
token = tlist.token_next_by_type(idx, T.Comment)
while token:
tidx = tlist.token_index(token)
end = tlist.token_not_matching(tidx + 1,
[lambda t: t.ttype in T.Comment,
lambda t: t.is_whitespace()])
if end is None:
idx = tidx + 1
else:
eidx = tlist.token_index(end)
grp_tokens = tlist.tokens_between(token,
tlist.token_prev(eidx, False))
group = tlist.group_tokens(sql.Comment, grp_tokens)
idx = tlist.token_index(group)
token = tlist.token_next_by_type(idx, T.Comment)
def group_where(tlist):
[group_where(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Where)]
idx = 0
token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION')
while token:
tidx = tlist.token_index(token)
end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords)
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
group = tlist.group_tokens(sql.Where,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(group)
token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
def group_aliased(tlist):
clss = (sql.Identifier, sql.Function, sql.Case)
[group_aliased(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, clss)]
idx = 0
token = tlist.token_next_by_instance(idx, clss)
while token:
next_ = tlist.token_next(tlist.token_index(token))
if next_ is not None and isinstance(next_, clss):
if not next_.value.upper().startswith('VARCHAR'):
grp = tlist.tokens_between(token, next_)[1:]
token.tokens.extend(grp)
for t in grp:
tlist.tokens.remove(t)
idx = tlist.token_index(token) + 1
token = tlist.token_next_by_instance(idx, clss)
def group_typecasts(tlist):
_group_left_right(tlist, T.Punctuation, '::', sql.Identifier)
def group_functions(tlist):
[group_functions(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, sql.Function)]
idx = 0
token = tlist.token_next_by_type(idx, T.Name)
while token:
next_ = tlist.token_next(token)
if not isinstance(next_, sql.Parenthesis):
idx = tlist.token_index(token) + 1
else:
func = tlist.group_tokens(sql.Function,
tlist.tokens_between(token, next_))
idx = tlist.token_index(func) + 1
token = tlist.token_next_by_type(idx, T.Name)
def group_order(tlist):
idx = 0
token = tlist.token_next_by_type(idx, T.Keyword.Order)
while token:
prev = tlist.token_prev(token)
if isinstance(prev, sql.Identifier):
ido = tlist.group_tokens(sql.Identifier,
tlist.tokens_between(prev, token))
idx = tlist.token_index(ido) + 1
else:
idx = tlist.token_index(token) + 1
token = tlist.token_next_by_type(idx, T.Keyword.Order)
def group(tlist):
for func in [
group_comments,
group_parenthesis,
group_functions,
group_where,
group_case,
group_identifier,
group_order,
group_typecasts,
group_as,
group_aliased,
group_assignment,
group_comparison,
group_identifier_list,
group_if,
group_for]:
func(tlist)
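# A hedged usage sketch, not part of the original sqlparse module: sqlparse.parse()
# runs this grouping stage through its filter stack, so the grouped token classes
# shown below are the ones produced by group(). It assumes the sqlparse package this
# file belongs to is importable.
if __name__ == '__main__':
    import sqlparse
    stmt = sqlparse.parse('SELECT a, b FROM t WHERE a = 1;')[0]
    # group_identifier_list() wraps "a, b" into an IdentifierList and group_where()
    # wraps the WHERE clause into a sql.Where token.
    print([t.__class__.__name__ for t in stmt.tokens if isinstance(t, sql.TokenList)])
    assert any(isinstance(t, sql.Where) for t in stmt.tokens)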
| andybab/Impala | shell/ext-py/sqlparse-0.1.7/sqlparse/engine/grouping.py | Python | apache-2.0 | 13,718 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import uuid
import mock
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute import servers
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests.image import fake
from nova.tests import matchers
from nova.tests import utils
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
def return_server_not_found(*arg, **kwarg):
raise exception.NotFound()
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, kwargs, update_cells=True):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
return inst
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
class ServerActionsControllerTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
def setUp(self):
super(ServerActionsControllerTest, self).setUp()
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fake.stub_out_image_service(self.stubs)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True)
self.uuid = FAKE_UUID
self.url = '/v2/fake/servers/%s/action' % self.uuid
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
class FakeExtManager(object):
def is_loaded(self, ext):
return False
self.controller = servers.Controller(ext_mgr=FakeExtManager())
self.compute_api = self.controller.compute_api
self.context = context.RequestContext('fake', 'fake')
self.app = fakes.wsgi_app(init_only=('servers',),
fake_auth_context=self.context)
def _make_request(self, url, body):
req = webob.Request.blank('/v2/fake' + url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
return req.get_response(self.app)
def _stub_instance_get(self, uuid=None):
self.mox.StubOutWithMock(compute_api.API, 'get')
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid,
want_objects=True).AndReturn(instance)
return instance
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
method = action
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(compute_api.API, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceIsLocked(instance_uuid=instance['uuid']))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
{action: body_map.get(action)})
self.assertEqual(409, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_actions_with_locked_instance(self):
actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
'rebuild']
method_translations = {'confirmResize': 'confirm_resize',
'revertResize': 'revert_resize'}
body_map = {'resize': {'flavorRef': '2'},
'reboot': {'type': 'HARD'},
'rebuild': {'imageRef': self.image_uuid,
'adminPass': 'TNc53Dr8s7vw'}}
        args_map = {'resize': (('2',), {}),
'confirmResize': ((), {}),
'reboot': (('HARD',), {}),
'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'),
{'files_to_inject': None})}
for action in actions:
method = method_translations.get(action)
self.mox.StubOutWithMock(compute_api.API, method or action)
self._test_locked_instance(action, method=method,
body_map=body_map,
compute_api_args_map=args_map)
def test_server_change_password(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': '1234pass'}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
        # note, the mock still contains the password.
self.assertEqual(mock_method.password, '1234pass')
def test_server_change_password_not_a_string(self):
body = {'changePassword': {'adminPass': 1234}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_server_change_password_empty_string(self):
mock_method = MockSetAdminPassword()
self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
body = {'changePassword': {'adminPass': ''}}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_change_password(req, FAKE_UUID, body)
self.assertEqual(mock_method.instance_id, self.uuid)
self.assertEqual(mock_method.password, '')
def test_server_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_change_password,
req, FAKE_UUID, body)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_none(self):
body = dict(reboot=dict(type=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_not_found(self):
self.stubs.Set(db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
req, str(uuid.uuid4()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequest.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequest.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_rebuild_preserve_ephemeral_is_ignored_when_ext_not_loaded(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"preserve_ephemeral": False,
},
}
req = fakes.HTTPRequest.blank(self.url)
context = req.environ['nova.context']
self.mox.StubOutWithMock(compute_api.API, 'rebuild')
compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
mox.IgnoreArg(), files_to_inject=None)
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
def _test_rebuild_preserve_ephemeral(self, value=None):
def fake_is_loaded(ext):
return ext == 'os-preserve-ephemeral-rebuild'
self.stubs.Set(self.controller.ext_mgr, 'is_loaded', fake_is_loaded)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
if value is not None:
body['rebuild']['preserve_ephemeral'] = value
req = fakes.HTTPRequest.blank(self.url)
context = req.environ['nova.context']
self.mox.StubOutWithMock(compute_api.API, 'rebuild')
if value is not None:
compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
mox.IgnoreArg(), preserve_ephemeral=value,
files_to_inject=None)
else:
compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
mox.IgnoreArg(), files_to_inject=None)
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body)
def test_rebuild_preserve_ephemeral_true(self):
self._test_rebuild_preserve_ephemeral(True)
def test_rebuild_preserve_ephemeral_false(self):
self._test_rebuild_preserve_ephemeral(False)
def test_rebuild_preserve_ephemeral_default(self):
self._test_rebuild_preserve_ephemeral()
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['adminPass']),
CONF.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_uuid,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v2/'
body = {
'rebuild': {
'imageRef': self.image_href,
},
}
req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn("adminPass", body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": metadata,
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": "stack",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_rebuild, req,
FAKE_UUID, body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_bad_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": "INVALID b64",
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_personality(self):
body = {
"rebuild": {
"imageRef": self._image_href,
"personality": [{
"path": "/path/to/file",
"contents": base64.b64encode("Test String"),
}]
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertNotIn('personality', body['server'])
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['adminPass'], 'asdf')
def test_rebuild_admin_pass_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"imageRef": self._image_href,
"adminPass": "asdf",
},
}
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn('adminPass', body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"imageRef": "foo",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_accessIP(self):
attributes = {
'access_ip_v4': '172.19.0.1',
'access_ip_v6': 'fe80::1',
}
body = {
"rebuild": {
"imageRef": self._image_href,
"accessIPv4": "172.19.0.1",
"accessIPv6": "fe80::1",
},
}
data = {'changes': {}}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
data['instance'] = orig_get(*args, **kwargs)
return data['instance']
def fake_save(context, **kwargs):
data['changes'].update(data['instance'].obj_get_changes())
self.stubs.Set(compute_api.API, 'get', wrap_get)
self.stubs.Set(objects.Instance, 'save', fake_save)
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_rebuild(req, FAKE_UUID, body)
self.assertEqual(self._image_href, data['changes']['image_ref'])
self.assertEqual("", data['changes']['kernel_id'])
self.assertEqual("", data['changes']['ramdisk_id'])
self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
self.assertEqual(0, data['changes']['progress'])
for attr, value in attributes.items():
self.assertEqual(value, str(data['changes'][attr]))
def test_rebuild_when_kernel_not_exists(self):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_rebuild_proper_kernel_ram(self):
instance_meta = {'kernel_id': None, 'ramdisk_id': None}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
inst = orig_get(*args, **kwargs)
instance_meta['instance'] = inst
return inst
def fake_save(context, **kwargs):
instance = instance_meta['instance']
for key in instance_meta.keys():
if key in instance.obj_what_changed():
instance_meta[key] = instance[key]
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
self.stubs.Set(compute_api.API, 'get', wrap_get)
self.stubs.Set(objects.Instance, 'save', fake_save)
body = {
"rebuild": {
"imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.controller._action_rebuild(req, FAKE_UUID, body).obj
self.assertEqual(instance_meta['kernel_id'], '1')
self.assertEqual(instance_meta['ramdisk_id'], '2')
@mock.patch.object(compute_api.API, 'rebuild')
def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
body = {
"rebuild": {
"imageRef": self._image_href,
},
}
req = fakes.HTTPRequest.blank(self.url)
mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body)
def test_resize_server(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(compute_api.API, 'resize', resize_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_resize(req, FAKE_UUID, body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavorRef=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.stubs.Set(compute_api.API, 'get', return_server_not_found)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_image_exceptions(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
self.resize_called = 0
image_id = 'fake_image_id'
exceptions = [
(exception.ImageNotAuthorized(image_id=image_id),
webob.exc.HTTPUnauthorized),
(exception.ImageNotFound(image_id=image_id),
webob.exc.HTTPBadRequest),
(exception.Invalid, webob.exc.HTTPBadRequest),
]
raised, expected = map(iter, zip(*exceptions))
def _fake_resize(obj, context, instance, flavor_id):
self.resize_called += 1
raise raised.next()
self.stubs.Set(compute_api.API, 'resize', _fake_resize)
for call_no in range(len(exceptions)):
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(expected.next(),
self.controller._action_resize,
req, FAKE_UUID, body)
self.assertEqual(self.resize_called, call_no + 1)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.CannotResizeDisk(reason=''))
def test_resize_raises_cannot_resize_disk(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.FlavorNotFound(reason='',
flavor_id='fake_id'))
def test_resize_raises_flavor_not_found(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavorRef="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
req, FAKE_UUID, body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.NoValidHost(reason=''))
def test_resize_raises_no_valid_host(self, mock_resize):
body = dict(resize=dict(flavorRef="http://localhost/3"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
@mock.patch.object(compute_api.API, 'resize')
def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
image='dummy')
body = dict(resize=dict(flavorRef="http://localhost/3"))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_confirm_resize_server(self):
body = dict(confirmResize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirmResize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'confirm_resize',
confirm_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirmResize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'confirm_resize',
fake_confirm_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'revert_resize',
revert_resize_mock)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
req = fakes.HTTPRequest.blank(self.url)
        self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_revert_resize,
req, "bad_server_id", body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
req = fakes.HTTPRequest.blank(self.url)
body = self.controller._action_revert_resize(req, FAKE_UUID, body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'revert_resize',
fake_revert_resize)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_create_image(self):
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_glance_link_prefix(self):
self.flags(osapi_glance_link_prefix='https://glancehost')
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('https://glancehost/v2/fake/images/123', location)
def test_create_image_name_too_long(self):
long_name = 'a' * 260
body = {
'createImage': {
'name': long_name,
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image, req,
FAKE_UUID, body)
def _do_test_create_volume_backed_image(self, extra_properties):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_properties:
body['createImage']['metadata'] = extra_properties
image_service = glance.get_default_image_service()
bdm = [dict(volume_id=_fake_id('a'),
volume_size=1,
device_name='vda',
delete_on_termination=False)]
props = dict(kernel_id=_fake_id('b'),
ramdisk_id=_fake_id('c'),
root_device_name='/dev/vda',
block_device_mapping=bdm)
original_image = dict(properties=props,
container_format='ami',
status='active',
is_public=True)
image_service.create(None, original_image)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref=original_image['id'],
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
self.mox.ReplayAll()
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
image_id = location.replace('http://localhost/v2/fake/images/', '')
image = image_service.show(None, image_id)
self.assertEqual(image['name'], 'snapshot_of_volume_backed')
properties = image['properties']
self.assertEqual(properties['kernel_id'], _fake_id('b'))
self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
self.assertEqual(properties['root_device_name'], '/dev/vda')
self.assertEqual(properties['bdm_v2'], True)
bdms = properties['block_device_mapping']
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['boot_index'], 0)
self.assertEqual(bdms[0]['source_type'], 'snapshot')
self.assertEqual(bdms[0]['destination_type'], 'volume')
self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
for fld in ('connection_info', 'id',
'instance_uuid', 'device_name'):
self.assertNotIn(fld, bdms[0])
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
def _test_create_volume_backed_image_with_metadata_from_volume(
self, extra_metadata=None):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(createImage=dict(name='snapshot_of_volume_backed'))
if extra_metadata:
body['createImage']['metadata'] = extra_metadata
image_service = glance.get_default_image_service()
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref='',
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
fake_metadata = {'test_key1': 'test_value1',
'test_key2': 'test_value2'}
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake',
volume_image_metadata=fake_metadata)
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
req = fakes.HTTPRequest.blank(self.url)
self.mox.ReplayAll()
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
image_id = location.replace('http://localhost/v2/fake/images/', '')
image = image_service.show(None, image_id)
properties = image['properties']
self.assertEqual(properties['test_key1'], 'test_value1')
self.assertEqual(properties['test_key2'], 'test_value2')
if extra_metadata:
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'createImage': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_with_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = fakes.HTTPRequest.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual('http://localhost/v2/fake/images/123', location)
def test_create_image_with_too_much_metadata(self):
body = {
'createImage': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota_metadata_items + 1):
body['createImage']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_no_name(self):
body = {
'createImage': {},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_blank_name(self):
body = {
'createImage': {
'name': '',
}
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_bad_metadata(self):
body = {
'createImage': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'snapshot', snapshot)
body = {
"createImage": {
"name": "test_snapshot",
},
}
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
req, FAKE_UUID, body)
class TestServerActionXMLDeserializer(test.TestCase):
def setUp(self):
super(TestServerActionXMLDeserializer, self).setUp()
self.deserializer = servers.ActionDeserializer()
def test_create_image(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
},
}
self.assertEqual(request['body'], expected)
def test_create_image_with_metadata(self):
serial_request = """
<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="key1">value1</meta>
</metadata>
</createImage>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"createImage": {
"name": "new-server-test",
"metadata": {"key1": "value1"},
},
}
self.assertEqual(request['body'], expected)
def test_change_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass="1234pass"/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "1234pass",
},
}
self.assertEqual(request['body'], expected)
def test_change_pass_no_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_change_pass_empty_pass(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<changePassword
xmlns="http://docs.openstack.org/compute/api/v1.1"
adminPass=""/> """
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"changePassword": {
"adminPass": "",
},
}
self.assertEqual(request['body'], expected)
def test_reboot(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"
type="HARD"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"reboot": {
"type": "HARD",
},
}
self.assertEqual(request['body'], expected)
def test_reboot_no_type(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<reboot
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"
flavorRef="http://localhost/flavors/3"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"resize": {"flavorRef": "http://localhost/flavors/3"},
}
self.assertEqual(request['body'], expected)
def test_resize_no_flavor_ref(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<resize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_confirm_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<confirmResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"confirmResize": None,
}
self.assertEqual(request['body'], expected)
def test_revert_resize(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<revertResize
xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"revertResize": None,
}
self.assertEqual(request['body'], expected)
def test_rebuild(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test"
imageRef="http://localhost/images/1">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"name": "new-server-test",
"imageRef": "http://localhost/images/1",
"metadata": {
"My Server Name": "Apache1",
},
"personality": [
{"path": "/etc/banner.txt", "contents": "Mg=="},
],
},
}
self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_minimum(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"imageRef": "http://localhost/images/1",
},
}
self.assertThat(request['body'], matchers.DictMatches(expected))
def test_rebuild_no_imageRef(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
name="new-server-test">
<metadata>
<meta key="My Server Name">Apache1</meta>
</metadata>
<personality>
<file path="/etc/banner.txt">Mg==</file>
</personality>
</rebuild>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_rebuild_blank_name(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"
name=""/>"""
self.assertRaises(AttributeError,
self.deserializer.deserialize,
serial_request,
'action')
def test_rebuild_preserve_ephemeral_passed(self):
serial_request = """<?xml version="1.0" encoding="UTF-8"?>
<rebuild
xmlns="http://docs.openstack.org/compute/api/v1.1"
imageRef="http://localhost/images/1"
preserve_ephemeral="true"/>"""
request = self.deserializer.deserialize(serial_request, 'action')
expected = {
"rebuild": {
"imageRef": "http://localhost/images/1",
"preserve_ephemeral": True,
},
}
self.assertThat(request['body'], matchers.DictMatches(expected))
def test_corrupt_xml(self):
"""Should throw a 400 error on corrupt xml."""
self.assertRaises(
exception.MalformedRequestBody,
self.deserializer.deserialize,
utils.killer_xml_body())
| tianweizhang/nova | nova/tests/api/openstack/compute/test_server_actions.py | Python | apache-2.0 | 59,521 |
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import test_lib
from neutron.plugins.vmware.common import sync
from neutron.tests.unit import test_extension_portsecurity as psec
from neutron.tests.unit.vmware.apiclient import fake
from neutron.tests.unit.vmware import get_fake_conf
from neutron.tests.unit.vmware import NSXAPI_NAME
from neutron.tests.unit.vmware import PLUGIN_NAME
from neutron.tests.unit.vmware import STUBS_PATH
class PortSecurityTestCase(psec.PortSecurityDBTestCase):
def setUp(self):
test_lib.test_config['config_files'] = [get_fake_conf('nsx.ini.test')]
# mock api client
self.fc = fake.FakeClient(STUBS_PATH)
self.mock_nsx = mock.patch(NSXAPI_NAME, autospec=True)
instance = self.mock_nsx.start()
instance.return_value.login.return_value = "the_cookie"
# Avoid runs of the synchronizer looping call
patch_sync = mock.patch.object(sync, '_start_loopingcall')
patch_sync.start()
instance.return_value.request.side_effect = self.fc.fake_request
super(PortSecurityTestCase, self).setUp(PLUGIN_NAME)
self.addCleanup(self.fc.reset_all)
self.addCleanup(self.mock_nsx.stop)
self.addCleanup(patch_sync.stop)
class TestPortSecurity(PortSecurityTestCase, psec.TestPortSecurity):
pass
| Juniper/neutron | neutron/tests/unit/vmware/extensions/test_portsecurity.py | Python | apache-2.0 | 1,953 |
from setuptools import setup
from nested.source_module import cc
from numba.pycc.platform import _patch_exec_command
def run_setup():
# Avoid sporadic crashes on Windows due to MSVCRT spawnve()
_patch_exec_command()
setup(ext_modules=[cc.distutils_extension()])
if __name__ == '__main__':
run_setup()
| seibert/numba | numba/tests/pycc_distutils_usecase/setup_setuptools_nested.py | Python | bsd-2-clause | 323 |
from __future__ import absolute_import
from ..message import Message
from . import register
@register
class pull_doc_req_1(Message):
''' Define the ``PULL-DOC-REQ`` message (revision 1) for requesting a
Bokeh server reply with a new Bokeh Document.
    The ``content`` fragment for this message is empty.
'''
msgtype = 'PULL-DOC-REQ'
revision = 1
@classmethod
def create(cls, **metadata):
        ''' Create a ``PULL-DOC-REQ`` message
Any keyword arguments will be put into the message ``metadata``
fragment as-is.
'''
header = cls.create_header()
return cls(header, metadata, {})
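# Illustrative sketch, not part of the original module: builds a request with
# the classmethod above. The 'reason' keyword is an arbitrary example; any
# keyword arguments are stored in the message's metadata fragment.
def _example_pull_doc_req():
    return pull_doc_req_1.create(reason='demo')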
| mindriot101/bokeh | bokeh/protocol/messages/pull_doc_req.py | Python | bsd-3-clause | 663 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.utils import timezone
REASON_CHOICES = (
(0, _("Spam")),
(1, _("Other")),
)
class CommentFlag(models.Model):
moderator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='st_comment_flags',
null=True, blank=True)
comment = models.OneToOneField('spirit_comment.Comment')
date = models.DateTimeField(default=timezone.now)
is_closed = models.BooleanField(default=False)
class Meta:
ordering = ['-date', '-pk']
verbose_name = _("comment flag")
verbose_name_plural = _("comments flags")
# def get_absolute_url(self):
# pass
class Flag(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='st_flags')
comment = models.ForeignKey('spirit_comment.Comment')
date = models.DateTimeField(default=timezone.now)
reason = models.IntegerField(_("reason"), choices=REASON_CHOICES)
body = models.TextField(_("body"), blank=True)
class Meta:
unique_together = ('user', 'comment')
ordering = ['-date', '-pk']
verbose_name = _("flag")
verbose_name_plural = _("flags")
| ramaseshan/Spirit | spirit/comment/flag/models.py | Python | mit | 1,345 |
#!/usr/bin/env python
# encoding: utf-8
#
# partially based on boost.py written by Gernot Vormayr
# written by Ruediger Sonderfeld <[email protected]>, 2008
# modified by Bjoern Michaelsen, 2008
# modified by Luca Fossati, 2008
# rewritten for waf 1.5.1, Thomas Nagy, 2008
# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
'''
This is an extra tool, not bundled with the default waf binary.
To add the boost tool to the waf file:
$ ./waf-light --tools=compat15,boost
or, if you have waf >= 1.6.2
$ ./waf update --files=boost
When using this tool, the wscript will look like:
def options(opt):
opt.load('compiler_cxx boost')
def configure(conf):
conf.load('compiler_cxx boost')
conf.check_boost(lib='system filesystem')
def build(bld):
bld(source='main.cpp', target='app', use='BOOST')
Options are generated to let you specify the location of the boost includes/libraries.
The `check_boost` configuration function lets you specify which boost libraries are used.
It can also provide default arguments to the --boost-static and --boost-mt command-line arguments.
Everything will be packaged together in a BOOST component that you can use.
When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
- you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
Errors: C4530
- boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC
So before calling `conf.check_boost` you might want to disable it by adding:
conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
Errors:
- boost might also be compiled with /MT, which links the runtime statically.
If you have problems with redefined symbols,
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
Passing `--boost-linkage_autodetect` might help ensure a correct linkage in some basic cases.
'''
import sys
import re
from waflib import Utils, Logs, Errors
from waflib.Configure import conf
BOOST_LIBS = ['/usr/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu',
'/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
BOOST_VERSION_FILE = 'boost/version.hpp'
BOOST_VERSION_CODE = '''
#include <iostream>
#include <boost/version.hpp>
int main() { std::cout << BOOST_LIB_VERSION << std::endl; }
'''
BOOST_ERROR_CODE = '''
#include <boost/system/error_code.hpp>
int main() { boost::system::error_code c; }
'''
BOOST_THREAD_CODE = '''
#include <boost/thread.hpp>
int main() { boost::thread t; }
'''
# toolsets from {boost_dir}/tools/build/v2/tools/common.jam
PLATFORM = Utils.unversioned_sys_platform()
detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il'
detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang'
detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc'
BOOST_TOOLSETS = {
'borland': 'bcb',
'clang': detect_clang,
'como': 'como',
'cw': 'cw',
'darwin': 'xgcc',
'edg': 'edg',
'g++': detect_mingw,
'gcc': detect_mingw,
'icpc': detect_intel,
'intel': detect_intel,
'kcc': 'kcc',
'kylix': 'bck',
'mipspro': 'mp',
'mingw': 'mgw',
'msvc': 'vc',
'qcc': 'qcc',
'sun': 'sw',
'sunc++': 'sw',
'tru64cxx': 'tru',
'vacpp': 'xlc'
}
def options(opt):
opt.add_option('--boost-includes', type='string',
default='', dest='boost_includes',
help='''path to the boost includes root (~boost root)
e.g. /path/to/boost_1_47_0''')
opt.add_option('--boost-libs', type='string',
default='', dest='boost_libs',
help='''path to the directory where the boost libs are
e.g. /path/to/boost_1_47_0/stage/lib''')
opt.add_option('--boost-static', action='store_true',
default=False, dest='boost_static',
help='link with static boost libraries (.lib/.a)')
opt.add_option('--boost-mt', action='store_true',
default=False, dest='boost_mt',
help='select multi-threaded libraries')
opt.add_option('--boost-abi', type='string', default='', dest='boost_abi',
help='''select libraries with tags (dgsyp, d for debug),
see doc Boost, Getting Started, chapter 6.1''')
opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect',
help="auto-detect boost linkage options (don't get used to it / might break other stuff)")
opt.add_option('--boost-toolset', type='string',
default='', dest='boost_toolset',
help='force a toolset e.g. msvc, vc90, \
gcc, mingw, mgw45 (default: auto)')
py_version = '%d%d' % (sys.version_info[0], sys.version_info[1])
opt.add_option('--boost-python', type='string',
default=py_version, dest='boost_python',
help='select the lib python with this version \
(default: %s)' % py_version)
@conf
def __boost_get_version_file(self, d):
dnode = self.root.find_dir(d)
if dnode:
return dnode.find_node(BOOST_VERSION_FILE)
return None
@conf
def boost_get_version(self, d):
"""silently retrieve the boost version number"""
node = self.__boost_get_version_file(d)
if node:
try:
txt = node.read()
except (OSError, IOError):
Logs.error("Could not read the file %r" % node.abspath())
else:
re_but = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.*)"', re.M)
m = re_but.search(txt)
if m:
return m.group(1)
return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True)
@conf
def boost_get_includes(self, *k, **kw):
includes = k and k[0] or kw.get('includes', None)
if includes and self.__boost_get_version_file(includes):
return includes
for d in Utils.to_list(self.environ.get('INCLUDE', '')) + BOOST_INCLUDES:
if self.__boost_get_version_file(d):
return d
if includes:
self.end_msg('headers not found in %s' % includes)
self.fatal('The configuration failed')
else:
self.end_msg('headers not found, please provide a --boost-includes argument (see help)')
self.fatal('The configuration failed')
@conf
def boost_get_toolset(self, cc):
toolset = cc
if not cc:
build_platform = Utils.unversioned_sys_platform()
if build_platform in BOOST_TOOLSETS:
cc = build_platform
else:
cc = self.env.CXX_NAME
if cc in BOOST_TOOLSETS:
toolset = BOOST_TOOLSETS[cc]
return isinstance(toolset, str) and toolset or toolset(self.env)
@conf
def __boost_get_libs_path(self, *k, **kw):
''' return the lib path and all the files in it '''
if 'files' in kw:
return self.root.find_dir('.'), Utils.to_list(kw['files'])
libs = k and k[0] or kw.get('libs', None)
if libs:
path = self.root.find_dir(libs)
files = path.ant_glob('*boost_*')
if not libs or not files:
for d in Utils.to_list(self.environ.get('LIB', [])) + BOOST_LIBS:
path = self.root.find_dir(d)
if path:
files = path.ant_glob('*boost_*')
if files:
break
path = self.root.find_dir(d + '64')
if path:
files = path.ant_glob('*boost_*')
if files:
break
if not path:
if libs:
self.end_msg('libs not found in %s' % libs)
self.fatal('The configuration failed')
else:
self.end_msg('libs not found, please provide a --boost-libs argument (see help)')
self.fatal('The configuration failed')
self.to_log('Found the boost path in %r with the libraries:' % path)
for x in files:
self.to_log(' %r' % x)
return path, files
@conf
def boost_get_libs(self, *k, **kw):
'''
return the lib path and the required libs
according to the parameters
'''
path, files = self.__boost_get_libs_path(**kw)
t = []
if kw.get('mt', False):
t.append('mt')
if kw.get('abi', None):
t.append(kw['abi'])
tags = t and '(-%s)+' % '-'.join(t) or ''
toolset = self.boost_get_toolset(kw.get('toolset', ''))
toolset_pat = '(-%s[0-9]{0,3})+' % toolset
version = '(-%s)+' % self.env.BOOST_VERSION
def find_lib(re_lib, files):
for file in files:
if re_lib.search(file.name):
self.to_log('Found boost lib %s' % file)
return file
return None
def format_lib_name(name):
if name.startswith('lib') and self.env.CC_NAME != 'msvc':
name = name[3:]
return name[:name.rfind('.')]
libs = []
for lib in Utils.to_list(k and k[0] or kw.get('lib', None)):
py = (lib == 'python') and '(-py%s)+' % kw['python'] or ''
        # Trying libraries, from the most strict match to the least strict one
for pattern in ['boost_%s%s%s%s%s' % (lib, toolset_pat, tags, py, version),
'boost_%s%s%s%s' % (lib, tags, py, version),
'boost_%s%s%s' % (lib, tags, version),
# Give up trying to find the right version
'boost_%s%s%s%s' % (lib, toolset_pat, tags, py),
'boost_%s%s%s' % (lib, tags, py),
'boost_%s%s' % (lib, tags)]:
self.to_log('Trying pattern %s' % pattern)
file = find_lib(re.compile(pattern), files)
if file:
libs.append(format_lib_name(file.name))
break
else:
self.end_msg('lib %s not found in %s' % (lib, path.abspath()))
self.fatal('The configuration failed')
return path.abspath(), libs
@conf
def check_boost(self, *k, **kw):
"""
Initialize boost libraries to be used.
Keywords: you can pass the same parameters as with the command line (without "--boost-").
    Note that the command line takes priority, and should preferably be used.
"""
if not self.env['CXX']:
self.fatal('load a c++ compiler first, conf.load("compiler_cxx")')
params = {'lib': k and k[0] or kw.get('lib', None)}
for key, value in self.options.__dict__.items():
if not key.startswith('boost_'):
continue
key = key[len('boost_'):]
params[key] = value and value or kw.get(key, '')
var = kw.get('uselib_store', 'BOOST')
self.start_msg('Checking boost includes')
try:
self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
self.env.BOOST_VERSION = self.boost_get_version(inc)
except Errors.WafError:
self.end_msg("not found", 'YELLOW')
raise
#self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params)
#self.env.BOOST_VERSION = self.boost_get_version(inc)
self.end_msg(self.env.BOOST_VERSION)
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var])
if not params['lib']:
return
self.start_msg('Checking boost libs')
try:
suffix = params.get('static', None) and 'ST' or ''
path, libs = self.boost_get_libs(**params)
except Errors.WafError:
self.end_msg("not found", 'YELLOW')
raise
#suffix = params.get('static', None) and 'ST' or ''
#path, libs = self.boost_get_libs(**params)
self.env['%sLIBPATH_%s' % (suffix, var)] = [path]
self.env['%sLIB_%s' % (suffix, var)] = libs
self.end_msg('ok')
if Logs.verbose:
Logs.pprint('CYAN', ' path : %s' % path)
Logs.pprint('CYAN', ' libs : %s' % libs)
def try_link():
if 'system' in params['lib']:
self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False)
if 'thread' in params['lib']:
self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False)
if params.get('linkage_autodetect', False):
self.start_msg("Attempting to detect boost linkage flags")
toolset = self.boost_get_toolset(kw.get('toolset', ''))
if toolset in ['vc']:
# disable auto-linking feature, causing error LNK1181
# because the code wants to be linked against
self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
# if no dlls are present, we guess the .lib files are not stubs
has_dlls = False
for x in Utils.listdir(path):
if x.endswith(self.env.cxxshlib_PATTERN % ''):
has_dlls = True
break
if not has_dlls:
self.env['STLIBPATH_%s' % var] = [path]
self.env['STLIB_%s' % var] = libs
del self.env['LIB_%s' % var]
del self.env['LIBPATH_%s' % var]
# we attempt to play with some known-to-work CXXFLAGS combinations
for cxxflags in (['/MD', '/EHsc'], []):
self.env.stash()
self.env["CXXFLAGS_%s" % var] += cxxflags
try:
try_link()
self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var]))
exc = None
break
except Errors.ConfigurationError as e:
self.env.revert()
exc = e
if exc is not None:
self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc)
self.fatal('The configuration failed')
else:
self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain")
self.fatal('The configuration failed')
else:
self.start_msg('Checking for boost linkage')
try:
try_link()
except Errors.ConfigurationError as e:
self.end_msg("Could not link against boost libraries using supplied options")
self.fatal('The configuration failed')
self.end_msg('ok')
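# Illustrative wscript fragment, not shipped with this tool: a typical
# configure() consuming the boost support defined above. The library names are
# placeholders; 'mt' and 'static' mirror the --boost-mt / --boost-static flags.
def _example_configure(conf):
    conf.load('compiler_cxx boost')
    conf.check_boost(lib='system filesystem', mt=True, static=True)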
| Enchufa2/ns-3-dev-git | waf-tools/boost.py | Python | gpl-2.0 | 12,900 |
"""Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import builtins
import io
import _compression
__all__ = ["GzipFile", "open", "compress", "decompress"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def open(filename, mode="rb", compresslevel=9,
encoding=None, errors=None, newline=None):
"""Open a gzip-compressed file in binary or text mode.
The filename argument can be an actual filename (a str or bytes object), or
an existing file object to read from or write to.
The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or "ab" for
binary mode, or "rt", "wt", "xt" or "at" for text mode. The default mode is
"rb", and the default compresslevel is 9.
For binary mode, this function is equivalent to the GzipFile constructor:
GzipFile(filename, mode, compresslevel). In this case, the encoding, errors
and newline arguments must not be provided.
For text mode, a GzipFile object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error handling
behavior, and line ending(s).
"""
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument 'encoding' not supported in binary mode")
if errors is not None:
raise ValueError("Argument 'errors' not supported in binary mode")
if newline is not None:
raise ValueError("Argument 'newline' not supported in binary mode")
gz_mode = mode.replace("t", "")
if isinstance(filename, (str, bytes)):
binary_file = GzipFile(filename, gz_mode, compresslevel)
elif hasattr(filename, "read") or hasattr(filename, "write"):
binary_file = GzipFile(None, gz_mode, compresslevel, filename)
else:
raise TypeError("filename must be a str or bytes object, or a file")
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
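# Illustrative sketch, not part of the original module: round-trips a small
# text payload through the text-mode wrapper returned by open() above. The
# 'path' argument is whatever the caller chooses.
def _example_text_roundtrip(path):
    with open(path, "wt", encoding="utf-8") as f:
        f.write("hello gzip\n")
    with open(path, "rt", encoding="utf-8") as f:
        return f.read()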
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
class _PaddedFile:
"""Minimal read-only file object that prepends a string to the contents
of an actual file. Shouldn't be used outside of gzip.py, as it lacks
essential functionality."""
def __init__(self, f, prepend=b''):
self._buffer = prepend
self._length = len(prepend)
self.file = f
self._read = 0
def read(self, size):
if self._read is None:
return self.file.read(size)
if self._read + size <= self._length:
read = self._read
self._read += size
return self._buffer[read:self._read]
else:
read = self._read
self._read = None
return self._buffer[read:] + \
self.file.read(size-self._length+read)
def prepend(self, prepend=b''):
if self._read is None:
self._buffer = prepend
else: # Assume data was read since the last prepend() call
self._read -= len(prepend)
return
self._length = len(self._buffer)
self._read = 0
def seek(self, off):
self._read = None
self._buffer = None
return self.file.seek(off)
def seekable(self):
return True # Allows fast-forwarding even in unseekable streams
class GzipFile(_compression.BaseStream):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the truncate() method.
This class only supports opening files in binary mode. If you need to open a
compressed file in text mode, use the gzip.open() function.
"""
# Overridden with internal file object to be closed, if only a filename
# is passed in
myfileobj = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError("Invalid mode: {!r}".format(mode))
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
if filename is None:
filename = getattr(fileobj, 'name', '')
if not isinstance(filename, (str, bytes)):
filename = ''
if mode is None:
mode = getattr(fileobj, 'mode', 'rb')
if mode.startswith('r'):
self.mode = READ
raw = _GzipReader(fileobj)
self._buffer = io.BufferedReader(raw)
self.name = filename
elif mode.startswith(('w', 'a', 'x')):
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
self._write_mtime = mtime
else:
raise ValueError("Invalid mode: {!r}".format(mode))
self.fileobj = fileobj
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
@property
def mtime(self):
"""Last modification time read from stream, or None"""
return self._buffer.raw._last_mtime
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32(b"") & 0xffffffff
self.size = 0
self.writebuf = []
self.bufsize = 0
self.offset = 0 # Current file offset for seek(), tell(), etc
def _write_gzip_header(self):
self.fileobj.write(b'\037\213') # magic header
self.fileobj.write(b'\010') # compression method
try:
# RFC 1952 requires the FNAME field to be Latin-1. Do not
# include filenames that cannot be represented that way.
fname = os.path.basename(self.name)
if not isinstance(fname, bytes):
fname = fname.encode('latin-1')
if fname.endswith(b'.gz'):
fname = fname[:-3]
except UnicodeEncodeError:
fname = b''
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags).encode('latin-1'))
mtime = self._write_mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, int(mtime))
self.fileobj.write(b'\002')
self.fileobj.write(b'\377')
if fname:
self.fileobj.write(fname + b'\000')
def write(self,data):
self._check_not_closed()
if self.mode != WRITE:
import errno
raise OSError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError("write() on closed GzipFile object")
if isinstance(data, bytes):
length = len(data)
else:
# accept any data that supports the buffer protocol
data = memoryview(data)
length = data.nbytes
if length > 0:
self.fileobj.write(self.compress.compress(data))
self.size += length
self.crc = zlib.crc32(data, self.crc) & 0xffffffff
self.offset += length
return length
def read(self, size=-1):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read() on write-only GzipFile object")
return self._buffer.read(size)
def read1(self, size=-1):
"""Implements BufferedIOBase.read1()
        Reads up to a buffer's worth of data if size is negative."""
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "read1() on write-only GzipFile object")
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size)
def peek(self, n):
self._check_not_closed()
if self.mode != READ:
import errno
raise OSError(errno.EBADF, "peek() on write-only GzipFile object")
return self._buffer.peek(n)
@property
def closed(self):
return self.fileobj is None
def close(self):
fileobj = self.fileobj
if fileobj is None:
return
self.fileobj = None
try:
if self.mode == WRITE:
fileobj.write(self.compress.flush())
write32u(fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(fileobj, self.size & 0xffffffff)
elif self.mode == READ:
self._buffer.close()
finally:
myfileobj = self.myfileobj
if myfileobj:
self.myfileobj = None
myfileobj.close()
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_not_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise OSError("Can't rewind in write mode")
self._buffer.seek(0)
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=io.SEEK_SET):
if self.mode == WRITE:
if whence != io.SEEK_SET:
if whence == io.SEEK_CUR:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if offset < self.offset:
raise OSError('Negative seek in write mode')
count = offset - self.offset
chunk = bytes(1024)
for i in range(count // 1024):
self.write(chunk)
self.write(bytes(count % 1024))
elif self.mode == READ:
self._check_not_closed()
return self._buffer.seek(offset, whence)
return self.offset
def readline(self, size=-1):
self._check_not_closed()
return self._buffer.readline(size)
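# Illustrative sketch, not part of the original module: wraps an in-memory
# buffer with GzipFile directly, which is also what the one-shot compress()
# and decompress() helpers further below do.
def _example_fileobj_roundtrip():
    buf = io.BytesIO()
    with GzipFile(fileobj=buf, mode='wb') as f:
        f.write(b"payload")
    buf.seek(0)
    with GzipFile(fileobj=buf, mode='rb') as f:
        return f.read()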
class _GzipReader(_compression.DecompressReader):
def __init__(self, fp):
super().__init__(_PaddedFile(fp), zlib.decompressobj,
wbits=-zlib.MAX_WBITS)
# Set flag indicating start of a new member
self._new_member = True
self._last_mtime = None
def _init_read(self):
self._crc = zlib.crc32(b"") & 0xffffffff
self._stream_size = 0 # Decompressed size of unconcatenated stream
def _read_exact(self, n):
'''Read exactly *n* bytes from `self._fp`
This method is required because self._fp may be unbuffered,
i.e. return short reads.
'''
data = self._fp.read(n)
while len(data) < n:
b = self._fp.read(n - len(data))
if not b:
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
data += b
return data
def _read_gzip_header(self):
magic = self._fp.read(2)
if magic == b'':
return False
if magic != b'\037\213':
raise OSError('Not a gzipped file (%r)' % magic)
(method, flag,
self._last_mtime) = struct.unpack("<BBIxx", self._read_exact(8))
if method != 8:
raise OSError('Unknown compression method')
if flag & FEXTRA:
# Read & discard the extra field, if present
extra_len, = struct.unpack("<H", self._read_exact(2))
self._read_exact(extra_len)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self._fp.read(1)
if not s or s==b'\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self._fp.read(1)
if not s or s==b'\000':
break
if flag & FHCRC:
self._read_exact(2) # Read & discard the 16-bit header CRC
return True
def read(self, size=-1):
if size < 0:
return self.readall()
# size=0 is special because decompress(max_length=0) is not supported
if not size:
return b""
# For certain input data, a single
# call to decompress() may not return
# any data. In this case, retry until we get some data or reach EOF.
while True:
if self._decompressor.eof:
# Ending case: we've come to the end of a member in the file,
# so finish up this member, and read a new gzip header.
# Check the CRC and file size, and set the flag so we read
# a new member
self._read_eof()
self._new_member = True
self._decompressor = self._decomp_factory(
**self._decomp_args)
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
self._init_read()
if not self._read_gzip_header():
self._size = self._pos
return b""
self._new_member = False
# Read a chunk of data from the file
buf = self._fp.read(io.DEFAULT_BUFFER_SIZE)
uncompress = self._decompressor.decompress(buf, size)
if self._decompressor.unconsumed_tail != b"":
self._fp.prepend(self._decompressor.unconsumed_tail)
elif self._decompressor.unused_data != b"":
# Prepend the already read bytes to the fileobj so they can
# be seen by _read_eof() and _read_gzip_header()
self._fp.prepend(self._decompressor.unused_data)
if uncompress != b"":
break
if buf == b"":
raise EOFError("Compressed file ended before the "
"end-of-stream marker was reached")
self._add_read_data( uncompress )
self._pos += len(uncompress)
return uncompress
def _add_read_data(self, data):
self._crc = zlib.crc32(data, self._crc) & 0xffffffff
self._stream_size = self._stream_size + len(data)
def _read_eof(self):
# We've read to the end of the file
        # We check that the computed CRC and size of the
        # uncompressed data match the stored values. Note that the size
# stored is the true file size mod 2**32.
crc32, isize = struct.unpack("<II", self._read_exact(8))
if crc32 != self._crc:
raise OSError("CRC check failed %s != %s" % (hex(crc32),
hex(self._crc)))
elif isize != (self._stream_size & 0xffffffff):
raise OSError("Incorrect length of data produced")
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = b"\x00"
while c == b"\x00":
c = self._fp.read(1)
if c:
self._fp.prepend(c)
def _rewind(self):
super()._rewind()
self._new_member = True
def compress(data, compresslevel=9):
"""Compress data in one shot and return the compressed string.
Optional argument is the compression level, in range of 0-9.
"""
buf = io.BytesIO()
with GzipFile(fileobj=buf, mode='wb', compresslevel=compresslevel) as f:
f.write(data)
return buf.getvalue()
def decompress(data):
"""Decompress a gzip compressed string in one shot.
Return the decompressed string.
"""
with GzipFile(fileobj=io.BytesIO(data)) as f:
return f.read()
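# Illustrative sketch, not part of the original module: the one-shot helpers
# defined above round-trip an arbitrary payload; compresslevel=6 is just an
# example value.
def _example_oneshot_roundtrip():
    payload = b"example data " * 32
    packed = compress(payload, compresslevel=6)
    assert decompress(packed) == payload
    return len(packed), len(payload)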
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin.buffer)
g = sys.stdout.buffer
else:
if arg[-3:] != ".gz":
print("filename doesn't end in .gz:", repr(arg))
continue
f = open(arg, "rb")
g = builtins.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin.buffer
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout.buffer)
else:
f = builtins.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout.buffer:
g.close()
if f is not sys.stdin.buffer:
f.close()
if __name__ == '__main__':
_test()
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/gzip.py | Python | gpl-3.0 | 20,313 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
import pytest
from sqlalchemy import or_
from airflow import configuration, models
from airflow.providers.mysql.transfers.s3_to_mysql import S3ToMySqlOperator
from airflow.utils import db
from airflow.utils.session import create_session
class TestS3ToMySqlTransfer(unittest.TestCase):
def setUp(self):
configuration.conf.load_test_config()
db.merge_conn(
models.Connection(
conn_id='s3_test',
conn_type='s3',
schema='test',
extra='{"aws_access_key_id": "aws_access_key_id", "aws_secret_access_key":'
' "aws_secret_access_key"}',
)
)
db.merge_conn(
models.Connection(
conn_id='mysql_test',
conn_type='mysql',
host='some.host.com',
schema='test_db',
login='user',
password='password',
)
)
self.s3_to_mysql_transfer_kwargs = {
'aws_conn_id': 's3_test',
'mysql_conn_id': 'mysql_test',
's3_source_key': 'test/s3_to_mysql_test.csv',
'mysql_table': 'mysql_table',
'mysql_duplicate_key_handling': 'IGNORE',
'mysql_extra_options': """
FIELDS TERMINATED BY ','
IGNORE 1 LINES
""",
'task_id': 'task_id',
'dag': None,
}
@patch('airflow.providers.mysql.transfers.s3_to_mysql.S3Hook.download_file')
@patch('airflow.providers.mysql.transfers.s3_to_mysql.MySqlHook.bulk_load_custom')
@patch('airflow.providers.mysql.transfers.s3_to_mysql.os.remove')
def test_execute(self, mock_remove, mock_bulk_load_custom, mock_download_file):
S3ToMySqlOperator(**self.s3_to_mysql_transfer_kwargs).execute({})
mock_download_file.assert_called_once_with(key=self.s3_to_mysql_transfer_kwargs['s3_source_key'])
mock_bulk_load_custom.assert_called_once_with(
table=self.s3_to_mysql_transfer_kwargs['mysql_table'],
tmp_file=mock_download_file.return_value,
duplicate_key_handling=self.s3_to_mysql_transfer_kwargs['mysql_duplicate_key_handling'],
extra_options=self.s3_to_mysql_transfer_kwargs['mysql_extra_options'],
)
mock_remove.assert_called_once_with(mock_download_file.return_value)
@patch('airflow.providers.mysql.transfers.s3_to_mysql.S3Hook.download_file')
@patch('airflow.providers.mysql.transfers.s3_to_mysql.MySqlHook.bulk_load_custom')
@patch('airflow.providers.mysql.transfers.s3_to_mysql.os.remove')
def test_execute_exception(self, mock_remove, mock_bulk_load_custom, mock_download_file):
mock_bulk_load_custom.side_effect = Exception
with pytest.raises(Exception):
S3ToMySqlOperator(**self.s3_to_mysql_transfer_kwargs).execute({})
mock_download_file.assert_called_once_with(key=self.s3_to_mysql_transfer_kwargs['s3_source_key'])
mock_bulk_load_custom.assert_called_once_with(
table=self.s3_to_mysql_transfer_kwargs['mysql_table'],
tmp_file=mock_download_file.return_value,
duplicate_key_handling=self.s3_to_mysql_transfer_kwargs['mysql_duplicate_key_handling'],
extra_options=self.s3_to_mysql_transfer_kwargs['mysql_extra_options'],
)
mock_remove.assert_called_once_with(mock_download_file.return_value)
def tearDown(self):
with create_session() as session:
(
session.query(models.Connection)
.filter(
or_(models.Connection.conn_id == 's3_test', models.Connection.conn_id == 'mysql_test')
)
.delete()
)
| apache/incubator-airflow | tests/providers/mysql/transfers/test_s3_to_mysql.py | Python | apache-2.0 | 4,602 |
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os, os.path
import vsan_policy
import vmdk_utils
import volume_kv
import vsan_info
import logging
import log_config
class TestVsanPolicy(unittest.TestCase):
""" Test VSAN Policy code """
@unittest.skipIf(not vsan_info.get_vsan_datastore(),
"VSAN is not found - skipping vsan_info tests")
def setUp(self):
self.policy_path = os.path.join(vsan_info.get_vsan_dockvols_path(),
'policies/test_policy')
self.name = 'test_policy'
self.content = ('(("proportionalCapacity" i50) '
'("hostFailuresToTolerate" i0))')
def tearDown(self):
try:
os.remove(self.policy_path)
except:
pass
def assertPoliciesEqual(self):
with open(self.policy_path) as f:
content = f.read()
# Remove the added newline
self.assertEqual(content[:-1], self.content)
def test_create(self):
self.assertEqual(None, vsan_policy.create(self.name, self.content))
self.assertPoliciesEqual()
def test_double_create_fails(self):
self.assertEqual(None, vsan_policy.create(self.name, self.content))
self.assertNotEqual(None, vsan_policy.create(self.name, self.content))
self.assertPoliciesEqual()
def test_create_delete(self):
self.assertEqual(None, vsan_policy.create(self.name, self.content))
self.assertPoliciesEqual()
self.assertEqual(None, vsan_policy.delete(self.name))
self.assertFalse(os.path.isfile(self.policy_path))
def test_delete_nonexistent_policy_fails(self):
log = log_config.get_logger('Delete_nonexistent_policy')
log.info("\nThis is a negative test to delete a non-existent vsan policy.\n"
"Test is expected to raise exception log - Failed to remove the policy file error. \n")
self.assertNotEqual(None, vsan_policy.delete(self.name))
def test_create_list(self):
self.assertEqual(None, vsan_policy.create(self.name, self.content))
policies = vsan_policy.get_policies()
        self.assertEqual(self.content + '\n', policies[self.name])
if __name__ == '__main__':
volume_kv.init()
unittest.main()
| venilnoronha/docker-volume-vsphere | esx_service/vsan_policy_test.py | Python | apache-2.0 | 2,858 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium WebUI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
resources = input_api.PresubmitLocalPath()
path = input_api.os_path
affected_files = (f.AbsoluteLocalPath() for f in input_api.AffectedFiles())
would_affect_tests = (
path.join(resources, 'PRESUBMIT.py'),
path.join(resources, 'test_presubmit.py'),
path.join(resources, 'web_dev_style', 'css_checker.py'),
path.join(resources, 'web_dev_style', 'js_checker.py'),
)
if any(f for f in affected_files if f in would_affect_tests):
tests = [path.join(resources, 'test_presubmit.py')]
results.extend(
input_api.canned_checks.RunUnitTests(input_api, output_api, tests))
import sys
old_path = sys.path
try:
sys.path = [resources] + old_path
from web_dev_style import css_checker, js_checker
def is_resource(maybe_resource):
f = maybe_resource.AbsoluteLocalPath()
return f.endswith(('.css', '.html', '.js')) and f.startswith(resources)
results.extend(css_checker.CSSChecker(input_api, output_api,
file_filter=is_resource).RunChecks())
results.extend(js_checker.JSChecker(input_api, output_api,
file_filter=is_resource).RunChecks())
finally:
sys.path = old_path
return results
| zcbenz/cefode-chromium | chrome/browser/resources/PRESUBMIT.py | Python | bsd-3-clause | 2,027 |
from django.core.signing import b64_decode
from django.test import TestCase, override_settings
from django.urls import reverse
from .models import SomeObject
from .urls import ContactFormViewWithMsg, DeleteFormViewWithMsg
@override_settings(ROOT_URLCONF='messages_tests.urls')
class SuccessMessageMixinTests(TestCase):
def test_set_messages_success(self):
author = {'name': 'John Doe', 'slug': 'success-msg'}
add_url = reverse('add_success_msg')
req = self.client.post(add_url, author)
# Uncompressed message is stored in the cookie.
value = b64_decode(
req.cookies['messages'].value.split(":")[0].encode(),
).decode()
self.assertIn(ContactFormViewWithMsg.success_message % author, value)
def test_set_messages_success_on_delete(self):
object_to_delete = SomeObject.objects.create(name='MyObject')
delete_url = reverse('success_msg_on_delete', args=[object_to_delete.pk])
response = self.client.post(delete_url, follow=True)
self.assertContains(response, DeleteFormViewWithMsg.success_message)
| ar4s/django | tests/messages_tests/test_mixins.py | Python | bsd-3-clause | 1,107 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigiq_application_fastl4_tcp
short_description: Manages BIG-IQ FastL4 TCP applications
description:
- Manages BIG-IQ applications used for load balancing a TCP-based application
with a FastL4 profile.
version_added: 2.6
options:
name:
description:
- Name of the new application.
required: True
description:
description:
- Description of the application.
servers:
description:
- A list of servers that the application is hosted on.
      - If you are familiar with other BIG-IP settings, you might also refer to this
list as the list of pool members.
- When creating a new application, at least one server is required.
suboptions:
address:
description:
- The IP address of the server.
required: True
port:
description:
- The port of the server.
- When creating a new application and specifying a server, if this parameter
is not provided, the default of C(8000) will be used.
default: 8000
inbound_virtual:
description:
- Settings to configure the virtual which will receive the inbound connection.
suboptions:
address:
description:
- Specifies destination IP address information to which the virtual server
sends traffic.
- This parameter is required when creating a new application.
required: True
netmask:
description:
- Specifies the netmask to associate with the given C(destination).
- This parameter is required when creating a new application.
required: True
port:
description:
- The port that the virtual listens for connections on.
- When creating a new application, if this parameter is not specified, the
default value of C(8080) will be used.
default: 8080
service_environment:
description:
- Specifies the name of service environment that the application will be
deployed to.
- When creating a new application, this parameter is required.
- The service environment type will be discovered by this module automatically.
Therefore, it is crucial that you maintain unique names for items in the
different service environment types.
- SSGs are not supported for this type of application.
add_analytics:
description:
- Collects statistics of the BIG-IP that the application is deployed to.
- This parameter is only relevant when specifying a C(service_environment) which
is a BIG-IP; not an SSG.
type: bool
default: no
state:
description:
- The state of the resource on the system.
- When C(present), guarantees that the resource exists with the provided attributes.
- When C(absent), removes the resource from the system.
default: present
choices:
- absent
- present
wait:
description:
- If the module should wait for the application to be created, deleted or updated.
type: bool
default: yes
extends_documentation_fragment: f5
notes:
- This module does not support updating of your application (whether deployed or not).
If you need to update the application, the recommended practice is to remove and
re-create.
- Requires BIG-IQ version 6.0 or greater.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Load balance a TCP-based application with a FastL4 profile
bigiq_application_fastl4_tcp:
name: my-app
description: My description
service_environment: my-bigip-device
servers:
- address: 1.2.3.4
port: 8080
- address: 5.6.7.8
port: 8080
load_balancer:
name: foo
destination: 2.2.2.2
netmask: 255.255.255.255
port: 443
provider:
password: secret
server: lb.mydomain.com
user: admin
state: present
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the application of the resource.
returned: changed
type: string
sample: My application
service_environment:
description: The environment which the service was deployed to.
returned: changed
type: string
sample: my-ssg1
inbound_virtual_destination:
description: The destination of the virtual that was created.
returned: changed
type: string
sample: 6.7.8.9
inbound_virtual_netmask:
description: The network mask of the provided inbound destination.
returned: changed
type: string
sample: 255.255.255.0
inbound_virtual_port:
description: The port the inbound virtual address listens on.
returned: changed
type: int
sample: 80
servers:
description: List of servers, and their ports, that make up the application.
type: complex
returned: changed
contains:
address:
description: The IP address of the server.
returned: changed
type: string
sample: 2.3.4.5
port:
description: The port that the server listens on.
returned: changed
type: int
sample: 8080
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigiq import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
except ImportError:
from ansible.module_utils.network.f5.bigiq import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
import time
class Parameters(AnsibleF5Parameters):
api_map = {
'templateReference': 'template_reference',
'subPath': 'sub_path',
'configSetName': 'config_set_name',
'defaultDeviceReference': 'default_device_reference',
'addAnalytics': 'add_analytics'
}
api_attributes = [
'resources', 'description', 'configSetName', 'subPath', 'templateReference',
'defaultDeviceReference', 'addAnalytics'
]
returnables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'default_device_reference', 'servers', 'inbound_virtual', 'add_analytics'
]
updatables = [
'resources', 'description', 'config_set_name', 'sub_path', 'template_reference',
'default_device_reference', 'servers', 'add_analytics'
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def http_profile(self):
return "profile_http"
@property
def config_set_name(self):
return self.name
@property
def sub_path(self):
return self.name
@property
def template_reference(self):
filter = "name+eq+'Default-f5-FastL4-TCP-lb-template'"
uri = "https://{0}:{1}/mgmt/cm/global/templates/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No default HTTP LB template was found."
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
@property
def default_device_reference(self):
try:
# An IP address was specified
netaddr.IPAddress(self.service_environment)
filter = "address+eq+'{0}'".format(self.service_environment)
except netaddr.core.AddrFormatError:
# Assume a hostname was specified
filter = "hostname+eq+'{0}'".format(self.service_environment)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-adccore-allbigipDevices/devices/?$filter={2}&$top=1&$select=selfLink".format(
self.client.provider['server'],
self.client.provider['server_port'],
filter
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"The specified service_environment '{0}' was found.".format(self.service_environment)
)
elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
result = dict(
link=response['items'][0]['selfLink']
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def resources(self):
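        # NOTE: the hashed suffixes in the resource keys below (for example
        # 'ltm:virtual:20e0ce0ae107') appear to be identifiers taken from the
        # default FastL4 TCP template on BIG-IQ, not arbitrary values.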
result = dict()
result.update(self.tcp_monitor)
result.update(self.virtual)
result.update(self.pool)
result.update(self.nodes)
return result
@property
def virtual(self):
result = dict()
result['ltm:virtual:20e0ce0ae107'] = [
dict(
parameters=dict(
name='virtual',
destinationAddress=self.inbound_virtual['address'],
mask=self.inbound_virtual['netmask'],
destinationPort=self.inbound_virtual['port']
),
subcollectionResources=self.profiles
)
]
return result
@property
def profiles(self):
result = {
'profiles:53f9b3028d90': [
dict(
parameters=dict()
)
]
}
return result
@property
def pool(self):
result = dict()
result['ltm:pool:9fa59a7bfc5c'] = [
dict(
parameters=dict(
name='pool_0'
),
subcollectionResources=self.pool_members
)
]
return result
@property
def pool_members(self):
result = dict()
result['members:3e91bd30bbfb'] = []
for x in self.servers:
member = dict(
parameters=dict(
port=x['port'],
nodeReference=dict(
link='#/resources/ltm:node:3e91bd30bbfb/{0}'.format(x['address']),
fullPath='# {0}'.format(x['address'])
)
)
)
result['members:3e91bd30bbfb'].append(member)
return result
@property
def tcp_monitor(self):
result = dict()
result['ltm:monitor:tcp:f864a2efffea'] = [
dict(
parameters=dict(
name='monitor-tcp'
)
)
]
return result
@property
def nodes(self):
result = dict()
result['ltm:node:3e91bd30bbfb'] = []
for x in self.servers:
tmp = dict(
parameters=dict(
name=x['address'],
address=x['address']
)
)
result['ltm:node:3e91bd30bbfb'].append(tmp)
return result
@property
def node_addresses(self):
result = [x['address'] for x in self.servers]
return result
class ReportableChanges(Changes):
pass
class Difference(object):
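    # Compares, attribute by attribute, the desired (want) parameters against
    # the current (have) parameters; ModuleManager uses the result to decide
    # whether an update is needed.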
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.want.client = self.client
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return False
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/ap/query/v1/tenants/default/reports/AllApplicationsList?$filter=name+eq+'{2}'".format(
self.client.provider['server'],
self.client.provider['server_port'],
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and 'result' in response and 'totalItems' in response['result'] and response['result']['totalItems'] == 0:
return False
return True
def remove(self):
if self.module.check_mode:
return True
self_link = self.remove_from_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.want.service_environment is None:
raise F5ModuleError(
"A 'service_environment' must be specified when creating a new application."
)
if self.want.servers is None:
raise F5ModuleError(
"At least one 'servers' item is needed when creating a new application."
)
if self.want.inbound_virtual is None:
raise F5ModuleError(
"An 'inbound_virtual' must be specified when creating a new application."
)
self._set_changed_options()
if self.module.check_mode:
return True
self_link = self.create_on_device()
if self.want.wait:
self.wait_for_apply_template_task(self_link)
if not self.exists():
raise F5ModuleError(
"Failed to deploy application."
)
return True
def create_on_device(self):
params = self.changes.api_params()
params['mode'] = 'CREATE'
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
params = dict(
configSetName=self.want.name,
mode='DELETE'
)
uri = 'https://{0}:{1}/mgmt/cm/global/tasks/apply-template'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
return response['selfLink']
def wait_for_apply_template_task(self, self_link):
host = 'https://{0}:{1}'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
uri = self_link.replace('https://localhost', host)
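        # Poll the apply-template task until BIG-IQ reports it as FINISHED/DONE,
        # sleeping 5 seconds between requests and surfacing any errorMessage as
        # a module failure.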
while True:
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if response['status'] == 'FINISHED' and response.get('currentStep', None) == 'DONE':
return True
elif 'errorMessage' in response:
raise F5ModuleError(response['errorMessage'])
time.sleep(5)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
servers=dict(
type='list',
options=dict(
address=dict(required=True),
port=dict(default=8000)
)
),
inbound_virtual=dict(
type='dict',
options=dict(
address=dict(required=True),
netmask=dict(required=True),
port=dict(default=8080)
)
),
service_environment=dict(),
add_analytics=dict(type='bool', default='no'),
state=dict(
default='present',
choices=['present', 'absent']
),
wait=dict(type='bool', default='yes')
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_NETADDR:
module.fail_json(msg="The python netaddr module is required")
try:
client = F5RestClient(module=module)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
exit_json(module, results, client)
except F5ModuleError as ex:
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/network/f5/bigiq_application_fastl4_tcp.py | Python | gpl-3.0 | 21,721 |
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.enrollment methods.
"""
import json
from abc import ABCMeta
import mock
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.utils.translation import override as override_language
from django.utils.translation import get_language
from mock import patch
from nose.plugins.attrib import attr
from opaque_keys.edx.locator import CourseLocator
from six import text_type
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.models import StudentModule
from grades.subsection_grade_factory import SubsectionGradeFactory
from grades.tests.utils import answer_problem
from lms.djangoapps.ccx.tests.factories import CcxFactory
from lms.djangoapps.course_blocks.api import get_course_blocks
from lms.djangoapps.instructor.enrollment import (
EmailEnrollmentState,
enroll_email,
get_email_params,
render_message_to_string,
reset_student_attempts,
send_beta_role_email,
unenroll_email
)
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, get_mock_request
from student.models import CourseEnrollment, CourseEnrollmentAllowed, anonymous_id_for_user
from student.roles import CourseCcxCoachRole
from student.tests.factories import AdminFactory, UserFactory
from submissions import api as sub_api
from xmodule.modulestore.tests.django_utils import TEST_DATA_SPLIT_MODULESTORE, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@attr(shard=1)
class TestSettableEnrollmentState(CacheIsolationTestCase):
""" Test the basis class for enrollment tests. """
def setUp(self):
super(TestSettableEnrollmentState, self).setUp()
self.course_key = CourseLocator('Robot', 'fAKE', 'C--se--ID')
def test_mes_create(self):
"""
Test SettableEnrollmentState creation of user.
"""
mes = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
# enrollment objects
eobjs = mes.create_user(self.course_key)
ees = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(mes, ees)
class TestEnrollmentChangeBase(CacheIsolationTestCase):
"""
Test instructor enrollment administration against database effects.
Test methods in derived classes follow a strict format.
    `action` is a function which is run;
    the test will pass if `action` mutates state from `before_ideal` to `after_ideal`.
"""
__metaclass__ = ABCMeta
def setUp(self):
super(TestEnrollmentChangeBase, self).setUp()
self.course_key = CourseLocator('Robot', 'fAKE', 'C--se--ID')
def _run_state_change_test(self, before_ideal, after_ideal, action):
"""
Runs a state change test.
`before_ideal` and `after_ideal` are SettableEnrollmentState's
`action` is a function which will be run in the middle.
`action` should transition the world from before_ideal to after_ideal
`action` will be supplied the following arguments (None-able arguments)
`email` is an email string
"""
# initialize & check before
print "checking initialization..."
eobjs = before_ideal.create_user(self.course_key)
before = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(before, before_ideal)
# do action
print "running action..."
action(eobjs.email)
# check after
print "checking effects..."
after = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(after, after_ideal)
@attr(shard=1)
class TestInstructorEnrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.enroll_email """
def test_enroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_again(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_again(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=True)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_change_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=False)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr(shard=1)
class TestInstructorUnenrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.unenroll_email """
def test_unenroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_notenrolled(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_disallow(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_norecord(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr(shard=1)
class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
""" Test student module manipulations. """
@classmethod
def setUpClass(cls):
super(TestInstructorEnrollmentStudentModule, cls).setUpClass()
cls.course = CourseFactory(
name='fake',
org='course',
run='id',
)
# pylint: disable=no-member
cls.course_key = cls.course.location.course_key
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.parent = ItemFactory(
category="library_content",
parent=cls.course,
publish_item=True,
)
cls.child = ItemFactory(
category="html",
parent=cls.parent,
publish_item=True,
)
cls.unrelated = ItemFactory(
category="html",
parent=cls.course,
publish_item=True,
)
def setUp(self):
super(TestInstructorEnrollmentStudentModule, self).setUp()
self.user = UserFactory()
parent_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
child_state = json.dumps({'attempts': 10, 'whatever': 'things'})
unrelated_state = json.dumps({'attempts': 12, 'brains': 'zombie'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.parent.location,
state=parent_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.child.location,
state=child_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.unrelated.location,
state=unrelated_state,
)
def test_reset_student_attempts(self):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
# lambda to reload the module state from the database
module = lambda: StudentModule.objects.get(student=self.user, course_id=self.course_key, module_state_key=msk)
self.assertEqual(json.loads(module().state)['attempts'], 32)
reset_student_attempts(self.course_key, self.user, msk, requesting_user=self.user)
self.assertEqual(json.loads(module().state)['attempts'], 0)
@mock.patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_delete_student_attempts(self, _mock_signal):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 1)
reset_student_attempts(self.course_key, self.user, msk, requesting_user=self.user, delete_module=True)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 0)
# Disable the score change signal to prevent other components from being
# pulled into tests.
@mock.patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
@mock.patch('lms.djangoapps.grades.signals.handlers.submissions_score_set_handler')
@mock.patch('lms.djangoapps.grades.signals.handlers.submissions_score_reset_handler')
def test_delete_submission_scores(self, _mock_send_signal, mock_set_receiver, mock_reset_receiver):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
# Create a student module for the user
StudentModule.objects.create(
student=user,
course_id=self.course_key,
module_state_key=problem_location,
state=json.dumps({})
)
# Create a submission and score for the student using the submissions API
student_item = {
'student_id': anonymous_id_for_user(user, self.course_key),
'course_id': text_type(self.course_key),
'item_id': text_type(problem_location),
'item_type': 'openassessment'
}
submission = sub_api.create_submission(student_item, 'test answer')
sub_api.set_score(submission['uuid'], 1, 2)
# Delete student state using the instructor dash
reset_student_attempts(
self.course_key, user, problem_location,
requesting_user=user,
delete_module=True,
)
# Make sure our grades signal receivers handled the reset properly
mock_set_receiver.assert_not_called()
mock_reset_receiver.assert_called_once()
# Verify that the student's scores have been reset in the submissions API
score = sub_api.get_score(student_item)
self.assertIs(score, None)
def get_state(self, location):
"""Reload and grab the module state from the database"""
return StudentModule.objects.get(
student=self.user, course_id=self.course_key, module_state_key=location
).state
def test_reset_student_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(self.course_key, self.user, self.parent.location, requesting_user=self.user)
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(json.loads(self.get_state(self.parent.location))['attempts'], 0)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 0)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
def test_delete_submission_scores_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(
self.course_key,
self.user,
self.parent.location,
requesting_user=self.user,
delete_module=True,
)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.parent.location)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.child.location)
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
class TestStudentModuleGrading(SharedModuleStoreTestCase):
"""
Tests the effects of student module manipulations
on student grades.
"""
@classmethod
def setUpClass(cls):
super(TestStudentModuleGrading, cls).setUpClass()
cls.course = CourseFactory.create()
cls.chapter = ItemFactory.create(
parent=cls.course,
category="chapter",
display_name="Test Chapter"
)
cls.sequence = ItemFactory.create(
parent=cls.chapter,
category='sequential',
display_name="Test Sequential 1",
graded=True
)
cls.vertical = ItemFactory.create(
parent=cls.sequence,
category='vertical',
display_name='Test Vertical 1'
)
problem_xml = MultipleChoiceResponseXMLFactory().build_xml(
question_text='The correct answer is Choice 3',
choices=[False, False, True, False],
choice_names=['choice_0', 'choice_1', 'choice_2', 'choice_3']
)
cls.problem = ItemFactory.create(
parent=cls.vertical,
category="problem",
display_name="Test Problem",
data=problem_xml
)
cls.request = get_mock_request(UserFactory())
cls.user = cls.request.user
cls.instructor = UserFactory(username='staff', is_staff=True)
def _get_subsection_grade_and_verify(self, all_earned, all_possible, graded_earned, graded_possible):
"""
Retrieves the subsection grade and verifies that
its scores match those expected.
"""
subsection_grade_factory = SubsectionGradeFactory(
self.user,
self.course,
get_course_blocks(self.user, self.course.location)
)
grade = subsection_grade_factory.create(self.sequence)
self.assertEqual(grade.all_total.earned, all_earned)
self.assertEqual(grade.graded_total.earned, graded_earned)
self.assertEqual(grade.all_total.possible, all_possible)
self.assertEqual(grade.graded_total.possible, graded_possible)
@patch('crum.get_current_request')
def test_delete_student_state(self, _crum_mock):
problem_location = self.problem.location
self._get_subsection_grade_and_verify(0, 1, 0, 1)
answer_problem(course=self.course, request=self.request, problem=self.problem, score=1, max_value=1)
self._get_subsection_grade_and_verify(1, 1, 1, 1)
# Delete student state using the instructor dash
reset_student_attempts(
self.course.id,
self.user,
problem_location,
requesting_user=self.instructor,
delete_module=True,
)
# Verify that the student's grades are reset
self._get_subsection_grade_and_verify(0, 1, 0, 1)
class EnrollmentObjects(object):
"""
Container for enrollment objects.
`email` - student email
`user` - student User object
`cenr` - CourseEnrollment object
`cea` - CourseEnrollmentAllowed object
Any of the objects except email can be None.
"""
def __init__(self, email, user, cenr, cea):
self.email = email
self.user = user
self.cenr = cenr
self.cea = cea
class SettableEnrollmentState(EmailEnrollmentState):
"""
Settable enrollment state.
Used for testing state changes.
SettableEnrollmentState can be constructed and then
a call to create_user will make objects which
correspond to the state represented in the SettableEnrollmentState.
"""
def __init__(self, user=False, enrollment=False, allowed=False, auto_enroll=False): # pylint: disable=super-init-not-called
self.user = user
self.enrollment = enrollment
self.allowed = allowed
self.auto_enroll = auto_enroll
def __eq__(self, other):
return self.to_dict() == other.to_dict()
def __neq__(self, other):
return not self == other
def create_user(self, course_id=None):
"""
Utility method to possibly create and possibly enroll a user.
Creates a state matching the SettableEnrollmentState properties.
        Returns an EnrollmentObjects instance of (
email,
User, (optionally None)
CourseEnrollment, (optionally None)
CourseEnrollmentAllowed, (optionally None)
)
"""
# if self.user=False, then this will just be used to generate an email.
email = "[email protected]"
if self.user:
user = UserFactory()
email = user.email
if self.enrollment:
cenr = CourseEnrollment.enroll(user, course_id)
return EnrollmentObjects(email, user, cenr, None)
else:
return EnrollmentObjects(email, user, None, None)
elif self.allowed:
cea = CourseEnrollmentAllowed.objects.create(
email=email,
course_id=course_id,
auto_enroll=self.auto_enroll,
)
return EnrollmentObjects(email, None, None, cea)
else:
return EnrollmentObjects(email, None, None, None)
@attr(shard=1)
class TestSendBetaRoleEmail(CacheIsolationTestCase):
"""
Test edge cases for `send_beta_role_email`
"""
def setUp(self):
super(TestSendBetaRoleEmail, self).setUp()
self.user = UserFactory.create()
self.email_params = {'course': 'Robot Super Course'}
def test_bad_action(self):
bad_action = 'beta_tester'
error_msg = "Unexpected action received '{}' - expected 'add' or 'remove'".format(bad_action)
with self.assertRaisesRegexp(ValueError, error_msg):
send_beta_role_email(bad_action, self.user, self.email_params)
@attr(shard=1)
class TestGetEmailParamsCCX(SharedModuleStoreTestCase):
"""
    Test what URLs the function get_email_params returns for CCX student enrollment.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestGetEmailParamsCCX, cls).setUpClass()
cls.course = CourseFactory.create()
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def setUp(self):
super(TestGetEmailParamsCCX, self).setUp()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
self.course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
self.course_url = u'https://{}/courses/{}/'.format(
site,
self.course_key
)
self.course_about_url = self.course_url + 'about'
self.registration_url = u'https://{}/register'.format(site)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_ccx_enrollment_email_params(self):
# For a CCX, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(
self.course,
True,
course_key=self.course_key,
display_name=self.ccx.display_name
)
self.assertEqual(result['display_name'], self.ccx.display_name)
self.assertEqual(result['auto_enroll'], True)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
@attr(shard=1)
class TestGetEmailParams(SharedModuleStoreTestCase):
"""
Test what URLs the function get_email_params returns under different
production-like conditions.
"""
@classmethod
def setUpClass(cls):
super(TestGetEmailParams, cls).setUpClass()
cls.course = CourseFactory.create()
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
cls.course_url = u'https://{}/courses/{}/'.format(
site,
text_type(cls.course.id)
)
cls.course_about_url = cls.course_url + 'about'
cls.registration_url = u'https://{}/register'.format(site)
def test_normal_params(self):
# For a normal site, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(self.course, False)
self.assertEqual(result['auto_enroll'], False)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
def test_marketing_params(self):
# For a site with a marketing front end, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
result = get_email_params(self.course, True)
self.assertEqual(result['auto_enroll'], True)
# We should *not* get a course about url (LMS doesn't know what the marketing site URLs are)
self.assertEqual(result['course_about_url'], None)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
@attr(shard=1)
class TestRenderMessageToString(SharedModuleStoreTestCase):
"""
Test that email templates can be rendered in a language chosen manually.
    Test CCX enrollment email.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestRenderMessageToString, cls).setUpClass()
cls.course = CourseFactory.create()
cls.subject_template = 'emails/enroll_email_allowedsubject.txt'
cls.message_template = 'emails/enroll_email_allowedmessage.txt'
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def setUp(self):
super(TestRenderMessageToString, self).setUp()
coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=coach)
self.course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
def get_email_params(self):
"""
Returns a dictionary of parameters used to render an email.
"""
email_params = get_email_params(self.course, True)
email_params["email_address"] = "[email protected]"
email_params["full_name"] = "Jean Reno"
return email_params
def get_email_params_ccx(self):
"""
Returns a dictionary of parameters used to render an email for CCX.
"""
email_params = get_email_params(
self.course,
True,
course_key=self.course_key,
display_name=self.ccx.display_name
)
email_params["email_address"] = "[email protected]"
email_params["full_name"] = "Jean Reno"
return email_params
def get_subject_and_message(self, language):
"""
Returns the subject and message rendered in the specified language.
"""
return render_message_to_string(
self.subject_template,
self.message_template,
self.get_email_params(),
language=language
)
def get_subject_and_message_ccx(self, subject_template, message_template):
"""
Returns the subject and message rendered in the specified language for CCX.
"""
return render_message_to_string(
subject_template,
message_template,
self.get_email_params_ccx()
)
def test_subject_and_message_translation(self):
subject, message = self.get_subject_and_message('fr')
language_after_rendering = get_language()
you_have_been_invited_in_french = u"Vous avez été invité"
self.assertIn(you_have_been_invited_in_french, subject)
self.assertIn(you_have_been_invited_in_french, message)
self.assertEqual(settings.LANGUAGE_CODE, language_after_rendering)
def test_platform_language_is_used_for_logged_in_user(self):
with override_language('zh_CN'): # simulate a user login
subject, message = self.get_subject_and_message(None)
self.assertIn("You have been", subject)
self.assertIn("You have been", message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_enrollment_message_ccx_members(self):
"""
Test enrollment email template renders for CCX.
For EDX members.
"""
subject_template = 'emails/enroll_email_enrolledsubject.txt'
message_template = 'emails/enroll_email_enrolledmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
site = settings.SITE_NAME
course_url = u'https://{}/courses/{}/'.format(
site,
self.course_key
)
self.assertIn(course_url, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_unenrollment_message_ccx_members(self):
"""
Test unenrollment email template renders for CCX.
For EDX members.
"""
subject_template = 'emails/unenroll_email_subject.txt'
message_template = 'emails/unenroll_email_enrolledmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_enrollment_message_ccx_non_members(self):
"""
Test enrollment email template renders for CCX.
For non EDX members.
"""
subject_template = 'emails/enroll_email_allowedsubject.txt'
message_template = 'emails/enroll_email_allowedmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
site = settings.SITE_NAME
registration_url = u'https://{}/register'.format(site)
self.assertIn(registration_url, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_unenrollment_message_ccx_non_members(self):
"""
Test unenrollment email template renders for CCX.
For non EDX members.
"""
subject_template = 'emails/unenroll_email_subject.txt'
message_template = 'emails/unenroll_email_allowedmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
| Stanford-Online/edx-platform | lms/djangoapps/instructor/tests/test_enrollment.py | Python | agpl-3.0 | 33,245 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Module with dedicated functions/classes dealing with rate limiting requests.
"""
import collections
import copy
import httplib
import math
import re
import time
import webob.dec
import webob.exc
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import quota
from nova import utils
from nova import wsgi as base_wsgi
QUOTAS = quota.QUOTAS
LIMITS_PREFIX = "limits."
limits_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class LimitsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('limits', selector='limits')
rates = xmlutil.SubTemplateElement(root, 'rates')
rate = xmlutil.SubTemplateElement(rates, 'rate', selector='rate')
rate.set('uri', 'uri')
rate.set('regex', 'regex')
limit = xmlutil.SubTemplateElement(rate, 'limit', selector='limit')
limit.set('value', 'value')
limit.set('verb', 'verb')
limit.set('remaining', 'remaining')
limit.set('unit', 'unit')
limit.set('next-available', 'next-available')
absolute = xmlutil.SubTemplateElement(root, 'absolute',
selector='absolute')
limit = xmlutil.SubTemplateElement(absolute, 'limit',
selector=xmlutil.get_items)
limit.set('name', 0)
limit.set('value', 1)
return xmlutil.MasterTemplate(root, 1, nsmap=limits_nsmap)
class LimitsController(wsgi.Controller):
"""Controller for accessing limits in the OpenStack API."""
@wsgi.serializers(xml=LimitsTemplate)
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['nova.context']
quotas = QUOTAS.get_project_quotas(context, context.project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])
builder = self._get_view_builder(req)
return builder.build(rate_limits, abs_limits)
def create(self, req, body):
"""Create a new limit."""
raise webob.exc.HTTPNotImplemented()
def delete(self, req, id):
"""Delete the limit."""
raise webob.exc.HTTPNotImplemented()
def detail(self, req):
"""Return limit details."""
raise webob.exc.HTTPNotImplemented()
def show(self, req, id):
"""Show limit information."""
raise webob.exc.HTTPNotImplemented()
def update(self, req, id, body):
"""Update existing limit."""
raise webob.exc.HTTPNotImplemented()
def _get_view_builder(self, req):
return limits_views.ViewBuilder()
class Limit(object):
"""
Stores information about a limit for HTTP requests.
"""
UNITS = dict([(v, k) for k, v in utils.TIME_UNITS.items()])
def __init__(self, verb, uri, regex, value, unit):
"""
Initialize a new `Limit`.
@param verb: HTTP verb (POST, PUT, etc.)
@param uri: Human-readable URI
@param regex: Regular expression format for this limit
@param value: Integer number of requests which can be made
@param unit: Unit of measure for the value parameter
"""
self.verb = verb
self.uri = uri
self.regex = regex
self.value = int(value)
self.unit = unit
self.unit_string = self.display_unit().lower()
self.remaining = int(value)
if value <= 0:
raise ValueError("Limit value must be > 0")
self.last_request = None
self.next_request = None
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
msg = _("Only %(value)s %(verb)s request(s) can be "
"made to %(uri)s every %(unit_string)s.")
self.error_message = msg % self.__dict__
def __call__(self, verb, url):
"""
Represents a call to this limit from a relevant request.
@param verb: string http verb (POST, GET, etc.)
@param url: string URL
"""
if self.verb != verb or not re.match(self.regex, url):
return
now = self._get_time()
if self.last_request is None:
self.last_request = now
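        # Leaky-bucket accounting: the bucket drains one unit per elapsed second,
        # each request adds capacity/value units of "water", and a positive
        # overflow (difference) means the caller must wait that many seconds.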
leak_value = now - self.last_request
self.water_level -= leak_value
self.water_level = max(self.water_level, 0)
self.water_level += self.request_value
difference = self.water_level - self.capacity
self.last_request = now
if difference > 0:
self.water_level -= self.request_value
self.next_request = now + difference
return difference
cap = self.capacity
water = self.water_level
val = self.value
self.remaining = math.floor(((cap - water) / cap) * val)
self.next_request = now
def _get_time(self):
"""Retrieve the current time. Broken out for testability."""
return time.time()
def display_unit(self):
"""Display the string name of the unit."""
return self.UNITS.get(self.unit, "UNKNOWN")
def display(self):
"""Return a useful representation of this class."""
return {
"verb": self.verb,
"URI": self.uri,
"regex": self.regex,
"value": self.value,
"remaining": int(self.remaining),
"unit": self.display_unit(),
"resetTime": int(self.next_request or self._get_time()),
}
# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
DEFAULT_LIMITS = [
Limit("POST", "*", ".*", 10, utils.TIME_UNITS['MINUTE']),
Limit("POST", "*/servers", "^/servers", 50, utils.TIME_UNITS['DAY']),
Limit("PUT", "*", ".*", 10, utils.TIME_UNITS['MINUTE']),
Limit("GET", "*changes_since*", ".*changes_since.*", 3,
utils.TIME_UNITS['MINUTE']),
Limit("DELETE", "*", ".*", 100, utils.TIME_UNITS['MINUTE']),
]
class RateLimitingMiddleware(base_wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
"""
def __init__(self, application, limits=None, limiter=None, **kwargs):
"""
Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
application and sets up the given limits.
@param application: WSGI application to wrap
@param limits: String describing limits
@param limiter: String identifying class for representing limits
Other parameters are passed to the constructor for the limiter.
"""
base_wsgi.Middleware.__init__(self, application)
# Select the limiter class
if limiter is None:
limiter = Limiter
else:
limiter = importutils.import_class(limiter)
# Parse the limits, if any are provided
if limits is not None:
limits = limiter.parse_limits(limits)
self._limiter = limiter(limits or DEFAULT_LIMITS, **kwargs)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""
Represents a single call through this middleware. We should record the
request if we have a limit relevant to it. If no limit is relevant to
the request, ignore it.
If the request should be rate limited, return a fault telling the user
they are over the limit and need to retry later.
"""
verb = req.method
url = req.url
context = req.environ.get("nova.context")
if context:
username = context.user_id
else:
username = None
delay, error = self._limiter.check_for_delay(verb, url, username)
if delay:
msg = _("This request was rate-limited.")
retry = time.time() + delay
return wsgi.RateLimitFault(msg, error, retry)
req.environ["nova.limits"] = self._limiter.get_limits(username)
return self.application
class Limiter(object):
"""
Rate-limit checking class which handles limits in memory.
"""
def __init__(self, limits, **kwargs):
"""
Initialize the new `Limiter`.
@param limits: List of `Limit` objects
"""
self.limits = copy.deepcopy(limits)
self.levels = collections.defaultdict(lambda: copy.deepcopy(limits))
# Pick up any per-user limit information
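        # (Illustrative, not from the original config) an entry such as
        #   limits.alice = (POST, *, .*, 5, MINUTE)
        # would install a per-user override for the user "alice".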
for key, value in kwargs.items():
if key.startswith(LIMITS_PREFIX):
username = key[len(LIMITS_PREFIX):]
self.levels[username] = self.parse_limits(value)
def get_limits(self, username=None):
"""
Return the limits for a given user.
"""
return [limit.display() for limit in self.levels[username]]
def check_for_delay(self, verb, url, username=None):
"""
        Check the given verb/url/username triplet for limits.
@return: Tuple of delay (in seconds) and error message (or None, None)
"""
delays = []
for limit in self.levels[username]:
delay = limit(verb, url)
if delay:
delays.append((delay, limit.error_message))
if delays:
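            # Several limits may have been tripped; return the shortest delay
            # together with its error message.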
delays.sort()
return delays[0]
return None, None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor. We
# put this in the class so that subclasses can override the
# default limit parsing.
@staticmethod
def parse_limits(limits):
"""
Convert a string into a list of Limit instances. This
implementation expects a semicolon-separated sequence of
parenthesized groups, where each group contains a
comma-separated sequence consisting of HTTP method,
user-readable URI, a URI reg-exp, an integer number of
requests which can be made, and a unit of measure. Valid
values for the latter are "SECOND", "MINUTE", "HOUR", and
"DAY".
@return: List of Limit instances.
"""
# Handle empty limit strings
limits = limits.strip()
if not limits:
return []
# Split up the limits by semicolon
result = []
for group in limits.split(';'):
group = group.strip()
if group[:1] != '(' or group[-1:] != ')':
raise ValueError("Limit rules must be surrounded by "
"parentheses")
group = group[1:-1]
# Extract the Limit arguments
args = [a.strip() for a in group.split(',')]
if len(args) != 5:
raise ValueError("Limit rules must contain the following "
"arguments: verb, uri, regex, value, unit")
# Pull out the arguments
verb, uri, regex, value, unit = args
# Upper-case the verb
verb = verb.upper()
# Convert value--raises ValueError if it's not integer
value = int(value)
# Convert unit
unit = unit.upper()
if unit not in utils.TIME_UNITS:
raise ValueError("Invalid units specified")
unit = utils.TIME_UNITS[unit]
# Build a limit
result.append(Limit(verb, uri, regex, value, unit))
return result
class WsgiLimiter(object):
"""
Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
To use, POST ``/<username>`` with JSON data such as::
{
"verb" : GET,
"path" : "/servers"
}
and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
header containing the number of seconds to wait before the action would
succeed.
"""
def __init__(self, limits=None):
"""
Initialize the new `WsgiLimiter`.
@param limits: List of `Limit` objects
"""
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, request):
"""
Handles a call to this application. Returns 204 if the request is
acceptable to the limiter, else a 403 is returned with a relevant
header indicating when the request *will* succeed.
"""
if request.method != "POST":
raise webob.exc.HTTPMethodNotAllowed()
try:
info = dict(jsonutils.loads(request.body))
except ValueError:
raise webob.exc.HTTPBadRequest()
username = request.path_info_pop()
verb = info.get("verb")
path = info.get("path")
delay, error = self._limiter.check_for_delay(verb, path, username)
if delay:
headers = {"X-Wait-Seconds": "%.2f" % delay}
return webob.exc.HTTPForbidden(headers=headers, explanation=error)
else:
return webob.exc.HTTPNoContent()
class WsgiLimiterProxy(object):
"""
Rate-limit requests based on answers from a remote source.
"""
def __init__(self, limiter_address):
"""
Initialize the new `WsgiLimiterProxy`.
@param limiter_address: IP/port combination of where to request limit
"""
self.limiter_address = limiter_address
def check_for_delay(self, verb, path, username=None):
body = jsonutils.dumps({"verb": verb, "path": path})
headers = {"Content-Type": "application/json"}
conn = httplib.HTTPConnection(self.limiter_address)
if username:
conn.request("POST", "/%s" % (username), body, headers)
else:
conn.request("POST", "/", body, headers)
resp = conn.getresponse()
        if 200 <= resp.status < 300:
return None, None
return resp.getheader("X-Wait-Seconds"), resp.read() or None
# Note: This method gets called before the class is instantiated,
# so this must be either a static method or a class method. It is
# used to develop a list of limits to feed to the constructor.
# This implementation returns an empty list, since all limit
# decisions are made by a remote server.
@staticmethod
def parse_limits(limits):
"""
Ignore a limits string--simply doesn't apply for the limit
proxy.
@return: Empty list.
"""
return []
class Limits(extensions.V3APIExtensionBase):
"""Limits rate core API."""
name = "Limits"
alias = "limits"
namespace = "http://docs.openstack.org/compute/core/limits/v3"
version = 1
def get_resources(self):
resources = [extensions.ResourceExtension(self.alias,
LimitsController(),
member_name='limit')]
return resources
def get_controller_extensions(self):
return []
| TieWei/nova | nova/api/openstack/compute/plugins/v3/limits.py | Python | apache-2.0 | 16,186 |
"""
Telegram platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.telegram/
"""
import io
import logging
import urllib.error
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_DATA, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import (
CONF_API_KEY, ATTR_LOCATION, ATTR_LATITUDE, ATTR_LONGITUDE)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['python-telegram-bot==5.3.0']
ATTR_PHOTO = 'photo'
ATTR_KEYBOARD = 'keyboard'
ATTR_DOCUMENT = 'document'
ATTR_CAPTION = 'caption'
ATTR_URL = 'url'
ATTR_FILE = 'file'
ATTR_USERNAME = 'username'
ATTR_PASSWORD = 'password'
CONF_CHAT_ID = 'chat_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_CHAT_ID): cv.string,
})
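# Example configuration.yaml entry for this platform (illustrative values only;
# the api_key and chat_id shown are placeholders, not real credentials):
#
#   notify:
#     - platform: telegram
#       api_key: YOUR_TELEGRAM_BOT_TOKEN
#       chat_id: 123456789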
def get_service(hass, config, discovery_info=None):
"""Get the Telegram notification service."""
import telegram
try:
chat_id = config.get(CONF_CHAT_ID)
api_key = config.get(CONF_API_KEY)
bot = telegram.Bot(token=api_key)
username = bot.getMe()['username']
_LOGGER.info("Telegram bot is '%s'", username)
except urllib.error.HTTPError:
_LOGGER.error("Please check your access token")
return None
return TelegramNotificationService(api_key, chat_id)
def load_data(url=None, file=None, username=None, password=None):
"""Load photo/document into ByteIO/File container from a source."""
try:
if url is not None:
# load photo from url
if username is not None and password is not None:
req = requests.get(url, auth=(username, password), timeout=15)
else:
req = requests.get(url, timeout=15)
return io.BytesIO(req.content)
elif file is not None:
# load photo from file
return open(file, "rb")
else:
_LOGGER.warning("Can't load photo no photo found in params!")
except OSError as error:
_LOGGER.error("Can't load photo into ByteIO: %s", error)
return None
class TelegramNotificationService(BaseNotificationService):
"""Implement the notification service for Telegram."""
def __init__(self, api_key, chat_id):
"""Initialize the service."""
import telegram
self._api_key = api_key
self._chat_id = chat_id
self.bot = telegram.Bot(token=self._api_key)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
import telegram
title = kwargs.get(ATTR_TITLE)
data = kwargs.get(ATTR_DATA)
        # if data exists, dispatch a photo/location/document/keyboard payload
if data is not None and ATTR_PHOTO in data:
photos = data.get(ATTR_PHOTO, None)
photos = photos if isinstance(photos, list) else [photos]
for photo_data in photos:
self.send_photo(photo_data)
return
elif data is not None and ATTR_LOCATION in data:
return self.send_location(data.get(ATTR_LOCATION))
elif data is not None and ATTR_DOCUMENT in data:
return self.send_document(data.get(ATTR_DOCUMENT))
elif data is not None and ATTR_KEYBOARD in data:
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
return self.send_keyboard(message, keys)
if title:
text = '{} {}'.format(title, message)
else:
text = message
parse_mode = telegram.parsemode.ParseMode.MARKDOWN
# send message
try:
self.bot.sendMessage(chat_id=self._chat_id,
text=text,
parse_mode=parse_mode)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_keyboard(self, message, keys):
"""Display keyboard."""
import telegram
keyboard = telegram.ReplyKeyboardMarkup([
[key.strip() for key in row.split(",")] for row in keys])
try:
self.bot.sendMessage(chat_id=self._chat_id, text=message,
reply_markup=keyboard)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending message")
def send_photo(self, data):
"""Send a photo."""
import telegram
caption = data.get(ATTR_CAPTION)
# send photo
try:
photo = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendPhoto(chat_id=self._chat_id,
photo=photo, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending photo")
def send_document(self, data):
"""Send a document."""
import telegram
caption = data.get(ATTR_CAPTION)
        # send document
try:
document = load_data(
url=data.get(ATTR_URL),
file=data.get(ATTR_FILE),
username=data.get(ATTR_USERNAME),
password=data.get(ATTR_PASSWORD),
)
self.bot.sendDocument(chat_id=self._chat_id,
document=document, caption=caption)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending document")
def send_location(self, gps):
"""Send a location."""
import telegram
latitude = float(gps.get(ATTR_LATITUDE, 0.0))
longitude = float(gps.get(ATTR_LONGITUDE, 0.0))
# send location
try:
self.bot.sendLocation(chat_id=self._chat_id,
latitude=latitude, longitude=longitude)
except telegram.error.TelegramError:
_LOGGER.exception("Error sending location")
| kyvinh/home-assistant | homeassistant/components/notify/telegram.py | Python | apache-2.0 | 6,190 |
#!/usr/bin/env python
"""
makenpz.py DIRECTORY
Build an npz file containing all data files in the directory.
"""
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from optparse import OptionParser
def main():
p = OptionParser()
options, args = p.parse_args()
if len(args) != 1:
p.error("no valid directory given")
inp = args[0]
outp = inp + ".npz"
files = []
for dirpath, dirnames, filenames in os.walk(inp):
for fn in filenames:
if fn.endswith('.txt'):
files.append(
(dirpath[len(inp)+1:] + '/' + fn[:-4],
os.path.join(dirpath, fn)))
data = {}
for key, fn in files:
key = key.replace('/', '-').strip('-')
try:
data[key] = np.loadtxt(fn)
except ValueError:
print("Failed to load", fn)
savez_compress(outp, **data)
def savez_compress(file, *args, **kwds):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, str):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict:
raise ValueError("Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
zip = zipfile.ZipFile(file, mode="w", compression=zipfile.ZIP_DEFLATED)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
np.lib.format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
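# Usage sketch (hypothetical arrays): each keyword becomes one <name>.npy entry
# inside a DEFLATE-compressed .npz archive.
#
#   savez_compress('tables.npz', grid=np.linspace(0.0, 1.0, 5), values=np.eye(3))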
if __name__ == "__main__":
main()
| pbrod/scipy | scipy/special/utils/makenpz.py | Python | bsd-3-clause | 2,226 |
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Fixed-Point parameter type testcases - FP32_Q15.16
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
FP32_Q15.16 :
- size = 32 bits
    - 15 integer bits, 16 fractional bits
- range : [-32768, 32767.9999847412109375]
Test cases :
------------
- FP32_Q15.16 parameter min value = -32768
- FP32_Q15.16 parameter min value out of bounds = -32768.00001
- FP32_Q15.16 parameter max value = 32767.99998
- FP32_Q15.16 parameter max value out of bounds = 32767.999985
- FP32_Q15.16 parameter in nominal case = 12345.12345
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type FP32_Q15.16 - range [-32768,32767.9999847412109375]
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/FP32_Q15.16"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing FP32_Q15.16 in nominal case = 12345.12345
-------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP32_Q15.16 parameter in nominal case = 12345.12345
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP32_Q15.16 parameter set to 12345.12345
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
log.I("FP32_Q15.16 parameter in nominal case = 12345.12345")
value = "12345.1234"
hex_value = "0x30391f97"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s" % (self.param_name, err))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s" % (self.param_name, err))
assert round(float(out),4) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP32_Q15.16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing FP32_Q15.16 minimal value = -32768
------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP32_Q15.16 parameter min value = -32768
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP32_Q15.16 parameter set to -32768
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
log.I("FP32_Q15.16 parameter min value = -32768")
value = "-32768"
hex_value = "0x80000000"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s" % (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None,log.E("when setting parameter %s : %s" % (self.param_name, err))
assert round(float(out),5) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP32_Q15.16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing FP32_Q15.16 parameter value out of negative range
---------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP32_Q15.16 to -32768.00001
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- FP32_Q15.16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("FP32_Q15.16 parameter min value out of bounds = -32768.000001")
value = "-32768.00001"
param_check = commands.getoutput('cat $PFW_RESULT/FP32_Q15.16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/FP32_Q15.16') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing FP32_Q15.16 parameter maximum value
-------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP32_Q15.16 to 32767.99998
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP32_Q15.16 parameter set to 32767.99998
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
log.I("FP32_Q15.16 parameter max value = 32767.99998")
value = "32767.99998"
hex_value = "0x7fffffff"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s" % (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s" % (self.param_name, err))
assert round(float(out),5) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP32_Q15.16') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing FP32_Q15.16 parameter value out of positive range
---------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP32_Q15.16 to 32767.999985
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- FP32_Q15.16 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("FP32_Q15.16 parameter max value out of bounds = 32767.999985")
value = "32767.999985"
param_check = commands.getoutput('cat $PFW_RESULT/FP32_Q15.16')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/FP32_Q15.16') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
| geekboxzone/mmallow_external_parameter-framework | test/functional-tests/PfwTestCase/Types/tFP32_Q15_16.py | Python | bsd-3-clause | 10,981 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import strip_html
from frappe import _
from jinja2.utils import concat
from jinja2 import meta
import re
def build_template(context):
"""Returns a dict of block name and its rendered content"""
out = {}
render_blocks(context["template"], out, context)
# set_sidebar(out, context)
set_breadcrumbs(out, context)
set_title_and_header(out, context)
# meta
if "meta_block" not in out:
out["meta_block"] = frappe.get_template("templates/includes/meta_block.html").render(context)
add_index(out, context)
# render
content_context = {}
content_context.update(context)
content_context.update(out)
out["content"] = frappe.get_template("templates/includes/page_content.html").render(content_context)
separate_style_and_script(out, context)
add_hero(out, context)
return out
def render_blocks(template_path, out, context):
"""Build the template block by block from the main template."""
env = frappe.get_jenv()
source = frappe.local.jloader.get_source(frappe.local.jenv, template_path)[0]
for referenced_template_path in meta.find_referenced_templates(env.parse(source)):
if referenced_template_path:
render_blocks(referenced_template_path, out, context)
template = frappe.get_template(template_path)
for block, render in template.blocks.items():
new_context = template.new_context(context)
out[block] = concat(render(new_context))
def separate_style_and_script(out, context):
"""Extract `style` and `script` tags into separate blocks"""
out["style"] = re.sub("</?style[^<>]*>", "", out.get("style") or "")
out["script"] = re.sub("</?script[^<>]*>", "", out.get("script") or "")
def set_breadcrumbs(out, context):
"""Build breadcrumbs template (deprecated)"""
out["no_breadcrumbs"] = context.get("no_breadcrumbs", 0) or ("<!-- no-breadcrumbs -->" in out.get("content", ""))
# breadcrumbs
if not out["no_breadcrumbs"] and "breadcrumbs" not in out:
out["breadcrumbs"] = frappe.get_template("templates/includes/breadcrumbs.html").render(context)
def set_title_and_header(out, context):
"""Extract and set title and header from content or context."""
out["no_header"] = context.get("no_header", 0) or ("<!-- no-header -->" in out.get("content", ""))
if "<!-- title:" in out.get("content", ""):
out["title"] = re.findall('<!-- title:([^>]*) -->', out.get("content"))[0].strip()
if "title" not in out:
out["title"] = context.get("title")
if context.get("page_titles") and context.page_titles.get(context.pathname):
out["title"] = context.page_titles.get(context.pathname)[0]
# header
if out["no_header"]:
out["header"] = ""
else:
if "title" not in out and out.get("header"):
out["title"] = out["header"]
if not out.get("header") and "<h1" not in out.get("content", ""):
if out.get("title"):
out["header"] = out["title"]
if out.get("header") and not re.findall("<h.>", out["header"]):
out["header"] = "<h1>" + out["header"] + "</h1>"
if not out.get("header"):
out["no_header"] = 1
out["title"] = strip_html(out.get("title") or "")
def set_sidebar(out, context):
"""Include sidebar (deprecated)"""
out["has_sidebar"] = not (context.get("no_sidebar", 0) or ("<!-- no-sidebar -->" in out.get("content", "")))
if out.get("has_sidebar"):
out["sidebar"] = frappe.get_template("templates/includes/sidebar.html").render(context)
def add_index(out, context):
"""Add index, next button if `{index}`, `{next}` is present."""
# table of contents
if "{index}" in out.get("content", "") and context.get("children"):
html = frappe.get_template("templates/includes/static_index.html").render({
"items": context["children"]})
out["content"] = out["content"].replace("{index}", html)
# next and previous
if "{next}" in out.get("content", ""):
next_item = context.doc.get_next()
if next_item:
if next_item.name[0]!="/": next_item.name = "/" + next_item.name
html = ('<p class="btn-next-wrapper"><a class="btn-next" href="{name}">'+_("Next")+': {title}</a></p>').format(**next_item)
else:
html = ""
out["content"] = out["content"].replace("{next}", html)
def add_hero(out, context):
"""Add a hero element if specified in content or hooks.
Hero elements get full page width."""
out["hero"] = ""
if "<!-- start-hero -->" in out["content"]:
parts1 = out["content"].split("<!-- start-hero -->")
parts2 = parts1[1].split("<!-- end-hero -->")
out["content"] = parts1[0] + parts2[1]
out["hero"] = parts2[0]
elif context.hero and context.hero.get(context.pathname):
out["hero"] = frappe.render_template(context.hero[context.pathname][0], context)
| mbauskar/tele-frappe | frappe/website/template.py | Python | mit | 4,737 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import pos_box
from . import pos_details
from . import pos_open_statement
from . import pos_payment
| t3dev/odoo | addons/point_of_sale/wizard/__init__.py | Python | gpl-3.0 | 207 |
# -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lettuce import core
from sure import expect
from nose.tools import assert_equals
from nose.tools import assert_not_equals
STEP_WITH_TABLE = u'''
Given I have the following items in my shelf:
| name | description |
| Glass | a nice glass to drink grape juice |
| Pasta | a pasta to cook and eat with grape juice in the glass |
| Pasta | a pasta to cook and eat with grape juice in the glass |
'''
def test_step_definition():
"Step definition takes a function and a step, keeps its definition " \
"relative path, and line + 1 (to consider the decorator)"
def dumb():
pass
definition = core.StepDefinition("FOO BAR", dumb)
assert_equals(definition.function, dumb)
assert_equals(definition.file, core.fs.relpath(__file__).rstrip("c"))
assert_equals(definition.line, 37)
def test_step_description():
"Step description takes a line and filename, " \
"and keeps the relative path for filename"
description = core.StepDescription(10, __file__)
assert_equals(description.file, core.fs.relpath(__file__))
assert_not_equals(description.file, __file__)
assert_equals(description.line, 10)
def test_scenario_description():
"Scenario description takes a scenario, filename and " \
"a string, and keeps the relative path for filename and line"
string = '''
asdasdasdasd
8fg6f8g23o83g
dfjdsfjsdScenario: NAMEOFSCENARIOjdkasbdkajsb
Fsdad
Scenario: NAMEOFSCENARIO
da sodnasndjasdasd
'''
class ScenarioFake:
name = 'NAMEOFSCENARIO'
description = core.ScenarioDescription(
ScenarioFake, __file__, string, core.Language())
assert_equals(description.file, core.fs.relpath(__file__))
assert_not_equals(description.file, __file__)
assert_equals(description.line, 6)
def test_feature_description():
"Feature description takes a feature, filename and original " \
"string, and keeps the relative path for filename, line " \
"and description lines"
string = u'''
# lang: en-us
Feature: FEATURE NAME! #@@$%ˆ&*)(*%$E#
here comes
the description
of the scenario
really!
'''
class FakeFeature:
description = 'the description\nof the scenario\n'
description = core.FeatureDescription(
FakeFeature, __file__, string, core.Language())
assert_equals(description.file, core.fs.relpath(__file__))
assert_not_equals(description.file, __file__)
assert_equals(description.line, 3)
assert_equals(description.description_at, (5, 6))
def test_step_represent_string_when_not_defined():
"""Step.represent_string behaviour when not defined"""
class FakeFeature:
max_length = 10
class FakeScenario:
feature = FakeFeature
relative_path = core.fs.relpath(__file__)
step = core.Step('some sentence', '', 239, __file__)
step.scenario = FakeScenario
assert_equals(
step.represent_string('test'),
" test # %s:239\n" % relative_path,
)
def test_step_represent_string_when_defined():
"Step.represent_string behaviour when defined"
class FakeFeature:
max_length = 10
class FakeScenario:
feature = FakeFeature
class FakeScenarioDefinition:
line = 421
file = 'should/be/filename'
step = core.Step('some sentence', '', 239, "not a file")
step.scenario = FakeScenario
step.defined_at = FakeScenarioDefinition
assert_equals(
step.represent_string('foobar'),
" foobar # should/be/filename:421\n",
)
def test_step_represent_table():
"Step.represent_hashes"
step = core.Step.from_string(STEP_WITH_TABLE)
assert_equals(
step.represent_hashes(),
' | name | description |\n'
' | Glass | a nice glass to drink grape juice |\n'
' | Pasta | a pasta to cook and eat with grape juice in the glass |\n'
' | Pasta | a pasta to cook and eat with grape juice in the glass |\n'
)
STEP_WITH_MATRIX = u'''
Given i have the following matrix:
| a | b | ab |
| 2 | 24 | 3 |
'''
STEP_WITH_MATRIX2 = u'''
Given i have the following matrix:
| a | a |
| 2 | a |
| | 67 |
'''
def test_step_represent_matrix():
"Step with a more suggestive representation for a matrix"
step = core.Step.from_string(STEP_WITH_MATRIX2)
assert_equals(
step.represent_columns(),
' | a | a |\n'
' | 2 | a |\n'
' | | 67|\n'
)
SCENARIO_OUTLINE = u'''
Scenario: Regular numbers
Given I do fill description with '<value_one>'
And then, age with with '<and_other>'
Examples:
| value_one | and_other |
| first| primeiro |
|second |segundo|
'''
def test_scenario_outline_represent_examples():
"Step.represent_hashes"
step = core.Scenario.from_string(SCENARIO_OUTLINE)
assert_equals(
step.represent_examples(),
' | value_one | and_other |\n'
' | first | primeiro |\n'
' | second | segundo |\n'
)
| yangming85/lettuce | tests/unit/test_core.py | Python | gpl-3.0 | 6,094 |
#/******************************************************************************
# * Copyright (c) 2012 Jan Rheinländer <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This library is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU Library General Public *
# * License as published by the Free Software Foundation; either *
# * version 2 of the License, or (at your option) any later version. *
# * *
# * This library is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this library; see the file COPYING.LIB. If not, *
# * write to the Free Software Foundation, Inc., 59 Temple Place, *
# * Suite 330, Boston, MA 02111-1307, USA *
# * *
# ******************************************************************************/
import FreeCAD, FreeCADGui
import Part, PartDesignGui
from PyQt4 import QtCore, QtGui
import Standards
import os
class TaskHole:
"Hole hole feature"
types = ["Linear", "Coaxial"]
typestr = ["Linear to two lines/planes", "Coaxial to a circle/cylinder"]
def __init__(self, feature):
self.form = None
self.extraStandards = []
self.feature = feature
p=os.path.realpath(__file__)
p=os.path.dirname(p)
self.ui = os.path.join(p, "TaskHole.ui")
def accept(self):
self.feature.touch()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.ActiveDocument.resetEdit()
return True
def reject(self):
if (self.feature != None):
self.hideFeature() # Show the support again
document = self.feature.Document
body = FreeCADGui.activeView().getActiveObject("pdbody");
groove = self.feature.HoleGroove
sketch = groove.Sketch
plane = sketch.Support[0]
axis = plane.References[0][0]
body.removeObject(self.feature)
document.removeObject(self.feature.Name)
body.removeObject(groove)
document.removeObject(groove.Name)
body.removeObject(sketch)
try:
document.removeObject(sketch.Name)
except Exception:
pass # This always throws an exception: "Sketch support has been deleted" from SketchObject::execute()
body.removeObject(plane)
document.removeObject(plane.Name)
body.removeObject(axis)
document.removeObject(axis.Name)
FreeCADGui.ActiveDocument.resetEdit()
FreeCADGui.Control.closeDialog(self)
return True
def isAllowedAlterDocument(self):
return False
def isAllowedAlterView(self):
return False
def isAllowedAlterSelection(self):
return True
def getMainWindow(self):
"returns the main window"
# using QtGui.QApplication.activeWindow() isn't very reliable because if another
# widget than the mainwindow is active (e.g. a dialog) the wrong widget is
# returned
toplevel = QtGui.QApplication.topLevelWidgets()
for i in toplevel:
if i.metaObject().className() == "Gui::MainWindow":
return i
raise Exception("No main window found")
def setupUi(self):
mw = self.getMainWindow()
form = mw.findChild(QtGui.QWidget, "TaskHole")
if form is None:
return
form.tabWidget = form.findChild(QtGui.QTabWidget, "tabWidget")
# Type
form.tabType = form.tabWidget.findChild(QtGui.QWidget, "tab_type")
form.buttonThru = form.tabType.findChild(QtGui.QRadioButton, "buttonThru")
form.buttonDepth = form.tabType.findChild(QtGui.QRadioButton, "buttonDepth")
form.checkThreaded = form.tabType.findChild(QtGui.QCheckBox, "checkThreaded")
form.checkCounterbore = form.tabType.findChild(QtGui.QCheckBox, "checkCounterbore")
form.checkCountersink = form.tabType.findChild(QtGui.QCheckBox, "checkCountersink")
# Norm
form.tabNorm = form.tabWidget.findChild(QtGui.QWidget, "tab_norm")
form.checkCustom = form.tabNorm.findChild(QtGui.QCheckBox, "checkCustom")
form.comboNorm = form.tabNorm.findChild(QtGui.QComboBox, "comboNorm")
for std in Standards.getStandards("through"):
form.comboNorm.addItem(std)
form.comboTolerance = form.tabNorm.findChild(QtGui.QComboBox, "comboTolerance")
for tol in Standards.standards_tolerance:
form.comboTolerance.addItem(tol)
form.comboNormDia = form.tabNorm.findChild(QtGui.QComboBox, "comboNormDia")
form.comboNormBoltWasher = form.tabNorm.findChild(QtGui.QComboBox, "comboNormBoltWasher")
# Thread
form.tabThread = form.tabWidget.findChild(QtGui.QWidget, "tab_thread")
form.comboThreadNorm = form.tabThread.findChild(QtGui.QComboBox, "comboThreadNorm")
for std in Standards.getStandards("thread"):
form.comboThreadNorm.addItem(std)
form.comboThreadDia = form.tabThread.findChild(QtGui.QComboBox, "comboThreadDia")
form.checkCustomThreadLength = form.tabThread.findChild(QtGui.QCheckBox, "checkCustomThreadLength")
form.comboFinishNorm = form.tabThread.findChild(QtGui.QComboBox, "comboFinishNorm")
for std in Standards.getStandards("threaded"):
form.comboFinishNorm.addItem(std)
# Data
form.tabData = form.tabWidget.findChild(QtGui.QWidget, "tab_data")
form.spinDiameter = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinDiameter")
form.spinDepth = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinDepth")
form.spinCounterboreDiameter = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinCounterboreDiameter")
form.spinCounterboreDepth = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinCounterboreDepth")
form.spinCountersinkAngle = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinCountersinkAngle")
form.spinThreadLength = form.tabData.findChild(QtGui.QDoubleSpinBox, "spinThreadLength")
# Position
form.tabPosition = form.tabWidget.findChild(QtGui.QWidget, "tab_position")
form.comboType = form.tabPosition.findChild(QtGui.QComboBox, "comboType")
for i in self.typestr:
form.comboType.addItem(i)
form.buttonSupport = form.tabPosition.findChild(QtGui.QPushButton, "buttonSupport")
form.lineSupport = form.tabPosition.findChild(QtGui.QLineEdit, "lineSupport")
form.buttonRef1 = form.tabPosition.findChild(QtGui.QPushButton, "buttonRef1")
form.lineRef1 = form.tabPosition.findChild(QtGui.QLineEdit, "lineRef1")
form.labelRef1 = form.tabPosition.findChild(QtGui.QLabel, "labelRef1")
form.spinRef1 = form.tabPosition.findChild(QtGui.QDoubleSpinBox, "spinRef1")
form.buttonRef2 = form.tabPosition.findChild(QtGui.QPushButton, "buttonRef2")
form.lineRef2 = form.tabPosition.findChild(QtGui.QLineEdit, "lineRef2")
form.labelRef2 = form.tabPosition.findChild(QtGui.QLabel, "labelRef2")
form.spinRef2 = form.tabPosition.findChild(QtGui.QDoubleSpinBox, "spinRef2")
self.form = form
# Connect Signals and Slots
# Type
self.form.buttonThru.toggled.connect(self.buttonThru)
self.form.buttonDepth.toggled.connect(self.buttonDepth)
self.form.checkThreaded.toggled.connect(self.checkThreaded)
self.form.checkCounterbore.toggled.connect(self.checkCounterbore)
self.form.checkCountersink.toggled.connect(self.checkCountersink)
# Norm
self.form.checkCustom.toggled.connect(self.checkCustom)
self.form.comboNorm.currentIndexChanged.connect(self.comboNorm)
self.form.comboTolerance.currentIndexChanged.connect(self.comboTolerance)
self.form.comboNormDia.currentIndexChanged.connect(self.comboNormDia)
self.form.comboNormBoltWasher.currentIndexChanged.connect(self.comboNormBoltWasher)
# Thread
self.form.comboThreadNorm.currentIndexChanged.connect(self.comboThreadNorm)
self.form.comboThreadDia.currentIndexChanged.connect(self.comboThreadDia)
self.form.checkCustomThreadLength.toggled.connect(self.checkCustomThreadLength)
self.form.comboFinishNorm.currentIndexChanged.connect(self.comboFinishNorm)
# Data
self.form.spinDiameter.valueChanged.connect(self.spinDiameter)
self.form.spinDepth.valueChanged.connect(self.spinDepth)
self.form.spinCounterboreDiameter.valueChanged.connect(self.spinCounterboreDiameter)
self.form.spinCounterboreDepth.valueChanged.connect(self.spinCounterboreDepth)
self.form.spinCountersinkAngle.valueChanged.connect(self.spinCountersinkAngle)
self.form.spinThreadLength.valueChanged.connect(self.spinThreadLength)
# Position
self.form.comboType.currentIndexChanged.connect(self.comboType)
self.form.buttonSupport.clicked.connect(self.buttonSupport)
self.form.buttonRef1.clicked.connect(self.buttonRef1)
self.form.spinRef1.valueChanged.connect(self.spinRef1)
self.form.buttonRef2.clicked.connect(self.buttonRef2)
self.form.spinRef2.valueChanged.connect(self.spinRef2)
# Update the UI
self.updateUI()
return True
def getRefText(self, ref):
(obj, element) = ref
if isinstance(element, basestring):
return obj.Name + ":" + element
elif isinstance(element, list):
return obj.Name + ":" + element[0]
else:
return obj.Name
def updateUI(self):
# Type
self.form.buttonThru.setChecked(self.feature.HoleType == "Thru")
self.form.buttonDepth.setChecked(self.feature.HoleType == "Depth")
self.form.checkThreaded.setChecked(self.feature.Threaded == True)
self.form.checkCounterbore.setChecked(self.feature.Counterbore == True)
self.form.checkCountersink.setChecked(self.feature.Countersink == True)
# Norm
if self.feature.Norm == "Custom":
self.form.checkCustom.setChecked(True)
self.form.comboNorm.setEnabled(False)
self.form.comboTolerance.setEnabled(False)
self.form.comboNormDia.setEnabled(False)
self.form.comboNormBoltWasher.setEnabled(False)
else:
if self.feature.Counterbore == True:
holetype = "counterbore"
elif self.feature.Countersink == True:
holetype = "countersink"
elif self.feature.Threaded == True:
holetype = "threaded"
else:
holetype = "through"
self.form.comboNorm.setEnabled(True)
self.form.comboTolerance.setEnabled(True)
self.form.comboNormDia.setEnabled(True)
if holetype == "counterbore":
self.form.comboNormBoltWasher.setEnabled(True)
else:
self.form.comboNormBoltWasher.setEnabled(False)
# comboNorm
standards = Standards.getStandards(holetype)
self.form.comboNorm.blockSignals(True)
self.form.comboNorm.clear()
for std in standards:
self.form.comboNorm.addItem(std)
if not self.feature.Norm in standards:
self.feature.Norm = standards[0]
else:
self.form.comboNorm.setCurrentIndex(standards.index(self.feature.Norm))
self.form.comboNorm.blockSignals(False)
# comboTolerance
self.form.comboTolerance.blockSignals(True)
self.form.comboTolerance.setCurrentIndex(Standards.standards_tolerance.index(self.feature.NormTolerance))
self.form.comboTolerance.blockSignals(False)
# comboNormDia
diameters = sorted(Standards.getBaseDiameters(self.feature.Norm))
self.form.comboNormDia.blockSignals(True)
self.form.comboNormDia.clear()
for dia in diameters:
self.form.comboNormDia.addItem("M%g" % dia)
if self.feature.NormDiameter in diameters:
self.form.comboNormDia.setCurrentIndex(diameters.index(self.feature.NormDiameter))
self.form.comboNormDia.blockSignals(False)
# comboNormBoltWasher
if holetype == "counterbore":
rowStandards = sorted(Standards.getRowStandards(self.feature.Norm))
self.form.comboNormBoltWasher.blockSignals(True)
self.form.comboNormBoltWasher.clear()
for std in rowStandards:
self.form.comboNormBoltWasher.addItem(std)
if self.feature.ExtraNorm in rowStandards:
self.form.comboNormBoltWasher.setCurrentIndex(rowStandards.index(self.feature.ExtraNorm))
self.form.comboNormBoltWasher.blockSignals(False)
# Dependent values
if holetype == "through":
self.feature.Diameter = Standards.getThroughHoleDia(self.feature.Norm, self.feature.NormDiameter, self.feature.NormTolerance)
elif holetype == "counterbore":
throughStandard = Standards.getThroughHoleStandard(self.feature.Norm)
self.feature.Diameter = Standards.getThroughHoleDia(throughStandard, self.feature.NormDiameter, self.feature.NormTolerance)
self.feature.CounterboreDiameter = Standards.getCounterboreDia(self.feature.Norm, self.feature.NormDiameter, self.feature.ExtraNorm)
# TODO: Calculate counter bore depth from standard for bolt and washer(s)
# Requires accessing all the norms for bolts
# self.feature.CounterboreDepth = calcCounterboreDepth(...)
elif holetype == "countersink":
throughStandard = Standards.getThroughHoleStandard(self.feature.Norm)
self.feature.Diameter = Standards.getThroughHoleDia(throughStandard, self.feature.NormDiameter, self.feature.NormTolerance)
self.feature.CounterboreDiameter = Standards.getCountersinkDia(self.feature.Norm, self.feature.NormDiameter)
self.feature.CountersinkAngle = Standards.getCountersinkAngle(self.feature.Norm, self.feature.NormDiameter) / 2.0
# Thread
if self.feature.Threaded == True:
if not self.feature.Counterbore and not self.feature.Countersink:
self.form.comboTolerance.setEnabled(False)
else:
self.form.tabNorm.setEnabled(True)
self.form.comboTolerance.setEnabled(False)
self.form.tabThread.setEnabled(True)
self.form.comboThreadNorm.blockSignals(True)
standards = Standards.getStandards("thread")
if not self.feature.NormThread in standards:
self.feature.NormThread = standards[0]
else:
self.form.comboThreadNorm.setCurrentIndex(standards.index(self.feature.NormThread))
self.form.comboThreadNorm.blockSignals(False)
threadDiameters = sorted(Standards.getBaseDiameters(self.feature.NormThread))
self.form.comboThreadDia.blockSignals(True)
self.form.comboThreadDia.clear()
for dia in threadDiameters:
self.form.comboThreadDia.addItem("M%g" % dia)
if self.feature.NormDiameter in threadDiameters:
self.form.comboThreadDia.setCurrentIndex(threadDiameters.index(self.feature.NormDiameter))
self.form.comboThreadDia.blockSignals(False)
if self.feature.NormThreadFinish == "Custom":
self.form.checkCustomThreadLength.setChecked(True)
self.form.comboFinishNorm.setEnabled(False)
else:
self.form.checkCustomThreadLength.setChecked(False)
self.form.comboFinishNorm.setEnabled(True)
self.form.comboFinishNorm.blockSignals(True)
standards = Standards.getStandards("threaded")
if not self.feature.NormThreadFinish in standards:
self.feature.NormThreadFinish = standards[0]
else:
self.form.comboFinishNorm.setCurrentIndex(standards.index(self.feature.NormThreadFinish))
self.form.comboFinishNorm.blockSignals(False)
flength = Standards.getThreadFinishLength(self.feature.NormThreadFinish, self.feature.NormDiameter)
tlength = self.feature.Depth - flength
if tlength > 0:
self.feature.ThreadLength = tlength # TODO: Warning message
# Dependents
self.feature.Diameter = Standards.getThreadCoreDiameter(self.feature.NormThread, self.feature.NormDiameter)
else:
self.form.tabThread.setEnabled(False)
# Dependents
self.form.spinDiameter.setEnabled(True)
# Data
self.form.spinDiameter.setValue(self.feature.Diameter)
self.form.spinDepth.setValue(self.feature.Depth)
if self.feature.HoleType == "Thru":
self.form.spinDepth.setEnabled(False)
else:
self.form.spinDepth.setEnabled(True)
if self.feature.Threaded == True:
self.form.spinThreadLength.setEnabled(True)
else:
self.form.spinThreadLength.setEnabled(False)
if self.feature.Counterbore == True:
self.form.spinCounterboreDiameter.setEnabled(True)
self.form.spinCounterboreDiameter.setValue(self.feature.CounterboreDiameter)
self.form.spinCounterboreDepth.setEnabled(True)
self.form.spinCounterboreDepth.setValue(self.feature.CounterboreDepth)
self.form.spinCountersinkAngle.setEnabled(False)
elif self.feature.Countersink == True:
self.form.spinCounterboreDiameter.setEnabled(True)
self.form.spinCounterboreDiameter.setValue(self.feature.CounterboreDiameter)
self.form.spinCounterboreDepth.setEnabled(False)
self.form.spinCountersinkAngle.setEnabled(True)
self.form.spinCountersinkAngle.setValue(self.feature.CountersinkAngle)
else:
self.form.spinCounterboreDiameter.setEnabled(False)
self.form.spinCounterboreDepth.setEnabled(False)
self.form.spinCountersinkAngle.setEnabled(False)
if self.feature.Norm == "Custom":
self.form.spinDiameter.setEnabled(True)
else:
self.form.spinDiameter.setEnabled(False)
if holetype == "counterbore":
# Diameter is taken from Norm
self.form.spinCounterboreDiameter.setEnabled(False)
elif holetype == "countersink":
# Values are taken from Norm
self.form.spinCounterboreDiameter.setEnabled(False)
self.form.spinCounterboreDepth.setEnabled(False)
self.form.spinCountersinkAngle.setEnabled(False)
if self.feature.Threaded == True:
self.form.spinDiameter.setEnabled(False)
if self.feature.NormThreadFinish != "Custom":
self.form.spinThreadLength.setEnabled(False)
self.form.spinThreadLength.setValue(self.feature.ThreadLength)
# Position
self.form.buttonSupport.setText("Face")
if self.feature.Support is None:
# First-time initialization
selection = FreeCADGui.Selection.getSelectionEx()
self.feature.Support = (selection[0].Object, selection[0].SubElementNames)
self.form.lineSupport.setText(self.getRefText(self.feature.Support))
if self.feature.PositionType == self.types[0]:
# Linear
self.form.buttonRef1.setText("Line/Plane")
self.form.buttonRef1.setEnabled(True)
self.form.buttonRef2.setText("Line/Plane")
self.form.buttonRef2.setEnabled(True)
self.form.lineRef1.setEnabled(True)
self.form.lineRef2.setEnabled(True)
self.form.labelRef1.setEnabled(True)
self.form.labelRef1.setText("Distance")
axis = self.feature.HoleGroove.Sketch.Support[0].References[0][0]
if len(axis.References) > 0 and axis.References[0] != None:
if (len(axis.References) == 3):
self.form.lineRef1.setText(self.getRefText(axis.References[1]))
else:
self.form.lineRef1.setText(self.getRefText(axis.References[0]))
self.form.spinRef1.setEnabled(True)
self.form.spinRef1.setValue(axis.Offset)
self.form.labelRef2.setEnabled(True)
self.form.labelRef2.setText("Distance")
if len(axis.References) > 1 and axis.References[1] != None:
if (len(axis.References) == 3):
self.form.lineRef2.setText(self.getRefText(axis.References[2]))
else:
self.form.lineRef2.setText(self.getRefText(axis.References[1]))
self.form.spinRef2.setEnabled(True)
self.form.spinRef2.setValue(axis.Offset2)
elif self.feature.PositionType == self.types[1]:
# Coaxial
self.form.buttonRef1.setText("Circle/Cylinder")
self.form.buttonRef1.setEnabled(True)
self.form.buttonRef2.setEnabled(False)
self.form.lineRef1.setEnabled(True)
axis = self.feature.HoleGroove.Sketch.Support[0].References[0][0]
if len(axis.References) > 0 and axis.References[0] != None:
self.form.lineRef1.setText(self.getRefText(axis.References[0]))
self.form.lineRef2.setEnabled(False)
self.form.labelRef1.setEnabled(False)
self.form.spinRef1.setEnabled(False)
self.form.labelRef2.setEnabled(False)
self.form.spinRef2.setEnabled(False)
else:
# Nothing else defined yet
pass
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
def accept(self):
return True
def buttonThru(self, toggle):
if toggle == True:
self.feature.HoleType = "Thru"
def buttonDepth(self, toggle):
if toggle == True:
self.feature.HoleType = "Depth"
def checkThreaded(self, checked):
self.feature.Threaded = checked
self.updateUI()
def checkCounterbore(self, checked):
if checked == True:
self.feature.Countersink = False
self.feature.Counterbore = checked
self.updateUI()
def checkCountersink(self, checked):
if checked == True:
self.feature.Counterbore = False
self.feature.Countersink = checked
self.updateUI()
def checkCustom(self, checked):
if checked == True:
self.feature.Norm = "Custom"
else:
self.feature.Norm = str(self.form.comboNorm.currentText())
self.updateUI()
def comboNorm(self, index):
self.feature.Norm = str(self.form.comboNorm.itemText(index))
self.updateUI()
def comboTolerance(self, index):
self.feature.NormTolerance = str(self.form.comboTolerance.itemText(index))
self.updateUI()
def comboNormDia(self, index):
diameter = str(self.form.comboNormDia.itemText(index))
self.feature.NormDiameter = float(diameter[1:])
self.updateUI()
def comboNormBoltWasher(self, index):
self.feature.ExtraNorm = str(self.form.comboNormBoltWasher.itemText(index))
self.updateUI()
def comboThreadNorm(self, index):
self.feature.NormThread = str(self.form.comboThreadNorm.itemText(index))
self.updateUI()
def comboThreadDia(self, index):
diameter = str(self.form.comboThreadDia.itemText(index))
self.feature.NormDiameter = float(diameter[1:])
self.updateUI()
def checkCustomThreadLength(self, checked):
if checked == True:
self.feature.NormThreadFinish = "Custom"
else:
self.feature.NormThreadFinish = str(self.form.comboFinishNorm.currentText())
self.updateUI()
def comboFinishNorm(self, index):
self.feature.NormThreadFinish = str(self.form.comboFinishNorm.itemText(index))
self.updateUI()
def spinDiameter(self, val):
if (val > 0.0):
self.feature.Diameter = val
def spinDepth(self, val):
if (val > 0.0):
self.feature.Depth = val
self.updateUI() # required to update the thread length
def spinCounterboreDiameter(self, val):
if (val > self.feature.Diameter):
self.feature.CounterboreDiameter = val
def spinCounterboreDepth(self, val):
if (val > 0.0):
self.feature.CounterboreDepth = val
def spinCountersinkAngle(self, val):
if (val > 0.0):
self.feature.CountersinkAngle = val
def spinThreadLength(self, val):
if (val > 0.0):
self.feature.ThreadLength = val
def comboType(self, index):
self.feature.PositionType = self.types[index]
self.updateUI()
def addSelection(self, document, obj, element, position):
#FreeCAD.Console.PrintMessage("AddSelection() for " + document + "." + obj + "." + element + "\n")
# TODO: What is the position parameter?
if document == self.feature.Document.Name:
axis = self.feature.HoleGroove.Sketch.Support[0].References[0][0]
refs = axis.References
feature = eval("FreeCAD.getDocument('" + document + "')." + obj)
shape = eval("feature.Shape." + element)
if self.selectionMode == "Plane":
if shape.Surface.__class__ != Part.Plane:
FreeCAD.Console.PrintMessage("Selected face must be planar\n")
return
if self.feature.PositionType == self.types[0]:
# The Hole support is also the first reference of the sketch axis in Linear mode with edges selected
if len(refs) == 3:
refs[0] = (feature, element)
axis.References = refs
self.feature.Support = (feature, [element])
elif self.selectionMode == "LinearReference":
if shape.ShapeType == "Edge":
if shape.Curve.__class__ != Part.LineSegment:
FreeCAD.Console.PrintMessage("Selected edge must be linear\n")
return
if len(refs) > 1:
refs[1] = (feature, element)
else:
refs.append((feature, element))
elif shape.ShapeType == "Face":
if shape.Surface.__class__ != Part.Plane:
FreeCAD.Console.PrintMessage("Selected face must be planar\n")
return
if len(refs) > 0:
if len(refs) > 2:
refs = [(feature, element)]
else:
refs[0] = (feature, element)
else:
refs = [(feature, element)]
else:
FreeCAD.Console.PrintMessage("Wrong shape type selected\n")
return
axis.References = refs
axis.Document.recompute()
elif self.selectionMode == "LinearReference2":
if shape.ShapeType == "Edge":
if shape.Curve.__class__ != Part.LineSegment:
FreeCAD.Console.PrintMessage("Selected edge must be linear\n")
return
if len(refs) > 2:
refs[2] = (feature, element)
else:
refs.append((feature, element))
elif shape.ShapeType == "Face":
if shape.Surface.__class__ != Part.Plane:
FreeCAD.Console.PrintMessage("Selected face must be planar\n")
return
if len(refs) > 1:
if len(refs) > 2:
del refs[2]
refs[1] = (feature, element)
else:
refs.append((feature, element))
else:
FreeCAD.Console.PrintMessage("Wrong shape type selected\n")
return
axis.References = refs
axis.Document.recompute()
elif self.selectionMode == "CircularReference":
if shape.ShapeType == "Edge":
if shape.Curve.__class__ != Part.Circle:
FreeCAD.Console.PrintMessage("Selected edge must be arc or circle\n")
return
elif shape.ShapeType == "Face":
if shape.Surface.__class__ != Part.Cylinder:
FreeCAD.Console.PrintMessage("Selected face must be cylindrical\n")
return
else:
FreeCAD.Console.PrintMessage("Wrong shape type selected\n")
return
refs = [(feature, element)]
axis.References = refs
axis.Document.recompute()
else:
FreeCAD.Console.PrintMessage("Unknown selection mode: " + self.selectionMode + "\n")
self.selectionMode = ""
return
FreeCADGui.Selection.removeObserver(self)
FreeCADGui.Selection.clearSelection()
FreeCADGui.Selection.removeSelectionGate()
self.selectionMode = ""
self.updateUI()
self.showFeature()
def hideFeature(self):
# Make sure selection takes place on support, not on hole feature
if self.feature.Support != None:
FreeCADGui.ActiveDocument.hide(self.feature.Name)
(support, elements) = self.feature.Support
FreeCADGui.ActiveDocument.show(support.Name)
def showFeature(self):
if self.feature.Support != None:
FreeCADGui.ActiveDocument.show(self.feature.Name)
(support, elements) = self.feature.Support
FreeCADGui.ActiveDocument.hide(support.Name)
def buttonSupport(self):
FreeCADGui.Selection.addSelectionGate("SELECT Part::Feature SUBELEMENT Face COUNT 1")
FreeCADGui.Selection.addObserver(self)
# Currently support must be a planar face (but could also be a point or a construction plane in the future)
self.selectionMode = "Plane"
self.hideFeature()
def buttonRef1(self):
FreeCADGui.Selection.addSelectionGate("SELECT Part::Feature SUBELEMENT Edge COUNT 1 SELECT Part::Feature SUBELEMENT Face COUNT 1")
FreeCADGui.Selection.addObserver(self)
if self.feature.PositionType == self.types[0]:
self.selectionMode = "LinearReference"
elif self.feature.PositionType == self.types[1]:
self.selectionMode = "CircularReference"
self.hideFeature()
def buttonRef2(self):
FreeCADGui.Selection.addSelectionGate("SELECT Part::Feature SUBELEMENT Edge COUNT 1 SELECT Part::Feature SUBELEMENT Face COUNT 1")
FreeCADGui.Selection.addObserver(self)
self.selectionMode = "LinearReference2"
self.hideFeature()
def spinRef1(self, val):
axis = self.feature.HoleGroove.Sketch.Support[0].References[0][0]
axis.Offset = val
axis.Document.recompute()
def spinRef2(self, val):
axis = self.feature.HoleGroove.Sketch.Support[0].References[0][0]
axis.Offset2 = val
axis.Document.recompute()
| sanguinariojoe/FreeCAD | src/Mod/PartDesign/FeatureHole/TaskHole.py | Python | lgpl-2.1 | 33,054 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os.path
import warnings
import sys
try:
from setuptools import setup, Command
setuptools_available = True
except ImportError:
from distutils.core import setup, Command
setuptools_available = False
from distutils.spawn import spawn
try:
# This will create an exe that needs Microsoft Visual C++ 2008
# Redistributable Package
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print('Cannot import py2exe', file=sys.stderr)
exit(1)
py2exe_options = {
'bundle_files': 1,
'compressed': 1,
'optimize': 2,
'dist_dir': '.',
'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
}
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
'youtube_dl/version.py', 'exec'))
DESCRIPTION = 'YouTube video downloader'
LONG_DESCRIPTION = 'Command-line program to download videos from YouTube.com and other video sites'
py2exe_console = [{
'script': './youtube_dl/__main__.py',
'dest_base': 'youtube-dl',
'version': __version__,
'description': DESCRIPTION,
'comments': LONG_DESCRIPTION,
'product_name': 'youtube-dl',
'product_version': __version__,
}]
py2exe_params = {
'console': py2exe_console,
'options': {'py2exe': py2exe_options},
'zipfile': None
}
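# Building the Windows executable with the options above (assumed invocation):
#
#   python setup.py py2exe
#
# bundles everything into a single youtube-dl.exe written to the current
# directory (dist_dir='.'), linked against the MSVC 2008 runtime noted above.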
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
if setuptools_available:
params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
else:
params['scripts'] = ['bin/youtube-dl']
class build_lazy_extractors(Command):
description = 'Build the extractor lazy loading module'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
spawn(
[sys.executable, 'devscripts/make_lazy_extractors.py', 'youtube_dl/extractor/lazy_extractors.py'],
dry_run=self.dry_run,
)
setup(
name='youtube_dl',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/rg3/youtube-dl',
author='Ricardo Garcia',
author_email='[email protected]',
maintainer='Philipp Hagemeister',
maintainer_email='[email protected]',
packages=[
'youtube_dl',
'youtube_dl.extractor', 'youtube_dl.downloader',
'youtube_dl.postprocessor'],
# Provokes warning on most systems (why?!)
# test_suite = 'nose.collector',
# test_requires = ['nosetest'],
classifiers=[
'Topic :: Multimedia :: Video',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'License :: Public Domain',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
cmdclass={'build_lazy_extractors': build_lazy_extractors},
**params
)
| mxamin/youtube-dl | setup.py | Python | unlicense | 4,030 |
def f(a):
return ++a
def g(a):
return +a
def g(a):
return --a
def foo():
if 1 == None:
print "whoa!"
else:
print "whew!"
if None == 1:
print "whoa!"
else:
print "whew!"
def bar():
a = b = 0
if 'a' <= b >= 'c':
print ""
| lavjain/incubator-hawq | tools/bin/pythonSrc/pychecker-0.8.18/pychecker2/tests/op.py | Python | apache-2.0 | 300 |
"""
==============================================
Estimate covariance matrix from a raw FIF file
==============================================
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = io.Raw(fname)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG and EEG channels (EOG included for artifact rejection)
picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False, eog=True,
include=include, exclude='bads')
# setup rejection
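# (peak-to-peak amplitude thresholds in volts: 80 uV for EEG, 150 uV for EOG)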
reject = dict(eeg=80e-6, eog=150e-6)
# Compute the covariance from the raw data
cov = mne.compute_raw_data_covariance(raw, picks=picks, reject=reject)
print(cov)
###############################################################################
# Show covariance
fig_cov, fig_svd = mne.viz.plot_cov(cov, raw.info, colorbar=True, proj=True)
# try setting proj to False to see the effect
| aestrivex/mne-python | examples/preprocessing/plot_estimate_covariance_matrix_raw.py | Python | bsd-3-clause | 1,087 |
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [xcore_const.py]
XCORE_OP_INVALID = 0
XCORE_OP_REG = 1
XCORE_OP_IMM = 2
XCORE_OP_MEM = 3
XCORE_REG_INVALID = 0
XCORE_REG_CP = 1
XCORE_REG_DP = 2
XCORE_REG_LR = 3
XCORE_REG_SP = 4
XCORE_REG_R0 = 5
XCORE_REG_R1 = 6
XCORE_REG_R2 = 7
XCORE_REG_R3 = 8
XCORE_REG_R4 = 9
XCORE_REG_R5 = 10
XCORE_REG_R6 = 11
XCORE_REG_R7 = 12
XCORE_REG_R8 = 13
XCORE_REG_R9 = 14
XCORE_REG_R10 = 15
XCORE_REG_R11 = 16
XCORE_REG_PC = 17
XCORE_REG_SCP = 18
XCORE_REG_SSR = 19
XCORE_REG_ET = 20
XCORE_REG_ED = 21
XCORE_REG_SED = 22
XCORE_REG_KEP = 23
XCORE_REG_KSP = 24
XCORE_REG_ID = 25
XCORE_REG_ENDING = 26
XCORE_INS_INVALID = 0
XCORE_INS_ADD = 1
XCORE_INS_ANDNOT = 2
XCORE_INS_AND = 3
XCORE_INS_ASHR = 4
XCORE_INS_BAU = 5
XCORE_INS_BITREV = 6
XCORE_INS_BLA = 7
XCORE_INS_BLAT = 8
XCORE_INS_BL = 9
XCORE_INS_BF = 10
XCORE_INS_BT = 11
XCORE_INS_BU = 12
XCORE_INS_BRU = 13
XCORE_INS_BYTEREV = 14
XCORE_INS_CHKCT = 15
XCORE_INS_CLRE = 16
XCORE_INS_CLRPT = 17
XCORE_INS_CLRSR = 18
XCORE_INS_CLZ = 19
XCORE_INS_CRC8 = 20
XCORE_INS_CRC32 = 21
XCORE_INS_DCALL = 22
XCORE_INS_DENTSP = 23
XCORE_INS_DGETREG = 24
XCORE_INS_DIVS = 25
XCORE_INS_DIVU = 26
XCORE_INS_DRESTSP = 27
XCORE_INS_DRET = 28
XCORE_INS_ECALLF = 29
XCORE_INS_ECALLT = 30
XCORE_INS_EDU = 31
XCORE_INS_EEF = 32
XCORE_INS_EET = 33
XCORE_INS_EEU = 34
XCORE_INS_ENDIN = 35
XCORE_INS_ENTSP = 36
XCORE_INS_EQ = 37
XCORE_INS_EXTDP = 38
XCORE_INS_EXTSP = 39
XCORE_INS_FREER = 40
XCORE_INS_FREET = 41
XCORE_INS_GETD = 42
XCORE_INS_GET = 43
XCORE_INS_GETN = 44
XCORE_INS_GETR = 45
XCORE_INS_GETSR = 46
XCORE_INS_GETST = 47
XCORE_INS_GETTS = 48
XCORE_INS_INCT = 49
XCORE_INS_INIT = 50
XCORE_INS_INPW = 51
XCORE_INS_INSHR = 52
XCORE_INS_INT = 53
XCORE_INS_IN = 54
XCORE_INS_KCALL = 55
XCORE_INS_KENTSP = 56
XCORE_INS_KRESTSP = 57
XCORE_INS_KRET = 58
XCORE_INS_LADD = 59
XCORE_INS_LD16S = 60
XCORE_INS_LD8U = 61
XCORE_INS_LDA16 = 62
XCORE_INS_LDAP = 63
XCORE_INS_LDAW = 64
XCORE_INS_LDC = 65
XCORE_INS_LDW = 66
XCORE_INS_LDIVU = 67
XCORE_INS_LMUL = 68
XCORE_INS_LSS = 69
XCORE_INS_LSUB = 70
XCORE_INS_LSU = 71
XCORE_INS_MACCS = 72
XCORE_INS_MACCU = 73
XCORE_INS_MJOIN = 74
XCORE_INS_MKMSK = 75
XCORE_INS_MSYNC = 76
XCORE_INS_MUL = 77
XCORE_INS_NEG = 78
XCORE_INS_NOT = 79
XCORE_INS_OR = 80
XCORE_INS_OUTCT = 81
XCORE_INS_OUTPW = 82
XCORE_INS_OUTSHR = 83
XCORE_INS_OUTT = 84
XCORE_INS_OUT = 85
XCORE_INS_PEEK = 86
XCORE_INS_REMS = 87
XCORE_INS_REMU = 88
XCORE_INS_RETSP = 89
XCORE_INS_SETCLK = 90
XCORE_INS_SET = 91
XCORE_INS_SETC = 92
XCORE_INS_SETD = 93
XCORE_INS_SETEV = 94
XCORE_INS_SETN = 95
XCORE_INS_SETPSC = 96
XCORE_INS_SETPT = 97
XCORE_INS_SETRDY = 98
XCORE_INS_SETSR = 99
XCORE_INS_SETTW = 100
XCORE_INS_SETV = 101
XCORE_INS_SEXT = 102
XCORE_INS_SHL = 103
XCORE_INS_SHR = 104
XCORE_INS_SSYNC = 105
XCORE_INS_ST16 = 106
XCORE_INS_ST8 = 107
XCORE_INS_STW = 108
XCORE_INS_SUB = 109
XCORE_INS_SYNCR = 110
XCORE_INS_TESTCT = 111
XCORE_INS_TESTLCL = 112
XCORE_INS_TESTWCT = 113
XCORE_INS_TSETMR = 114
XCORE_INS_START = 115
XCORE_INS_WAITEF = 116
XCORE_INS_WAITET = 117
XCORE_INS_WAITEU = 118
XCORE_INS_XOR = 119
XCORE_INS_ZEXT = 120
XCORE_INS_ENDING = 121
XCORE_GRP_INVALID = 0
XCORE_GRP_JUMP = 1
XCORE_GRP_ENDING = 2
| 0vercl0k/rp | src/third_party/capstone/bindings/python/capstone/xcore_const.py | Python | mit | 3,201 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from graphite.account.models import Profile
from graphite.logger import log
# There are a couple different json modules floating around out there with
# different APIs. Hide the ugliness here.
try:
import json
except ImportError:
import simplejson as json
if hasattr(json, 'read') and not hasattr(json, 'loads'):
json.loads = json.read
json.dumps = json.write
json.load = lambda file: json.read( file.read() )
json.dump = lambda obj, file: file.write( json.write(obj) )
def getProfile(request,allowDefault=True):
if request.user.is_authenticated():
try:
return request.user.profile
except ObjectDoesNotExist:
profile = Profile(user=request.user)
profile.save()
return profile
elif allowDefault:
return defaultProfile
def getProfileByUsername(username):
try:
user = User.objects.get(username=username)
return Profile.objects.get(user=user)
except ObjectDoesNotExist:
return None
try:
defaultUser = User.objects.get(username='default')
except User.DoesNotExist:
log.info("Default user does not exist, creating it...")
randomPassword = User.objects.make_random_password(length=16)
defaultUser = User.objects.create_user('default','[email protected]',randomPassword)
defaultUser.save()
try:
defaultProfile = Profile.objects.get(user=defaultUser)
except Profile.DoesNotExist:
log.info("Default profile does not exist, creating it...")
defaultProfile = Profile(user=defaultUser)
defaultProfile.save()
| mancdaz/graphite-buildpackage | webapp/graphite/util.py | Python | apache-2.0 | 2,163 |
from layer import *
class ReplicatedSoftmaxLayer(Layer):
def __init__(self, *args, **kwargs):
super(ReplicatedSoftmaxLayer, self).__init__(*args, **kwargs)
@classmethod
def IsLayerType(cls, proto):
return proto.hyperparams.activation == \
deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX
def ApplyActivation(self):
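    # Replicated-softmax activation: subtract the max for numerical stability,
    # exponentiate and normalize to a probability distribution, then rescale by
    # the word count NN so each document's activities sum to its length.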
state = self.state
temp = self.batchsize_temp
state.max(axis=0, target=temp)
state.add_row_mult(temp, -1)
cm.exp(state)
state.sum(axis=0, target=temp)
self.NN.divide(temp, target=temp)
state.mult_by_row(temp)
def Sample(self):
sample = self.sample
state = self.state
use_lightspeed = False
if use_lightspeed: # Do sampling on cpu.
temp = self.expanded_batch
state.sum(axis=0, target=self.temp)
state.div_by_row(self.temp, target=temp)
probs_cpu = temp.asarray().astype(np.float64)
numsamples = self.NN.asarray()
samples_cpu = lightspeed.SampleSoftmax(probs_cpu, numsamples)
sample.overwrite(samples_cpu.astype(np.float32))
else:
if self.proto.hyperparams.adaptive_prior > 0:
sample.assign(0)
temp_sample = self.expanded_batch
numsamples = int(self.proto.hyperparams.adaptive_prior)
for i in range(numsamples):
state.perturb_prob_for_softmax_sampling(target=temp_sample)
temp_sample.choose_max_and_accumulate(sample)
else:
NN = self.NN.asarray().reshape(-1)
numdims, batchsize = self.state.shape
max_samples = self.big_sample_matrix.shape[1]
for i in range(batchsize):
nn = NN[i]
factor = 1
if nn > max_samples:
nn = max_samples
factor = float(nn) / max_samples
samples = self.big_sample_matrix.slice(0, nn)
samples.assign(0)
samples.add_col_vec(self.state.slice(i, i+1))
samples.perturb_prob_for_softmax_sampling()
samples.choose_max(axis=0)
samples.sum(axis=1, target=sample.slice(i, i+1))
if factor > 1:
sample.slice(i, i+1).mult(factor)
def ComputeDeriv(self):
"""Compute derivative w.r.t input given derivative w.r.t output."""
raise Exception('Back prop through replicated softmax not implemented.')
def AllocateMemory(self, batchsize):
super(ReplicatedSoftmaxLayer, self).AllocateMemory(batchsize)
self.expansion_matrix = cm.CUDAMatrix(np.eye(self.numlabels))
self.big_sample_matrix = cm.empty((self.numlabels * self.dimensions, 1000))
def AllocateBatchsizeDependentMemory(self, batchsize):
super(ReplicatedSoftmaxLayer, self).AllocateBatchsizeDependentMemory(batchsize)
dimensions = self.dimensions
numlabels = self.numlabels
self.expanded_batch = cm.CUDAMatrix(np.zeros((numlabels * dimensions, batchsize)))
self.batchsize_temp = cm.CUDAMatrix(np.zeros((dimensions, batchsize)))
if self.is_input or self.is_initialized or self.is_output:
self.data = cm.CUDAMatrix(np.zeros((numlabels * dimensions, batchsize)))
self.NN = cm.CUDAMatrix(np.ones((1, batchsize)))
self.counter = cm.empty(self.NN.shape)
self.count_filter = cm.empty(self.NN.shape)
def ResetState(self, rand=False):
if self.hyperparams.normalize:
self.NN.assign(self.hyperparams.normalize_to)
else:
self.NN.assign(1)
super(ReplicatedSoftmaxLayer, self).ResetState(rand=rand)
def GetData(self):
self.state.assign(self.data)
h = self.hyperparams
self.state.sum(axis=0, target=self.NN)
self.NN.add(self.tiny) # To deal with documents of 0 words.
if h.multiplicative_prior > 0:
self.NN.mult(1 + h.multiplicative_prior)
self.state.mult(1 + h.multiplicative_prior)
if h.additive_prior > 0:
self.state.div_by_row(self.NN)
self.NN.add(h.additive_prior)
self.state.mult_by_row(self.NN)
if h.adaptive_prior > 0:
self.state.div_by_row(self.NN)
self.state.mult(h.adaptive_prior)
self.NN.assign(h.adaptive_prior)
def GetLoss(self, get_deriv=False):
"""Compute loss and also deriv w.r.t to it if asked for.
Compute the loss function. Targets should be in self.data, predictions
should be in self.state.
Args:
get_deriv: If True, compute the derivative w.r.t the loss function and put
it in self.deriv.
"""
perf = deepnet_pb2.Metrics()
perf.MergeFrom(self.proto.performance_stats)
perf.count = self.batchsize
tiny = self.tiny
temp = self.batchsize_temp
if self.loss_function == deepnet_pb2.Layer.SQUARED_LOSS:
if get_deriv:
target = self.deriv
else:
target = self.statesize
if self.hyperparams.normalize_error:
self.data.sum(axis=0, target=temp)
temp.add(self.tiny)
self.data.div_by_row(temp, target=target)
self.state.div_by_row(self.NN, target=self.expanded_batch)
target.subtract(self.expanded_batch)
else:
self.data.sum(axis=0, target=temp)
temp.add(self.tiny)
self.state.div_by_row(temp, target=target)
target.subtract(self.data)
error = target.euclid_norm()**2
perf.error = error
else:
raise Exception('Unknown loss function for Replicated Softmax units.')
return perf
def GetSparsityDivisor(self):
raise Exception('Sparsity not implemented for replicated softmax units.')
def CollectSufficientStatistics(self, neg=False):
"""Collect sufficient statistics for this layer."""
h = self.hyperparams
self.state.div_by_row(self.NN)
if not neg:
self.state.sum(axis=1, target=self.suff_stats)
else:
self.suff_stats.add_sums(self.state, axis=1, mult=-1.0)
self.state.mult_by_row(self.NN)
| abdulqayyum/deepnet | deepnet/replicated_softmax_layer.py | Python | bsd-3-clause | 5,731 |
"""
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import graphlab.connect as _mt
import graphlab.connect.main as glconnect
from graphlab.cython.cy_type_utils import infer_type_of_list
from graphlab.cython.context import debug_trace as cython_context
from graphlab.cython.cy_sframe import UnitySFrameProxy
from graphlab.util import _check_canvas_enabled, _make_internal_url, _is_callable
from graphlab.data_structures.sarray import SArray, _create_sequential_sarray
import graphlab.aggregate
import graphlab
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import inspect
from graphlab.deps import pandas, HAS_PANDAS
import time
import itertools
import os
import subprocess
import uuid
import platform
__all__ = ['SFrame']
SFRAME_GARBAGE_COLLECTOR = []
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
'You can use len(sf) to force materialization.']
SFRAME_ROOTS = [# Binary/lib location in production egg
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')),
# Build tree location of SFrame binaries
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'sframe')),
# Location of python sources
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'unity', 'python', 'graphlab')),
# Build tree dependency location
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
]
RDD_SFRAME_PICKLE = "rddtosf_pickle"
RDD_SFRAME_NONPICKLE = "rddtosf_nonpickle"
SFRAME_RDD_PICKLE = "sftordd_pickle"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "graphlab-create-spark-integration.jar"
SYS_UTIL_PY = "sys_util.py"
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
YARN_OS = None
SPARK_SUPPORT_NAMES = {'RDD_SFRAME_PATH':'rddtosf_pickle',
'RDD_SFRAME_NONPICKLE_PATH':'rddtosf_nonpickle',
'SFRAME_RDD_PATH':'sftordd_pickle',
'HDFS_LIB_PATH':'libhdfs.so',
'RDD_JAR_PATH':'graphlab-create-spark-integration.jar',
'SYS_UTIL_PY_PATH':'sys_util.py',
'SPARK_PIPE_WRAPPER_PATH':'spark_pipe_wrapper'}
first = True
for i in SFRAME_ROOTS:
for key,val in SPARK_SUPPORT_NAMES.iteritems():
tmp_path = os.path.join(i, val)
if key not in BINARY_PATHS and os.path.isfile(tmp_path):
BINARY_PATHS[key] = tmp_path
if all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
if first:
PRODUCTION_RUN = True
break
first = False
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
RDD_SUPPORT = False
def get_spark_integration_jar_path():
"""
The absolute path of the jar file required to enable GraphLab Create's
integration with Apache Spark.
"""
if 'RDD_JAR_PATH' not in BINARY_PATHS:
raise RuntimeError("Could not find a spark integration jar. "\
"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
return BINARY_PATHS['RDD_JAR_PATH']
def __rdd_support_init__(sprk_ctx):
global YARN_OS
global RDD_SUPPORT_INITED
global STAGING_DIR
global BINARY_PATHS
if not RDD_SUPPORT or RDD_SUPPORT_INITED:
return
# Make sure our GraphLabUtil scala functions are accessible from the driver
try:
tmp = sprk_ctx._jvm.org.graphlab.create.GraphLabUtil.EscapeString(sprk_ctx._jvm.java.lang.String("1,2,3,4"))
except:
raise RuntimeError("Could not execute RDD translation functions. "\
"Please make sure you have started Spark "\
"(either with spark-submit or pyspark) with the following flag set:\n"\
"'--driver-class-path " + BINARY_PATHS['RDD_JAR_PATH']+"'\n"\
"OR set the property spark.driver.extraClassPath in spark-defaults.conf")
dummy_rdd = sprk_ctx.parallelize([1])
if PRODUCTION_RUN and sprk_ctx.master == 'yarn-client':
# Get cluster operating system
os_rdd = dummy_rdd.map(lambda x: platform.system())
YARN_OS = os_rdd.collect()[0]
# Set binary path
for i in BINARY_PATHS.keys():
s = BINARY_PATHS[i]
if os.path.basename(s) == SPARK_SUPPORT_NAMES['SYS_UTIL_PY_PATH']:
continue
if YARN_OS == 'Linux':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'linux', os.path.basename(s))
elif YARN_OS == 'Darwin':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s), 'osx', os.path.basename(s))
else:
raise RuntimeError("YARN cluster has unsupported operating system "\
"(something other than Linux or Mac OS X). "\
"Cannot convert RDDs on this cluster to SFrame.")
# Create staging directory
staging_dir = '.graphlabStaging'
if sprk_ctx.master == 'yarn-client':
tmp_loc = None
# Get that staging directory's full name
tmp_loc = dummy_rdd.map(
lambda x: subprocess.check_output(
["hdfs", "getconf", "-confKey", "fs.defaultFS"]).rstrip()).collect()[0]
STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
if STAGING_DIR is None:
raise RuntimeError("Failed to create a staging directory on HDFS. "\
"Do your cluster nodes have a working hdfs client?")
# Actually create the staging dir
unity = glconnect.get_unity()
unity.__mkdir__(STAGING_DIR)
unity.__chmod__(STAGING_DIR, 0777)
elif sprk_ctx.master[0:5] == 'local':
# Save the output sframes to the same temp workspace this engine is
# using
#TODO: Consider cases where server and client aren't on the same machine
unity = glconnect.get_unity()
STAGING_DIR = unity.get_current_cache_file_location()
if STAGING_DIR is None:
raise RuntimeError("Could not retrieve local staging directory! \
Please contact us on http://forum.dato.com.")
else:
raise RuntimeError("Your spark context's master is '" +
str(sprk_ctx.master) +
"'. Only 'local' and 'yarn-client' are supported.")
if sprk_ctx.master == 'yarn-client':
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_PATH'])
sprk_ctx.addFile(BINARY_PATHS['HDFS_LIB_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SFRAME_RDD_PATH'])
sprk_ctx.addFile(BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SYS_UTIL_PY_PATH'])
sprk_ctx.addFile(BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'])
sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
RDD_SUPPORT_INITED = True
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = graphlab.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
    * general text file (with csv parsing options, see :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://dato.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When working with the GraphLab EC2 instance (see
:py:func:`graphlab.aws.launch_EC2()`), an SFrame cannot be constructed
using local file path, because it involves a potentially large amount of
data transfer from client to server. However, it is still okay to use a
remote file path. See the examples below. A similar restriction applies to
:py:class:`graphlab.SGraph` and :py:class:`graphlab.SArray`.
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe and transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the python session started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
    The :py:func:`graphlab.SFrame.read_csv()` function is quite powerful and can
    be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
    A delimiter other than "," can be specified, for instance the space ' '
    in this case. Only single-character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
    By default, "NA" or a missing element is interpreted as a missing value.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
    |   bill  |  None |  None  |
+---------+-------+--------+
[4 rows x 3 columns]
    Furthermore, thanks to its dictionary and list types, the parser can handle
    JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
The list and dictionary parsers are quite flexible and can absorb a
    variety of poorly formatted inputs. Also, note that the list and dictionary
types are recursive, allowing for arbitrary values to be contained.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
    {space:1 separated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
    | {'separated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
    **Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SFrame
should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame which contains only rows of the SFrame where its
matching row in the binary_filter is non zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
    Rows: 2
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
    Rows: 2
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['shape', '__proxy__', '_proxy']
def __init__(self, data=None,
format='auto',
_proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
tracker = _mt._get_metric_tracker()
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
_format = None
if (format == 'auto'):
if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
_format = 'dataframe'
tracker.track('sframe.location.memory', value=1)
elif (isinstance(data, str) or isinstance(data, unicode)):
if data.find('://') == -1:
suffix = 'local'
else:
suffix = data.split('://')[0]
tracker.track(('sframe.location.%s' % (suffix)), value=1)
if data.endswith(('.csv', '.csv.gz')):
_format = 'csv'
elif data.endswith(('.tsv', '.tsv.gz')):
_format = 'tsv'
elif data.endswith(('.txt', '.txt.gz')):
print "Assuming file is csv. For other delimiters, " + \
"please use `SFrame.read_csv`."
_format = 'csv'
else:
_format = 'sframe'
elif type(data) == SArray:
_format = 'sarray'
elif isinstance(data, SFrame):
_format = 'sframe_obj'
elif (hasattr(data, 'iteritems')):
_format = 'dict'
tracker.track('sframe.location.memory', value=1)
elif hasattr(data, '__iter__'):
_format = 'array'
tracker.track('sframe.location.memory', value=1)
elif data is None:
_format = 'empty'
else:
raise ValueError('Cannot infer input type for data ' + str(data))
else:
_format = format
tracker.track(('sframe.format.%s' % _format), value=1)
with cython_context():
if (_format == 'dataframe'):
self.__proxy__.load_from_dataframe(data)
elif (_format == 'sframe_obj'):
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif (_format == 'sarray'):
self.__proxy__.add_column(data.__proxy__, "")
elif (_format == 'array'):
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr)
elif SArray in unique_types:
raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
else:
self.__proxy__.add_column(SArray(data).__proxy__, "")
elif (_format == 'dict'):
for key,val in iter(sorted(data.iteritems())):
if (type(val) == SArray):
self.__proxy__.add_column(val.__proxy__, key)
else:
self.__proxy__.add_column(SArray(val).__proxy__, key)
elif (_format == 'csv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'tsv'):
url = _make_internal_url(data)
tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'sframe'):
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif (_format == 'empty'):
pass
else:
raise ValueError('Unknown input type: ' + format)
sframe_size = -1
if self.__has_size__():
sframe_size = self.num_rows()
tracker.track('sframe.row.size', value=sframe_size)
tracker.track('sframe.col.size', value=self.num_cols())
@staticmethod
def _infer_column_types_from_lines(first_rows):
if (len(first_rows.column_names()) < 1):
print "Insufficient number of columns to perform type inference"
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print "Insufficient number of rows to perform type inference"
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [list(first_rows[col])
for col in first_rows.column_names()]
# transpose
all_column_values = [list(x) for x in zip(*all_column_values_transposed)]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
        # if the rows disagree on how many columns they have, fall back to str
if len(set(len(x) for x in all_column_type_hints)) != 1:
print "Unable to infer column types. Defaulting to str"
return str
import types
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
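        # e.g. a column seen as {int, float} across rows becomes float,
        # {array.array, list} becomes list, {int, NoneType} keeps int, and any
        # other disagreement falls back to str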
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if (len(d) == 1):
# easy case. both agree on the type
continue
if ((int in d) and (float in d)):
                    # one is an int, one is a float. it's a float
column_type_hints[j] = float
elif ((array.array in d) and (list in d)):
                    # one is an array, one is a list. it's a list
column_type_hints[j] = list
elif types.NoneType in d:
# one is a NoneType. assign to other type
if currow[j] != types.NoneType:
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
        # final pass. everything which is still NoneType is now a str
for i in range(len(column_type_hints)):
if column_type_hints[i] == types.NoneType:
column_type_hints[i] = str
return column_type_hints
@classmethod
def _read_csv_impl(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True,
store_errors=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and optionally
(if store_errors=True) a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = escape_char
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
        if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy(glconnect.get_client())
internal_url = _make_internal_url(url)
if (not verbose):
glconnect.get_client().set_log_progress(False)
# Attempt to automatically detect the column types. Either produce a
# list of types; otherwise default to all str types.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first 100 rows (using all the desired arguments).
first_rows = graphlab.SFrame.read_csv(url, nrows=100,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values = na_values)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
print "------------------------------------------------------"
print "Inferred types from first line of file as "
print "column_type_hints="+ typelist
print "If parsing fails due to incorrect types, you can correct"
print "the inferred type list above and pass it to read_csv in"
print "the column_type_hints argument"
print "------------------------------------------------------"
column_type_inference_was_used = True
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
print 'Could not detect types. Using str for each column.'
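        # Normalize the hints into the dict format the parser expects, e.g.
        # str -> {'__all_columns__': str} and
        # [int, float] -> {'__X0__': int, '__X1__': float}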
if type(column_type_hints) is type:
type_hints = {'__all_columns__': column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))
elif type(column_type_hints) is dict:
type_hints = column_type_hints
else:
raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
_mt._get_metric_tracker().track('sframe.csv.parse')
        suffix = ''
if url.find('://') == -1:
suffix = 'local'
else:
suffix = url.split('://')[0]
_mt._get_metric_tracker().track(('sframe.location.%s' % (suffix)), value=1)
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in e.message:
raise e
if column_type_inference_was_used:
# try again
print "Unable to parse the file with automatic type inference."
print "Defaulting to column_type_hints=str"
type_hints = {'__all_columns__': str}
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except:
raise
else:
raise
glconnect.get_client().set_log_progress(True)
return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.iteritems() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dictionary of filenames to SArrays indicating for each file,
what are the incorrectly parsed lines encountered.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=True)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
nrows=None,
verbose=True):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names : 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will default to
string.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
nrows : int, optional
If set, only this many rows will be read from the file.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
        Do not treat the first line as a header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Throw error on parse failure:
>>> bad_url = 'https://s3.amazonaws.com/gl-testdata/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
nrows=nrows,
verbose=verbose,
store_errors=False)[0]
    def to_schema_rdd(self, sc, sql, number_of_partitions=4):
"""
Convert the current SFrame to the Spark SchemaRDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
sql : SQLContext
sql is an existing SQLContext.
number_of_partitions : int
number of partitions for the output rdd
Returns
----------
out: SchemaRDD
Examples
--------
>>> from pyspark import SparkContext, SQLContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sqlc = SQLContext(sc)
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_schema_rdd(sc, sqlc)
>>> rdd.collect()
[Row(x=1, y=u'fish'), Row(x=2, y=u'chips'), Row(x=3, y=u'salad')]
"""
def homogeneous_type(seq):
if seq is None or len(seq) == 0:
return True
iseq = iter(seq)
first_type = type(next(iseq))
            return all(type(x) is first_type for x in iseq)
if len(self) == 0:
raise ValueError("SFrame is empty")
column_names = self.column_names()
first_row = self.head(1)[0]
for name in column_names:
if hasattr(first_row[name],'__iter__') and homogeneous_type(first_row[name]) is not True:
raise TypeError("Support for translation to Spark SchemaRDD not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)
for _type in self.column_types():
if(_type.__name__ == 'datetime'):
raise TypeError("Support for translation to Spark SchemaRDD not enabled for datetime type. Use SFrame.to_rdd() ")
        rdd = self.to_rdd(sc, number_of_partitions)
from pyspark.sql import Row
rowRdd = rdd.map(lambda x: Row(**x))
return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
"""
Convert the current SFrame to the Spark RDD.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
number_of_partitions: int
number of partitions for the output rdd
Returns
----------
out: RDD
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_rdd(sc)
>>> rdd.collect()
[{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
"""
_mt._get_metric_tracker().track('sframe.to_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
for _type in self.column_types():
if(_type.__name__ == 'Image'):
raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
if type(number_of_partitions) is not int:
raise ValueError("number_of_partitions parameter expects an integer type")
if number_of_partitions == 0:
raise ValueError("number_of_partitions can not be initialized to zero")
# Save SFrame in a temporary place
tmp_loc = self.__get_staging_dir__(sc)
sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
self.save(sf_loc)
# Keep track of the temporary sframe that is saved(). We need to delete it eventually.
dummysf = load_sframe(sf_loc)
dummysf.__proxy__.delete_on_close()
SFRAME_GARBAGE_COLLECTOR.append(dummysf)
sframe_len = self.__len__()
small_partition_size = sframe_len/number_of_partitions
big_partition_size = small_partition_size + 1
num_big_partition_size = sframe_len % number_of_partitions
num_small_partition_size = number_of_partitions - num_big_partition_size
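        # Split the rows as evenly as possible, e.g. sframe_len=10 and
        # number_of_partitions=4 yields ranges '0:3', '3:6', '6:8', '8:10'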
count = 0
start_index = 0
ranges = []
while(count < number_of_partitions):
if(count < num_big_partition_size):
ranges.append((str(start_index)+":"+str(start_index + big_partition_size)))
start_index = start_index + big_partition_size
else:
ranges.append((str(start_index)+":"+str(start_index + small_partition_size)))
start_index = start_index + small_partition_size
count+=1
from pyspark import RDD
rdd = sc.parallelize(ranges,number_of_partitions)
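        # Each Spark partition pipes its 'start:end' range string through the
        # native SFrame-to-RDD binary, which reads that slice of the SFrame
        # saved above and streams the rows back to Spark in pickled form.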
if sc.master[0:5] == 'local':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + \
" " + BINARY_PATHS['SFRAME_RDD_PATH'] + " " + sf_loc)
elif sc.master == 'yarn-client':
pipeRdd = sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] + \
" " + "./" + SPARK_SUPPORT_NAMES['SFRAME_RDD_PATH'] + \
" " + sf_loc)
serializedRdd = sc._jvm.org.graphlab.create.GraphLabUtil.stringToByte(pipeRdd)
import pyspark
output_rdd = RDD(serializedRdd,sc,pyspark.serializers.PickleSerializer())
return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc):
if not RDD_SUPPORT_INITED:
__rdd_support_init__(cur_sc)
return STAGING_DIR
@classmethod
def from_rdd(cls, rdd):
"""
Convert a Spark RDD into a GraphLab Create SFrame.
To enable this function, you must add the jar file bundled with GraphLab
Create to the Spark driver's classpath. This must happen BEFORE Spark
launches its JVM, or else it will have no effect. To do this, first get
the location of the packaged jar with
`graphlab.get_spark_integration_jar_path`. You then have two options:
1. Add the path to the jar to your spark-defaults.conf file. The
property to set is 'spark.driver.extraClassPath'.
OR
2. Add the jar's path as a command line option to your favorite way to
start pyspark (either spark-submit or pyspark). For this, use the
command line option '--driver-class-path'.
Parameters
----------
rdd : pyspark.rdd.RDD
Returns
-------
out : SFrame
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> rdd = sc.parallelize([1,2,3])
>>> sf = SFrame.from_rdd(rdd)
>>> sf
Data:
+-----+
| X1 |
+-----+
| 1.0 |
| 2.0 |
| 3.0 |
+-----+
[3 rows x 1 columns]
"""
_mt._get_metric_tracker().track('sframe.from_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
        checkRes = rdd.take(1)
if len(checkRes) > 0 and checkRes[0].__class__.__name__ == 'Row' and rdd.__class__.__name__ not in {'SchemaRDD','DataFrame'}:
raise Exception("Conversion from RDD(pyspark.sql.Row) to SFrame not supported. Please call inferSchema(RDD) first.")
if(rdd._jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
return SFrame.__from_UTF8Deserialized_rdd__(rdd)
sf_names = None
rdd_type = "rdd"
if rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}:
rdd_type = "schemardd"
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
else:
sf_names = first_row.__FIELDS__
sf_names = [str(i) for i in sf_names]
cur_sc = rdd.ctx
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
mode = "batch"
if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
mode = "pickle"
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " + \
BINARY_PATHS['RDD_SFRAME_PATH'] + " " + tmp_loc +\
" " + mode + " " + rdd_type)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.byteToString(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" + SPARK_SUPPORT_NAMES['RDD_SFRAME_PATH'] + " " +\
tmp_loc + " " + mode + " " + rdd_type)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf_coltypes = out_sf.column_types()
if(len(out_sf_coltypes) != 0):
sf_coltypes = sf.column_types()
sf_temp_names = sf.column_names()
out_sf_temp_names = out_sf.column_names()
for i in range(len(sf_coltypes)):
if sf_coltypes[i] != out_sf_coltypes[i]:
print "mismatch for types %s and %s" % (sf_coltypes[i],out_sf_coltypes[i])
sf[sf_temp_names[i]] = sf[sf_temp_names[i]].astype(str)
out_sf[out_sf_temp_names[i]] = out_sf[out_sf_temp_names[i]].astype(str)
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def __from_UTF8Deserialized_rdd__(cls, rdd):
_mt._get_metric_tracker().track('sframe.__from_UTF8Deserialized_rdd__')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
cur_sc = rdd.ctx
sf_names = None
sf_types = None
tmp_loc = SFrame.__get_staging_dir__(cur_sc)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
if(rdd.__class__.__name__ in {'SchemaRDD','DataFrame'}):
first_row = rdd.take(1)[0]
if hasattr(first_row, 'keys'):
sf_names = first_row.keys()
sf_types = [type(i) for i in first_row.values()]
else:
sf_names = first_row.__FIELDS__
sf_types = [type(i) for i in first_row]
sf_names = [str(i) for i in sf_names]
for _type in sf_types:
if(_type != int and _type != str and _type != float and _type != unicode):
raise TypeError("Only int, str, and float are supported for now")
types = ""
for i in sf_types:
types += i.__name__ + ","
if cur_sc.master[0:5] == 'local':
t = rdd._jschema_rdd.toJavaStringOfValues().pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " + tmp_loc +\
" " + types)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.toJavaStringOfValues(
rdd._jschema_rdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc + " " + types)
else:
if cur_sc.master[0:5] == 'local':
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
BINARY_PATHS['SPARK_PIPE_WRAPPER_PATH'] + " " +\
BINARY_PATHS['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
else:
t = cur_sc._jvm.org.graphlab.create.GraphLabUtil.pythonToJava(
rdd._jrdd).pipe(
"./" + SPARK_SUPPORT_NAMES['SPARK_PIPE_WRAPPER_PATH'] +\
" " + "./" +\
SPARK_SUPPORT_NAMES['RDD_SFRAME_NONPICKLE_PATH'] + " " +\
tmp_loc)
# We get the location of an SFrame index file per Spark partition in
# the result. We assume that this is in partition order.
res = t.collect()
out_sf = cls()
sframe_list = []
for url in res:
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(url))
sf.__proxy__.delete_on_close()
out_sf = out_sf.append(sf)
out_sf.__proxy__.delete_on_close()
if sf_names is not None:
out_names = out_sf.column_names()
if(set(out_names) != set(sf_names)):
out_sf = out_sf.rename(dict(zip(out_names, sf_names)))
return out_sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
"""
Convert a table or query from a database to an SFrame.
This function does not do any checking on the given SQL query, and
cannot know what effect it will have on the database. Any side effects
from the query will be reflected on the database. If no result
rows are returned, an empty SFrame is created.
Keep in mind the case (upper or lower) in which your database stores
table names by default. In some cases, you may need to add quotation
marks (or whatever character your database uses to quote identifiers),
especially if you created the table using `to_odbc`.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
sql : str
A SQL query. The query must be acceptable by the ODBC driver used by
`graphlab.extensions._odbc_connection.unity_odbc_connection`.
Returns
-------
out : SFrame
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
does not apply to the machine your database is running, which can (and
often will) be running on a separate machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")
>>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
"""
result = db.execute_query(sql)
if not isinstance(result, SFrame):
raise RuntimeError("Cannot create an SFrame for query. No result set.")
return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
"""
Convert an SFrame to a table in a database.
By default, searches for a table in the database with the given name.
If found, this will attempt to append all the rows of the SFrame to the
end of the table. If not, this will create a new table with the given
name. This behavior is toggled with the `append_if_exists` flag.
When creating a new table, GraphLab Create uses a heuristic approach to
pick a corresponding type for each column in the SFrame using the type
information supplied by the database's ODBC driver. Your driver must
support giving this type information for GraphLab Create to support
writing to the database.
To allow more expressive and accurate naming, `to_odbc` puts quotes
around each identifier (table names and column names). Depending on
your database, you may need to refer to the created table with quote
characters around the name. This character is not the same for all
databases, but '"' is the most common.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
table_name : str
The name of the table you would like to create/append to.
append_if_exists : bool
If True, this will attempt to append to the table named `table_name`
if it is found to exist in the database.
verbose : bool
Print progress updates on the insertion process.
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
"local machine" rule does not apply to the machine your database is
running on, which can (and often will) be running on a separate
machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})
>>> sf.to_odbc(db, 'a_cool_table')
"""
if (not verbose):
glconnect.get_client().set_log_progress(False)
db._insert_sframe(self, table_name, append_if_exists)
if (not verbose):
glconnect.get_client().set_log_progress(True)
def __repr__(self):
"""
Returns a string description of the frame
"""
printed_sf = self._imagecols_to_stringcols()
ret = self.__get_column_description__()
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
if (len(printed_sf.head()) > 0):
ret = ret + str(self)
else:
ret = ret + "\t[]"
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
max_column_width=30, max_columns=20,
max_rows_to_display=60):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
max_row_width : int, optional
Max number of characters per table.
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype() is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if (type(value) is array.array):
return str(list(value))
elif (type(value) is list):
return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
else:
return str(value)
def _escape_space(s):
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def _truncate_respect_unicode(s, max_length):
if (len(s) <= max_length):
return s
else:
u = unicode(s, 'utf-8', errors='replace')
return u[:max_length].encode('utf-8')
def _truncate_str(s, wrap_str=False):
"""
Truncate and optionally wrap the input string as unicode, replacing
any unconvertible characters with the Unicode replacement character.
"""
s = _escape_space(s)
if len(s) <= max_column_width:
return unicode(s, 'utf-8', errors='replace')
else:
ret = ''
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
ret = wrapped_lines[0] + '\n' + last_line + ' ...'
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
return unicode(ret, 'utf-8', errors='replace')
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
# let's build a list of tables with max_columns
# each table should satisfy, max_row_width, and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
else:
col_width = max_column_width
if (table_width + col_width < max_row_width):
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
table_width = str(tbl).find('\n')
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = 'c'
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_cols() > max_columns:
row_of_tables[-1].add_column('...', ['...'] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
See Also
--------
head, tail
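Examples
--------
A brief illustrative sketch (the small frame below is hypothetical);
the call prints the first rows followed by a
"[100 rows x 1 columns]" footer:
>>> sf = graphlab.SFrame({'id': range(100)})
>>> sf.print_rows(num_rows=5)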
"""
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == graphlab.Image]
# If there are image-type columns, copy the SFrame and cast the first num_rows of those columns to string
if len(image_column_names) > 0:
printed_sf = SFrame()
for t in names:
if t in image_column_names:
printed_sf[t] = self[t]._head_str(num_rows)
else:
printed_sf[t] = self[t].head(num_rows)
else:
printed_sf = self
return printed_sf
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first 10 elements of the frame, along
with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if (not footer):
return '\n'.join([str(tb) for tb in row_of_tables])
if self.__has_size__():
footer = '[%d rows x %d columns]\n' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '\n'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]\n' % self.num_columns()
footer += '\n'.join(LAZY_FOOTER_STRS)
return '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True, max_row_width=120, max_columns=40, max_column_width=25, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if self.__has_size__():
footer = '[%d rows x %d columns]<br/>' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '<br/>'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]<br/>' % self.num_columns()
footer += '<br/>'.join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = '\n</div>'
return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
"""
return self.select_columns(self.column_names())
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
"""
if type(other) is SArray:
if len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
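Examples
--------
A minimal sketch with a hypothetical one-column frame (Python 2 type
reprs shown):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf.dtype()
[<type 'int'>]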
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
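Examples
--------
A minimal sketch with a hypothetical frame:
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf.num_rows()
3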
"""
return self.__proxy__.num_rows()
def num_cols(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_columns, num_rows
"""
return self.__proxy__.num_columns()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_cols, num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
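Examples
--------
A minimal sketch; the second column is added after construction so the
column order is deterministic:
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf['val'] = ['A', 'B', 'C']
>>> sf.column_names()
['id', 'val']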
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
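Examples
--------
A short sketch with a hypothetical frame; ``head`` does not modify the
original SFrame:
>>> sf = graphlab.SFrame({'id': range(100)})
>>> len(sf.head(5))
5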
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
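Examples
--------
A minimal sketch (assumes pandas is installed):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> df = sf.to_dataframe()
>>> list(df['id'])
[1, 2, 3]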
"""
assert HAS_PANDAS
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
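Examples
--------
A short sketch mirroring ``head``:
>>> sf = graphlab.SFrame({'id': range(100)})
>>> list(sf.tail(3)['id'])
[97, 98, 99]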
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None, seed=None):
"""
Transform each row to an :class:`~graphlab.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
in this SArray is transformed by `fn(x)` where `x` is a single row in
the sframe represented as a dictionary. The ``fn`` should return
exactly one value which can be cast into type ``dtype``. If ``dtype`` is
not specified, the first 100 rows of the SFrame are used to make a guess
of the target data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
The dtype of the new SArray. If None, the first 100
elements of the array are used to guess the target
data type.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
double mean(const std::map<flexible_type, flexible_type>& dict) {
double sum = 0.0;
for (const auto& kv: dict) sum += (double)kv.second;
return sum / dict.size();
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(mean, "row");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
... 'x2': [3, 1, 2]})
>>> sf.apply(example.mean)
dtype: float
Rows: 3
[2.0,2.0,2.0]
"""
assert _is_callable(fn), "Input must be a function"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype()
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.apply')
nativefn = None
try:
import graphlab.extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All output rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert inspect.isfunction(fn), "Input must be a function"
if not seed:
seed = int(time.time())
_mt._get_metric_tracker().track('sframe.flat_map')
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError(
"Could not infer output column types from the first ten rows " +
"of the SFrame. Please use the 'column_types' parameter to " +
"set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
"""
Sample the current SFrame's rows.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch. Must be between 0 and 1.
The number of rows returned is approximately the fraction times the
number of rows.
seed : int, optional
Seed for the random number generator used to sample.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if not seed:
seed = int(time.time())
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
_mt._get_metric_tracker().track('sframe.sample')
if (self.num_rows() == 0 or self.num_cols() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch for the first returned
SFrame. Must be between 0 and 1.
seed : int, optional
Seed for the random number generator used to split.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = graphlab.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print len(sf_train), len(sf_test)
922 102
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return (SFrame(), SFrame())
if not seed:
seed = int(time.time())
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
_mt._get_metric_tracker().track('sframe.random_split')
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed)
return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1]))
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is sorted
by `column_name` in the given order (default is descending). When `k`
is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = graphlab.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
_mt._get_metric_tracker().track('sframe.topk')
sf = self[self[column_name].topk_index(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from the filename given. If the
file name ends with '.csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
_mt._get_metric_tracker().track('sframe.save', properties={'format':format})
if format is None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
else:
raise ValueError("Unsupported format: {}".format(format))
def select_column(self, key):
"""
Get a reference to the :class:`~graphlab.SArray` that corresponds with
the given key. Throws an exception if the key is something other than a
string or if the key is not found.
Parameters
----------
key : str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``key``.
See Also
--------
select_columns
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(key, str):
raise TypeError("Invalid key type: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(key))
def select_columns(self, keylist):
"""
Get SFrame composed only of the columns referred to in the given list of
keys. Throws an exception if ANY of the keys are not in this SFrame or
if ``keylist`` is anything other than a list of strings.
Parameters
----------
keylist : list[str]
The list of column names.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``keylist`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not hasattr(keylist, '__iter__'):
raise TypeError("keylist must be an iterable")
if not all([isinstance(x, str) for x in keylist]):
raise TypeError("Invalid key type: must be str")
key_set = set(keylist)
if (len(key_set)) != len(keylist):
for key in key_set:
if keylist.count(key) > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(keylist))
def add_column(self, data, name=""):
"""
Add a column to this SFrame. The number of elements in the data given
must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self. If no
name is given, a default name is chosen.
Parameters
----------
data : SArray
The 'column' of data to add.
name : string, optional
The name of the column. If no name is given, a default name is
chosen.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = graphlab.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> sf.add_column(sa, name='species')
>>> sf
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
with cython_context():
self.__proxy__.add_column(data.__proxy__, name)
return self
def add_columns(self, data, namelist=None):
"""
Adds multiple columns to this SFrame. The number of elements in all
columns must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
namelist : list of string, optional
A list of column names. All names must be specified. ``namelist`` is
ignored if data is an SFrame.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = graphlab.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> sf.add_columns(sf2)
>>> sf
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
namelist = other.column_names()
my_columns = set(self.column_names())
for name in namelist:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not hasattr(datalist, '__iter__'):
raise TypeError("datalist must be an iterable")
if not hasattr(namelist, '__iter__'):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list : must all be str")
with cython_context():
self.__proxy__.add_columns([x.__proxy__ for x in datalist], namelist)
return self
def remove_column(self, name):
"""
Remove a column from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
name : string
The name of the column to remove.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> sf.remove_column('val')
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
colid = self.column_names().index(name)
with cython_context():
self.__proxy__.remove_column(colid)
return self
def remove_columns(self, column_names):
"""
Remove one or more columns from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> sf.remove_columns(['val1', 'val2'])
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
for colid in reversed(deletion_indices):
with cython_context():
self.__proxy__.remove_column(colid)
return self
def swap_columns(self, column_1, column_2):
"""
Swap the columns with the given names. This operation modifies the
current SFrame in place and returns self.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.swap_columns('id', 'val')
>>> sf
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_1)
colid_2 = colnames.index(column_2)
with cython_context():
self.__proxy__.swap_columns(colid_1, colid_2)
return self
def rename(self, names):
"""
Rename the given columns. ``names`` is expected to be a dict specifying
the old and new names. This changes the names of the columns given as
the keys and replaces them with the names given as the values. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> sf.rename({'X1': 'name', 'X2':'address'})
>>> sf
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
with cython_context():
for k in names:
colid = self.column_names().index(k)
self.__proxy__.set_column_name(colid, names[k])
return self
def __getitem__(self, key):
"""
This method does things based on the type of `key`.
If `key` is:
* str
Calls `select_column` on `key`
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
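Examples
--------
A brief sketch of each access pattern, using a small hypothetical frame
(the SArray case assumes the usual comparison operators, which produce
a 0/1 selector):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> list(sf['id'])          # str key selects a column
[1, 2, 3]
>>> sf[0]                   # int key returns one row as a dict
{'id': 1}
>>> len(sf[sf['id'] > 1])   # SArray key performs a logical filter
2
>>> len(sf[1:3])            # slice returns the sliced rows
2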
"""
if type(key) is SArray:
return self._row_selector(key)
elif type(key) is list:
return self.select_columns(key)
elif type(key) is str:
return self.select_column(key)
elif type(key) is int:
if key < 0:
key = len(self) + key
if key >= len(self):
raise IndexError("SFrame index out of range")
return list(SFrame(_proxy = self.__proxy__.copy_range(key, 1, key+1)))[0]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
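Examples
--------
A brief sketch of the three accepted value kinds (hypothetical frame):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf['flag'] = 0                    # constant value
>>> sf['letter'] = ['a', 'b', 'c']    # iterable wrapped into an SArray
>>> sf['id2'] = sf['id'] * 2          # existing SArray
>>> sorted(sf.column_names())
['flag', 'id', 'id2', 'letter']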
"""
if type(key) is list:
self.add_columns(value, key)
elif type(key) is str:
sa_value = None
if (type(value) is SArray):
sa_value = value
elif hasattr(value, '__iter__'): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if not key in self.column_names():
with cython_context():
self.add_column(sa_value, key)
else:
# Special case: replacing the only column.
# The server would fail the replacement if the new column has a different
# length than the current one, which doesn't make sense when we are
# replacing the only column. To support this, we first take out the only
# column and then put it back if an exception happens.
single_column = (self.num_cols() == 1)
if (single_column):
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key)
else:
# add the column to a unique column name.
tmpname = '__' + '-'.join(self.column_names())
try:
self.add_column(sa_value, tmpname)
except Exception as e:
if (single_column):
self.add_column(saved_column, key)
raise
if (not single_column):
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname)
self.remove_column(key)
self.rename({tmpname: key})
else:
raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key)
def __materialize__(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
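Examples
--------
A minimal sketch; useful after a chain of lazily evaluated operations:
>>> sf = graphlab.SFrame({'id': range(10)})
>>> sf['double'] = sf['id'] * 2   # lazily evaluated
>>> sf.__materialize__()          # force evaluation and persistence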
"""
with cython_context():
self.__proxy__.materialize()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
"""
_mt._get_metric_tracker().track('sframe.__iter__')
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while(True):
for j in ret:
yield dict(zip(column_names, j))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = graphlab.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.append')
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow the column order of the two sframes to differ, so we build a new
# sframe from the "other" sframe with exactly the same column order as this one
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name)
# check column type
if my_column_types[i] != other_column.dtype():
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
with cython_context():
processed_other_frame.__materialize__()
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
"""
Perform a group on the key_columns followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
Parameters
----------
key_columns : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is a
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import graphlab.aggregate as agg
>>> url = 'http://s3.amazonaws.com/gl-testdata/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_columns='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_columns='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie')})
Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable length list of aggregation
specifiers so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_columns is a list
if isinstance(key_columns, str):
key_columns = [key_columns]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_columns:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column " + column + " does not exist in SFrame")
if self[column].dtype() == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not(isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for (col,output) in zip(column[0],key):
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [key]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition of output column: " + key)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
elif val == graphlab.aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = graphlab.aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op != graphlab.aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
_mt._get_metric_tracker().track('sframe.groupby', properties={'operator':op})
with cython_context():
return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns,
group_output_columns, group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
_mt._get_metric_tracker().track('sframe.join', properties={'type':how})
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
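        A single value can also be passed; it is wrapped in a list and
        converted to an SArray before filtering. For example (output shown
        for illustration only):
        >>> sf.filter_by('dog', 'animal_type')
        +-------------+----+------+
        | animal_type | id | name |
        +-------------+----+------+
        |     dog     | 1  | bob  |
        +-------------+----+------+
        [1 rows x 3 columns]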
"""
_mt._get_metric_tracker().track('sframe.filter_by')
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not hasattr(values, '__iter__'):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
        # Make sure the values list has unique values, or else the join could
        # duplicate rows rather than just filter.
value_sf = value_sf.groupby(column_name, {})
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
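                # After the left join, rows with a missing id came only from
                # self (no match in value_sf), i.e. exactly the rows to keep
                # when excluding.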
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
@_check_canvas_enabled
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
columns : list of str, optional
The columns of this SFrame to show in the SFrame view. In an
interactive browser target of Canvas, the columns will be selectable
and reorderable through the UI as well. If not specified, the
SFrame view will use all columns of the SFrame.
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a column filter (applied to all SFrame views):
>>> sf.show(columns=["Foo", "Bar"]) # use only columns 'Foo' and 'Bar'
>>> sf.show(columns=sf.column_names()[3:7]) # use columns 3-7
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
import graphlab.canvas
import graphlab.canvas.inspect
import graphlab.canvas.views.sframe
graphlab.canvas.inspect.find_vars(self)
return graphlab.canvas.show(graphlab.canvas.views.sframe.SFrameView(self, params={
'view': view,
'columns': columns,
'x': x,
'y': y
}))
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack two or more columns of the current SFrame into one single
        column. The result is a new SFrame with the unaffected columns from the
original SFrame plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
            A list of column names to be packed. There must be at least two
            columns to pack. If omitted and `column_prefix` is not
specified, all columns from current SFrame are packed. This
parameter is mutually exclusive with the `column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- There must be at least two columns to pack.
- If packing to dictionary, missing key is always dropped. Missing
values are dropped if fill_na is not provided, otherwise, missing
value is replaced by 'fill_na'. If packing to list or array, missing
values will be kept. If 'fill_na' is provided, the missing value is
replaced with 'fill_na' value.
Examples
--------
        Suppose 'sf' is an SFrame that maintains business category
information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
        >>> sf.pack_columns(column_prefix='category', dtype=dict,
        ...                 remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
        >>> sf.pack_columns(columns = ['business', 'category.retail',
        ...                            'category.food', 'category.service',
        ...                            'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns != None and column_prefix != None:
            raise ValueError("'columns' and 'column_prefix' parameters cannot be given at the same time.")
if new_column_name == None and column_prefix != None:
new_column_name = column_prefix
if column_prefix != None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
                raise ValueError("There is no column that starts with prefix '" + column_prefix + "'")
elif columns == None:
columns = self.column_names()
else:
if not hasattr(columns, '__iter__'):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
                raise ValueError("There are duplicate column names in the columns parameter")
if (len(columns) <= 1):
raise ValueError("Please provide at least two columns to pack")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name != None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
                raise KeyError("Current SFrame already contains a column named " + new_column_name)
else:
new_column_name = ""
_mt._get_metric_tracker().track('sframe.pack_columns')
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys, dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
        For more details regarding name generation and other behavior, refer
        to :py:func:`graphlab.SArray.split_datetime()`.
Parameters
----------
expand_column : str
            Name of the datetime column to expand.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit : list[str], optional
Limits the set of datetime elements to expand.
Elements are 'year','month','day','hour','minute',
and 'second'.
tzone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns from the original
            SFrame, with the given column replaced by a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
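        # Keep appending ".1" until the expanded names no longer collide with
        # the columns we are keeping from the original SFrame.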
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.split_datetime')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
For more details regarding name generation, missing value handling and
other, refer to the SArray version of
:py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
            If not provided, column types are automatically inferred from the
            first 100 rows. For array type, the default column type is float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
            Limits unpacking to a subset of the list/array/dict value. For a
            dictionary SArray, `limit` is a list of dictionary keys to unpack.
            For a list/array SArray, `limit` is a list of integer indexes into
            the list/array value.
Returns
-------
out : SFrame
            A new SFrame that contains the rest of the columns from the original
            SFrame, with the given column replaced by a collection of unpacked columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
        [3 rows x 2 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(zip(new_sf.column_names(), new_names)))
_mt._get_metric_tracker().track('sframe.unpack')
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def stack(self, column_name, new_column_name=None, drop_na=False):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
        If the column is array or list type, one new column is created as a
        result of stacking, with each row holding one element of the array or
        list value and the rest of the columns from the same original row repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
            new_column_name must be a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
        To drop that row, set drop_na=True in the parameters to stack.
        Suppose 'sf' is an SFrame that contains a user (in the 'topic' column)
        and his/her friends, where the 'friends' column holds a list of values.
        Stacking 'friends' creates one row for each user/friend pair:
>>> sf = graphlab.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
        +-------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
        +-------+--------+
        | topic | friend |
        +-------+--------+
        |   1   |   2    |
        |   1   |   3    |
        |   1   |   4    |
        |   2   |   5    |
        |   2   |   6    |
        |   3   |   4    |
        |   3   |   5    |
        |   3   |   10   |
        |   3   |  None  |
        +-------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
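        # Sample up to the first 100 non-missing values to infer the type(s)
        # of the stacked output column(s).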
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
            raise ValueError("Cannot infer column type because there are not enough rows with values")
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
_mt._get_metric_tracker().track('sframe.stack')
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na))
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
_mt._get_metric_tracker().track('sframe.unstack')
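        # unstack is implemented as a groupby over all remaining columns with
        # a CONCAT aggregator on the column(s) being collapsed.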
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : graphlab.aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name:graphlab.aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, graphlab.aggregate.CONCAT(column[0], column[1]))
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(),{})
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
        Only columns of type str, int, float, or datetime.datetime can be sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `sort_columns` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
                raise ValueError("sort_columns elements are not all of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
            if (self[column].dtype() not in (str, int, float, datetime.datetime)):
                raise TypeError("Only columns of type (str, int, float, datetime.datetime) can be sorted")
_mt._get_metric_tracker().track('sframe.sort')
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
        >>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
        >>> sf.dropna('a', how="all")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna')
        # An empty column list would be sent to the server as "use all columns",
        # but an explicitly empty list from the user means "no columns", which
        # makes this a no-op, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
_mt._get_metric_tracker().track('sframe.dropna_split')
        # An empty column list would be sent to the server as "use all columns",
        # but an explicitly empty list from the user means "no columns", which
        # makes this a no-op, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
            # names in this case, since it is the most common)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
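        # Selecting all columns gives us a copy of the SFrame; we then swap in
        # the filled version of the requested column.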
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
            The new SFrame with the new row number column added.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
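        A custom column name and start value can also be given (output shown
        for illustration only):
        >>> sf.add_row_number(column_name='row_id', start=1)
        +--------+------+------+
        | row_id |  a   |  b   |
        +--------+------+------+
        |   1    |  1   |  a   |
        |   2    | None |  b   |
        |   3    | None | None |
        +--------+------+------+
        [3 rows x 3 columns]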
"""
_mt._get_metric_tracker().track('sframe.add_row_number')
if type(column_name) is not str:
            raise TypeError("Must give column_name as a str")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_cols())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._proxy = value
| ypkang/Dato-Core | src/unity/python/graphlab/data_structures/sframe.py | Python | agpl-3.0 | 196,438 |
def _path_inject(paths):
import sys
sys.path[:0] = paths
| lovexiaov/SandwichApp | venv/lib/python2.7/site-packages/py2app/bootstrap/path_inject.py | Python | apache-2.0 | 65 |
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import re
import socket
import time
import boto
from boto import config, storage_uri_for_key
from boto.connection import AWSAuthConnection
from boto.exception import ResumableDownloadException
from boto.exception import ResumableTransferDisposition
from boto.s3.keyfile import KeyFile
from boto.gs.key import Key as GSKey
"""
Resumable download handler.
Resumable downloads will retry failed downloads, resuming at the byte count
completed by the last download attempt. If too many retries happen with no
progress (per configurable num_retries param), the download will be aborted.
The caller can optionally specify a tracker_file_name param in the
ResumableDownloadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
Note that resumable downloads work across providers (they depend only
on support Range GETs), but this code is in the boto.s3 package
because it is the wrong abstraction level to go in the top-level boto
package.
TODO: At some point we should refactor the code to have a storage_service
package where all these provider-independent files go.
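Example usage (an illustrative sketch, not part of the original module;
assumes `key` is a boto Key object retrieved from a bucket):
    handler = ResumableDownloadHandler(tracker_file_name='/tmp/obj.tracker',
                                       num_retries=6)
    key.get_contents_to_filename('obj.bin', res_download_handler=handler)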
"""
class ByteTranslatingCallbackHandler(object):
"""
Proxy class that translates progress callbacks made by
boto.s3.Key.get_file(), taking into account that we're resuming
a download.
"""
def __init__(self, proxied_cb, download_start_point):
self.proxied_cb = proxied_cb
self.download_start_point = download_start_point
def call(self, total_bytes_uploaded, total_size):
self.proxied_cb(self.download_start_point + total_bytes_uploaded,
total_size)
def get_cur_file_size(fp, position_to_eof=False):
"""
Returns size of file, optionally leaving fp positioned at EOF.
"""
if isinstance(fp, KeyFile) and not position_to_eof:
# Avoid EOF seek for KeyFile case as it's very inefficient.
return fp.getkey().size
if not position_to_eof:
cur_pos = fp.tell()
fp.seek(0, os.SEEK_END)
cur_file_size = fp.tell()
if not position_to_eof:
fp.seek(cur_pos, os.SEEK_SET)
return cur_file_size
class ResumableDownloadHandler(object):
"""
Handler for resumable downloads.
"""
MIN_ETAG_LEN = 5
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
def __init__(self, tracker_file_name=None, num_retries=None):
"""
Constructor. Instantiate once for each downloaded file.
:type tracker_file_name: string
:param tracker_file_name: optional file name to save tracking info
about this download. If supplied and the current process fails
the download, it can be retried in a new process. If called
with an existing file containing an unexpired timestamp,
we'll resume the transfer for this file; else we'll start a
new resumable download.
:type num_retries: int
:param num_retries: the number of times we'll re-try a resumable
download making no progress. (Count resets every time we get
progress, so download can span many more than this number of
retries.)
"""
self.tracker_file_name = tracker_file_name
self.num_retries = num_retries
self.etag_value_for_current_download = None
if tracker_file_name:
self._load_tracker_file_etag()
# Save download_start_point in instance state so caller can
# find how much was transferred by this ResumableDownloadHandler
# (across retries).
self.download_start_point = None
def _load_tracker_file_etag(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
self.etag_value_for_current_download = f.readline().rstrip('\n')
# We used to match an MD5-based regex to ensure that the etag was
# read correctly. Since ETags need not be MD5s, we now do a simple
# length sanity check instead.
if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
print('Couldn\'t read etag in tracker file (%s). Restarting '
'download from scratch.' % self.tracker_file_name)
except IOError, e:
# Ignore non-existent file (happens first time a download
# is attempted on an object), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because
# self.etag_value_for_current_download == None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'download from scratch.' %
(self.tracker_file_name, e.strerror))
finally:
if f:
f.close()
def _save_tracker_info(self, key):
self.etag_value_for_current_download = key.etag.strip('"\'')
if not self.tracker_file_name:
return
f = None
try:
f = open(self.tracker_file_name, 'w')
f.write('%s\n' % self.etag_value_for_current_download)
except IOError, e:
raise ResumableDownloadException(
                'Couldn\'t write tracker file (%s): %s.\nThis can happen '
'if you\'re using an incorrectly configured download tool\n'
'(e.g., gsutil configured to save tracker files to an '
'unwritable directory)' %
(self.tracker_file_name, e.strerror),
ResumableTransferDisposition.ABORT)
finally:
if f:
f.close()
def _remove_tracker_file(self):
if (self.tracker_file_name and
os.path.exists(self.tracker_file_name)):
os.unlink(self.tracker_file_name)
def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
torrent, version_id, hash_algs):
"""
Attempts a resumable download.
Raises ResumableDownloadException if any problems occur.
"""
cur_file_size = get_cur_file_size(fp, position_to_eof=True)
if (cur_file_size and
self.etag_value_for_current_download and
self.etag_value_for_current_download == key.etag.strip('"\'')):
# Try to resume existing transfer.
if cur_file_size > key.size:
raise ResumableDownloadException(
'%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
'if you re-try this download it will start from scratch' %
(fp.name, cur_file_size, str(storage_uri_for_key(key)),
key.size), ResumableTransferDisposition.ABORT)
elif cur_file_size == key.size:
if key.bucket.connection.debug >= 1:
print 'Download complete.'
return
if key.bucket.connection.debug >= 1:
print 'Resuming download.'
headers = headers.copy()
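            # HTTP Range is inclusive; request only the bytes we still need.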
headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
self.download_start_point = cur_file_size
else:
if key.bucket.connection.debug >= 1:
print 'Starting new resumable download.'
self._save_tracker_info(key)
self.download_start_point = 0
# Truncate the file, in case a new resumable download is being
# started atop an existing file.
fp.truncate(0)
# Disable AWSAuthConnection-level retry behavior, since that would
# cause downloads to restart from scratch.
if isinstance(key, GSKey):
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0, hash_algs=hash_algs)
else:
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0)
fp.flush()
def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
version_id=None, hash_algs=None):
"""
Retrieves a file from a Key
:type key: :class:`boto.s3.key.Key` or subclass
:param key: The Key object from which upload is to be downloaded
:type fp: file
:param fp: File pointer into which data should be downloaded
:type headers: string
        :param headers: headers to send when retrieving the files
:type cb: function
:param cb: (optional) a callback function that will be called to report
progress on the download. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted from the storage service and
the second representing the total number of bytes that need
to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the cb
parameter this parameter determines the granularity of the callback
by defining the maximum number of times the callback will be
called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type version_id: string
:param version_id: The version ID (optional)
:type hash_algs: dictionary
:param hash_algs: (optional) Dictionary of hash algorithms and
corresponding hashing class that implements update() and digest().
Defaults to {'md5': hashlib/md5.md5}.
Raises ResumableDownloadException if a problem occurs during
the transfer.
"""
debug = key.bucket.connection.debug
if not headers:
headers = {}
# Use num-retries from constructor if one was provided; else check
# for a value specified in the boto config file; else default to 6.
if self.num_retries is None:
self.num_retries = config.getint('Boto', 'num_retries', 6)
progress_less_iterations = 0
while True: # Retry as long as we're making progress.
had_file_bytes_before_attempt = get_cur_file_size(fp)
try:
self._attempt_resumable_download(key, fp, headers, cb, num_cb,
torrent, version_id, hash_algs)
                # Download succeeded, so remove the tracker file (if we have one).
self._remove_tracker_file()
# Previously, check_final_md5() was called here to validate
# downloaded file's checksum, however, to be consistent with
# non-resumable downloads, this call was removed. Checksum
# validation of file contents should be done by the caller.
if debug >= 1:
print 'Resumable download complete.'
return
except self.RETRYABLE_EXCEPTIONS, e:
if debug >= 1:
print('Caught exception (%s)' % e.__repr__())
if isinstance(e, IOError) and e.errno == errno.EPIPE:
# Broken pipe error causes httplib to immediately
# close the socket (http://bugs.python.org/issue5542),
# so we need to close and reopen the key before resuming
# the download.
if isinstance(key, GSKey):
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0, hash_algs=hash_algs)
else:
key.get_file(fp, headers, cb, num_cb, torrent, version_id,
override_num_retries=0)
except ResumableDownloadException, e:
if (e.disposition ==
ResumableTransferDisposition.ABORT_CUR_PROCESS):
if debug >= 1:
print('Caught non-retryable ResumableDownloadException '
'(%s)' % e.message)
raise
elif (e.disposition ==
ResumableTransferDisposition.ABORT):
if debug >= 1:
print('Caught non-retryable ResumableDownloadException '
'(%s); aborting and removing tracker file' %
e.message)
self._remove_tracker_file()
raise
else:
if debug >= 1:
print('Caught ResumableDownloadException (%s) - will '
'retry' % e.message)
# At this point we had a re-tryable failure; see if made progress.
if get_cur_file_size(fp) > had_file_bytes_before_attempt:
progress_less_iterations = 0
else:
progress_less_iterations += 1
if progress_less_iterations > self.num_retries:
# Don't retry any longer in the current process.
raise ResumableDownloadException(
'Too many resumable download attempts failed without '
'progress. You might try this download again later',
ResumableTransferDisposition.ABORT_CUR_PROCESS)
# Close the key, in case a previous download died partway
# through and left data in the underlying key HTTP buffer.
# Do this within a try/except block in case the connection is
# closed (since key.close() attempts to do a final read, in which
# case this read attempt would get an IncompleteRead exception,
            # which we can safely ignore).
try:
key.close()
except httplib.IncompleteRead:
pass
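            # Exponential backoff between progress-less retries: 2, 4, 8, ... seconds.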
sleep_time_secs = 2**progress_less_iterations
if debug >= 1:
print('Got retryable failure (%d progress-less in a row).\n'
'Sleeping %d seconds before re-trying' %
(progress_less_iterations, sleep_time_secs))
time.sleep(sleep_time_secs)
| donny/mako-mori | external/boto/s3/resumable_download_handler.py | Python | mit | 15,584 |
# -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
        #: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, `None`
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
self.view_functions = {}
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
        prefix the endpoint with the blueprint name; this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
        function is executed on each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
| samabhi/pstHealth | venv/lib/python2.7/site-packages/flask/blueprints.py | Python | mit | 13,253 |
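# ---------------------------------------------------------------------------
# Editorial sketch, not part of the file above: a minimal example of how the
# Blueprint class defined above is typically used. The blueprint name, route,
# and view function below are illustrative placeholders.
from flask import Flask, Blueprint

bp = Blueprint('greetings', __name__, url_prefix='/greet')


@bp.route('/<name>')
def hello(name):
    # Recorded as a deferred function via Blueprint.add_url_rule; the endpoint
    # becomes 'greetings.hello' once the blueprint is registered on an app.
    return 'Hello, %s!' % name


app = Flask(__name__)
app.register_blueprint(bp)  # runs the deferred functions with a BlueprintSetupState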
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_port_security
short_description: Manage port security (l2:PortSecurityPol)
description:
- Manage port security on Cisco ACI fabrics.
notes:
- More information about the internal APIC class B(l2:PortSecurityPol) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
options:
port_security:
description:
- The name of the port security.
required: yes
aliases: [ name ]
description:
description:
    - The description for the port security policy.
aliases: [ descr ]
max_end_points:
description:
- Maximum number of end points (range 0-12000).
- The APIC defaults new port-security policies to C(0).
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
# FIXME: Add more, better examples
EXAMPLES = r'''
- aci_interface_policy_port_security:
host: '{{ inventory_hostname }}'
username: '{{ username }}'
password: '{{ password }}'
port_security: '{{ port_security }}'
description: '{{ descr }}'
max_end_points: '{{ max_end_points }}'
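# Additional examples added editorially, based on the argument_spec and the
# documented state choices below; all values are placeholders.
- aci_interface_policy_port_security:
    host: '{{ inventory_hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    port_security: '{{ port_security }}'
    state: absent
- aci_interface_policy_port_security:
    host: '{{ inventory_hostname }}'
    username: '{{ username }}'
    password: '{{ password }}'
    state: query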
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
port_security=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
max_end_points=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'), # Deprecated starting from v2.6
protocol=dict(type='str', removed_in_version='2.6'), # Deprecated in v2.6
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['port_security']],
['state', 'present', ['port_security']],
],
)
port_security = module.params['port_security']
description = module.params['description']
max_end_points = module.params['max_end_points']
if max_end_points is not None and max_end_points not in range(12001):
module.fail_json(msg='The "max_end_points" must be between 0 and 12000')
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='l2PortSecurityPol',
aci_rn='infra/portsecurityP-{0}'.format(port_security),
filter_target='eq(l2PortSecurityPol.name, "{0}")'.format(port_security),
module_object=port_security,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='l2PortSecurityPol',
class_config=dict(
name=port_security,
descr=description,
maximum=max_end_points,
),
)
aci.get_diff(aci_class='l2PortSecurityPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| bregman-arie/ansible | lib/ansible/modules/network/aci/aci_interface_policy_port_security.py | Python | gpl-3.0 | 6,582 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Tests autojump.
"""
from __future__ import division
import autojump
import contextlib
import random
import os
import shutil
import sys
import tempfile
import unittest
@contextlib.contextmanager
def no_stderr():
savestderr = sys.stderr
class DevNull(object):
def write(self, _): pass
sys.stderr = DevNull()
yield
sys.stderr = savestderr
# test suite
class TestAutojump(unittest.TestCase):
def setUp(self):
autojump.CONFIG_DIR = tempfile.mkdtemp()
autojump.TESTING = True
self.fd, self.fname = tempfile.mkstemp()
self.db = autojump.Database(self.fname)
random.seed()
def tearDown(self):
os.remove(self.fname)
if os.path.isfile(self.fname + ".bak"):
os.remove(self.fname + ".bak")
if (os.path.exists(autojump.CONFIG_DIR) and
('tmp' in autojump.CONFIG_DIR or 'temp' in autojump.CONFIG_DIR)):
shutil.rmtree(autojump.CONFIG_DIR)
def test_config(self):
self.assertEqual(autojump.COMPLETION_SEPARATOR, '__')
def test_db_add(self):
self.db.add('/1', 100)
self.assertEqual(self.db.get_weight('/1'), 100)
self.db.add('/2', 10)
self.assertEqual(self.db.get_weight('/2'), 10)
self.db.add('/2', 10)
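        # 14.142135... == sqrt(10**2 + 10**2): repeated adds appear to combine
        # weights as the root of the summed squares rather than a plain sum.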
self.assertEqual(self.db.get_weight('/2'), 14.142135623730951)
def test_db_get_weight(self):
self.assertEqual(self.db.get_weight('/'), 0)
def test_db_decay(self):
self.db.add('/1', 10)
self.db.decay()
self.assertTrue(self.db.get_weight('/1') < 10)
def test_db_load_existing(self):
self.db = autojump.Database('tests/database.txt')
self.assertTrue(len(self.db) > 0)
def test_db_load_empty(self):
# setup
_, fname = tempfile.mkstemp()
db = autojump.Database(fname)
try:
# test
            self.assertEquals(len(db), 0)
finally:
# teardown
os.remove(fname)
def test_db_load_backup(self):
# setup
fname = '/tmp/autojump_test_db_load_backup_' + str(random.randint(0,32678))
db = autojump.Database(fname)
db.add('/1')
os.rename(fname, fname + '.bak')
try:
# test
with no_stderr():
db = autojump.Database(fname)
self.assertTrue(len(db.data) > 0)
self.assertTrue(os.path.isfile(fname))
finally:
# teardown
os.remove(fname)
os.remove(fname + '.bak')
def test_db_purge(self):
self.db.add('/1')
self.db.purge()
self.assertEquals(len(self.db), 0)
def test_db_save(self):
# setup
fname = '/tmp/autojump_test_db_save_' + str(random.randint(0,32678)) + '.txt'
db = autojump.Database(fname)
try:
# test
db.save()
self.assertTrue(os.path.isfile(fname))
finally:
# teardown
os.remove(fname)
os.remove(fname + '.bak')
def test_db_trim(self):
self.db.add('/1')
self.db.add('/2')
self.db.add('/3')
self.db.add('/4')
self.db.add('/5')
self.db.add('/6')
self.db.add('/7')
self.db.add('/8')
self.db.add('/9')
self.db.add('/10')
self.assertEquals(len(self.db), 10)
self.db.trim()
self.assertEquals(len(self.db), 9)
def test_db_decode(self):
#FIXME
self.assertEquals(autojump.decode('foo'), 'foo')
def test_db_unico(self):
#FIXME
self.assertEquals(autojump.unico('foo'), u'foo')
def test_match_normal(self):
max_matches = 1
self.db.add('/foo', 10)
self.db.add('/foo/bar', 20)
patterns = [u'']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertEquals(results[0], '/foo/bar')
patterns = [u'random']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertTrue(len(results) == 0)
patterns = [u'fo']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertEquals(results[0], '/foo')
self.db.add('/foo/bat', 15)
patterns = [u'ba']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertEquals(results[0], '/foo/bar')
self.db.add('/code/inbox', 5)
self.db.add('/home/user/inbox', 10)
patterns = [u'inbox']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertEquals(results[0], '/home/user/inbox')
patterns = [u'co', u'in']
results = autojump.find_matches(self.db, patterns, max_matches)
self.assertEquals(results[0], '/code/inbox')
def test_match_completion(self):
max_matches = 9
ignore_case = True
self.db.add('/1')
self.db.add('/2')
self.db.add('/3')
self.db.add('/4')
self.db.add('/5', 20)
self.db.add('/6', 15)
self.db.add('/7')
self.db.add('/8')
self.db.add('/9')
patterns = [u'']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case)
self.assertEquals(results, ['/5', '/6', '/9', '/8', '/7', '/4', '/3', '/2', '/1'])
def test_match_case_insensitive(self):
max_matches = 1
ignore_case = True
self.db.add('/FOO', 20)
self.db.add('/foo', 10)
patterns = [u'fo']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case)
self.assertEquals(results[0], '/FOO')
def test_match_fuzzy(self):
max_matches = 1
ignore_case = True
fuzzy_search = True
self.db.add('/foo', 10)
self.db.add('/foo/bar', 20)
self.db.add('/abcdefg', 10)
patterns = [u'random']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case, fuzzy_search)
self.assertTrue(len(results) == 0)
patterns = [u'abcdefg']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case, fuzzy_search)
self.assertEquals(results[0], '/abcdefg')
patterns = [u'abcefg']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case, fuzzy_search)
self.assertEquals(results[0], '/abcdefg')
patterns = [u'bacef']
results = autojump.find_matches(self.db, patterns, max_matches, ignore_case, fuzzy_search)
self.assertEquals(results[0], '/abcdefg')
if __name__ == '__main__':
unittest.main()
| xuwangyin/autojump | tests/runtests.py | Python | gpl-3.0 | 6,698 |
# -*- coding: utf-8 -*-
# ConnectorUnit needs to be registered
from . import binder
from . import import_synchronizer
from . import export_synchronizer
from . import delete_synchronizer
from . import backend_adapter
| acsone/connector-magento | magentoerpconnect/unit/__init__.py | Python | agpl-3.0 | 217 |