repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
alpire/picoCTF-web | unit_tests/common.py | 1 | 2755 | # coding: utf-8
"""
Common Testing Functionality.
"""
import api
from functools import wraps
base_team = {
"team_name": "team¢",
"school": "Test HS",
"password": "much_protected",
"eligible": True
}
new_team_user = {
"username": "valid",
"firstname": "Fred",
"lastname": "Hacker",
"password": "valid",
"email": "validʃ@hs.edu",
"ctf-emails": False,
"create-new-team": "true",
"background": "student_hs",
"country": "US",
"team-school-new": "hacker edu",
"team-name-new": base_team['team_name'],
"team-password-new": base_team['password']
}
base_user = {
"username": "valid2",
"firstname": "Fred",
"lastname": "Hacker",
"password": "valid",
"email": "[email protected]",
"ctf-emails": False,
"create-new-team": "false",
"background": "student_hs",
"country": "US",
"team-name-existing": base_team['team_name'],
"team-password-existing": base_team['password']
}
def clear_cache():
"""
Clears the cache before the function is run.
"""
def clear(f):
@wraps(f)
def wrapper(*args, **kwargs):
api.cache.clear_all()
return f(*args, **kwargs)
return wrapper
return clear
def ensure_empty_collections(*collections):
"""
Asserts that each listed collection is empty before the decorated function runs.
Does not clear anything itself.
"""
def clear(f):
@wraps(f)
def wrapper(*args, **kwargs):
db = api.common.get_conn()
collection_size = lambda name: len(list(db[name].find()))
for collection in collections:
assert collection_size(collection) == 0, "Collection was not empty: " + collection
result = f(*args, **kwargs)
return result
return wrapper
return clear
def clear_collections(*collections):
"""
Clears the listed collections after the decorated function has completed.
Throws an assertion error if any collection could not be cleared.
"""
def clear(f):
@wraps(f)
def wrapper(*args, **kwargs):
db = api.common.get_conn()
try:
result = f(*args, **kwargs)
finally:
#Clear the collections.
for collection in collections:
db[collection].remove()
#Ensure they are then empty.
for collection in collections:
collection_size = lambda collection: len(list(db[collection].find()))
assert collection_size(collection) == 0, "Collection: {} was not able to be cleared.".format(collection)
return result
return wrapper
return clear
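# Illustrative usage sketch (not part of the original module): how the decorators
# above are typically combined on a test. The collection name "users" and the
# test body are hypothetical placeholders.
#
# @ensure_empty_collections("users")
# @clear_collections("users")
# def test_something_with_users():
#     db = api.common.get_conn()
#     db["users"].insert({"username": base_user["username"]})
#     assert len(list(db["users"].find())) == 1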
| mit |
juicer/juicer | juicer/common/RPM.py | 2 | 1984 | # -*- coding: utf-8 -*-
# Juicer - Administer Pulp and Release Carts
# Copyright © 2012,2013, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os.path
import juicer.utils
import juicer.utils.Log
import re
class RPM(object):
def __init__(self, source):
self.pgk_name = os.path.basename(source)
# Source is the original location of this RPM. That includes
# both http://.... RPMs and local /home/bro/... ones.
self.source = source
# If this rpm has to be synced later we'll use this to filter
# out just those RPMs.
self.modified = False
url_regex = re.compile(r'^(http)s?://')
if url_regex.match(self.source):
self.synced = False
self.path = None
else:
self.synced = True
self.path = source
def sync(self, destination):
dest_file = os.path.join(destination, self.pgk_name)
# This is the case with stuff that already exists locally
if self.synced and self.source:
return True
if not os.path.exists(destination):
os.mkdir(destination)
self.path = dest_file
juicer.utils.Log.log_debug("Beginning remote->local sync: %s->%s" % (self.source, self.path))
juicer.utils.save_url_as(self.source, dest_file)
self.modified = True
self.synced = True
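# Illustrative sketch (assumption, not in the original file): a remote source is
# only downloaded when sync() is called, while a local path is treated as already
# synced. The URL and destination directory below are hypothetical.
#
# remote = RPM("http://repo.example.com/noarch/foo-1.0-1.noarch.rpm")
# assert remote.synced is False
# remote.sync("/tmp/cart")      # fetches the file via juicer.utils.save_url_as
#
# local = RPM("/home/bro/rpms/foo-1.0-1.noarch.rpm")
# assert local.synced is True   # sync() returns True without downloading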
| gpl-3.0 |
theflofly/tensorflow | tensorflow/python/training/sync_replicas_optimizer.py | 8 | 21785 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
@tf_export(v1=["train.SyncReplicasOptimizer"])
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
This class is deprecated. For synchronous training, please use [Distribution
Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch. Please note that
some workers can consume multiple minibatches, while some may not consume
even one. This is because each worker fetches minibatches as long as
a token exists. If one worker is stuck for some reason and does not
consume a token, another worker can use it.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
To use SyncReplicasOptimizer with an `Estimator`, you need to pass
sync_replicas_hook when calling fit.
```python
my_estimator = DNNClassifier(..., optimizer=opt)
my_estimator.fit(..., hooks=[sync_replicas_hook])
```
"""
@deprecation.deprecated(
None,
"The `SyncReplicaOptimizer` class is deprecated. For synchrononous "
"training, please use [Distribution Strategies](https://github.com/"
"tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
warn_once=True)
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in apply_gradients() so that the user can
modify the gradients, for example clip them with a per-replica global norm, if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
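# Illustrative sketch (assumption, not in the original source): per-replica
# gradient clipping between compute_gradients() and apply_gradients(), as the
# docstring above suggests. `loss` and `global_step` are placeholders, and the
# sketch assumes no None gradients.
#
#   grads_and_vars = opt.compute_gradients(loss)
#   grads, varlist = zip(*grads_and_vars)
#   clipped, _ = tf.clip_by_global_norm(list(grads), clip_norm=5.0)
#   train_op = opt.apply_gradients(zip(clipped, varlist), global_step=global_step)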
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
var_list = []
# local_anchor op will be placed on this worker task by default.
local_anchor = control_flow_ops.no_op()
# Colocating local_step variable prevents it being placed on the PS.
distribution_strategy = distribution_strategy_context.get_strategy()
with distribution_strategy.extended.colocate_vars_with(local_anchor):
self._local_step = variable_scope.variable(
initial_value=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=global_step.dtype.base_dtype,
name="sync_rep_local_step")
self.local_step_init_op = state_ops.assign(self._local_step, global_step)
chief_init_ops = [self.local_step_init_op]
self.ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
with ops.name_scope(None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
# Dense gradients.
if grad is None:
aggregated_grad.append(None) # pass-through.
continue
elif isinstance(grad, ops.Tensor):
grad_accum = data_flow_ops.ConditionalAccumulator(
grad.dtype,
shape=var.get_shape(),
shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_grad(
self._replicas_to_aggregate))
else:
if not isinstance(grad, ops.IndexedSlices):
raise ValueError("Unknown grad type!")
grad_accum = data_flow_ops.SparseConditionalAccumulator(
grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_indexed_slices_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_indexed_slices_grad(
self._replicas_to_aggregate))
self._accumulator_list.append((grad_accum, var.device))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
name="sync_token_q",
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
# dummy_queue is passed to the queue runner. Don't use the real queues
# because the queue runner doesn't automatically reopen it once it
# closed queues in PS devices.
dummy_queue = (
data_flow_ops.FIFOQueue(1,
types_pb2.DT_INT32,
shapes=(),
name="dummy_queue",
shared_name="dummy_queue"))
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies(train_ops):
token = sync_token_queue.dequeue()
train_op = state_ops.assign(self._local_step, token)
with ops.control_dependencies([update_op]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
tokens = array_ops.fill([self._tokens_per_step], global_step)
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
[sync_op])
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(
global_step, name="SetGlobalStep"))
self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def variables(self):
"""Fetches a list of optimizer variables in the default graph.
This wraps `variables()` from the actual optimizer. It does not include
the `SyncReplicasOptimizer`'s local step.
Returns:
A list of variables.
"""
return self._opt.variables()
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens], self._global_step)
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
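# Worked example (illustrative, not in the original source): with
# replicas_to_aggregate=50 and total_num_replicas=40, tokens_needed is
# 50 - 40 = 10, so any num_tokens >= 10 is acceptable; the default of -1
# enqueues replicas_to_aggregate (50) tokens, which satisfies this.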
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
"""A SessionRunHook handles ops related to SyncReplicasOptimizer."""
def __init__(self, sync_optimizer, is_chief, num_tokens):
"""Creates hook to handle SyncReplicasOptimizer initialization ops.
Args:
sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
is_chief: `Bool`, whether is this a chief replica or not.
num_tokens: Number of tokens to add to the queue.
"""
self._sync_optimizer = sync_optimizer
self._is_chief = is_chief
self._num_tokens = num_tokens
def begin(self):
if self._sync_optimizer._gradients_applied is False: # pylint: disable=protected-access
raise ValueError(
"SyncReplicasOptimizer.apply_gradient should be called before using "
"the hook.")
if self._is_chief:
self._local_init_op = self._sync_optimizer.chief_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = self._sync_optimizer.get_chief_queue_runner()
self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
self._num_tokens)
else:
self._local_init_op = self._sync_optimizer.local_step_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = None
self._init_tokens_op = None
def after_create_session(self, session, coord):
"""Runs SyncReplicasOptimizer initialization ops."""
local_init_success, msg = session_manager._ready( # pylint: disable=protected-access
self._ready_for_local_init_op, session,
"Model is not ready for SyncReplicasOptimizer local init.")
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for SyncReplicasOptimizer "
"local_init. Init op: %s, error: %s" %
(self._local_init_op.name, msg))
session.run(self._local_init_op)
if self._init_tokens_op is not None:
session.run(self._init_tokens_op)
if self._q_runner is not None:
self._q_runner.create_threads(
session, coord=coord, daemon=True, start=True)
| apache-2.0 |
wangjun/odoo | openerp/report/render/rml2txt/rml2txt.py | 442 | 16460 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009, P. Christeas, Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import StringIO
from lxml import etree
import utils
Font_size= 10.0
def verbose(text):
sys.stderr.write(text+"\n")
class textbox(object):
"""A box containing plain text.
It can have an offset, in chars.
Lines can be either text strings or nested textbox instances, recursively.
"""
def __init__(self,x=0, y=0):
self.posx = x
self.posy = y
self.lines = []
self.curline = ''
self.endspace = False
def newline(self):
if isinstance(self.curline, textbox):
self.lines.extend(self.curline.renderlines())
else:
self.lines.append(self.curline)
self.curline = ''
def fline(self):
if isinstance(self.curline, textbox):
self.lines.extend(self.curline.renderlines())
elif len(self.curline):
self.lines.append(self.curline)
self.curline = ''
def appendtxt(self,txt):
"""Append some text to the current line.
Mimic the HTML behaviour, where all whitespace evaluates to
a single space """
if not txt:
return
bs = es = False
if txt[0].isspace():
bs = True
if txt[len(txt)-1].isspace():
es = True
if bs and not self.endspace:
self.curline += " "
self.curline += txt.strip().replace("\n"," ").replace("\t"," ")
if es:
self.curline += " "
self.endspace = es
def rendertxt(self,xoffset=0):
result = ''
lineoff = ""
for i in range(self.posy):
result +="\n"
for i in range(self.posx+xoffset):
lineoff+=" "
for l in self.lines:
result+= lineoff+ l +"\n"
return result
def renderlines(self,pad=0):
"""Returns a list of lines, from the current object
pad: all lines must be at least pad characters.
"""
result = []
lineoff = ""
for i in range(self.posx):
lineoff+=" "
for l in self.lines:
lpad = ""
if pad and len(l) < pad :
for i in range(pad - len(l)):
lpad += " "
#elif pad and len(l) > pad ?
result.append(lineoff+ l+lpad)
return result
def haplines(self,arr,offset,cc= ''):
""" Horizontaly append lines
"""
while len(self.lines) < len(arr):
self.lines.append("")
for i in range(len(self.lines)):
while len(self.lines[i]) < offset:
self.lines[i] += " "
for i in range(len(arr)):
self.lines[i] += cc +arr[i]
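# Illustrative sketch (assumption, not in the original file): building a small
# textbox and rendering it as plain text.
#
# tb = textbox(x=4, y=1)
# tb.appendtxt("Hello\tworld\n")   # tabs and newlines become single spaces
# tb.newline()
# tb.appendtxt("second line")
# tb.fline()
# print tb.rendertxt()             # one leading blank line, 4-space x offset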
class _flowable(object):
def __init__(self, template, doc,localcontext):
self._tags = {
'1title': self._tag_title,
'1spacer': self._tag_spacer,
'para': self._tag_para,
'font': self._tag_font,
'section': self._tag_section,
'1nextFrame': self._tag_next_frame,
'blockTable': self._tag_table,
'1pageBreak': self._tag_page_break,
'1setNextTemplate': self._tag_next_template,
}
self.template = template
self.doc = doc
self.localcontext = localcontext
self.nitags = []
self.tbox = None
def warn_nitag(self,tag):
if tag not in self.nitags:
verbose("Unknown tag \"%s\", please implement it." % tag)
self.nitags.append(tag)
def _tag_page_break(self, node):
return "\f"
def _tag_next_template(self, node):
return ''
def _tag_next_frame(self, node):
result=self.template.frame_stop()
result+='\n'
result+=self.template.frame_start()
return result
def _tag_title(self, node):
node.tagName='h1'
return node.toxml()
def _tag_spacer(self, node):
length = 1+int(utils.unit_get(node.get('length')))/35
return "\n"*length
def _tag_table(self, node):
self.tb.fline()
saved_tb = self.tb
self.tb = None
sizes = None
if node.get('colWidths'):
sizes = map(lambda x: utils.unit_get(x), node.get('colWidths').split(','))
trs = []
for n in utils._child_get(node,self):
if n.tag == 'tr':
tds = []
for m in utils._child_get(n,self):
if m.tag == 'td':
self.tb = textbox()
self.rec_render_cnodes(m)
tds.append(self.tb)
self.tb = None
if len(tds):
trs.append(tds)
if not sizes:
verbose("computing table sizes..")
for tds in trs:
trt = textbox()
off=0
for i in range(len(tds)):
p = int(sizes[i]/Font_size)
trl = tds[i].renderlines(pad=p)
trt.haplines(trl,off)
off += sizes[i]/Font_size
saved_tb.curline = trt
saved_tb.fline()
self.tb = saved_tb
return
def _tag_para(self, node):
#TODO: styles
self.rec_render_cnodes(node)
self.tb.newline()
def _tag_section(self, node):
#TODO: styles
self.rec_render_cnodes(node)
self.tb.newline()
def _tag_font(self, node):
"""We do ignore fonts.."""
self.rec_render_cnodes(node)
def rec_render_cnodes(self,node):
self.tb.appendtxt(utils._process_text(self, node.text or ''))
for n in utils._child_get(node,self):
self.rec_render(n)
self.tb.appendtxt(utils._process_text(self, node.tail or ''))
def rec_render(self,node):
""" Recursive render: fill outarr with text of current node
"""
if node.tag is not None:
if node.tag in self._tags:
self._tags[node.tag](node)
else:
self.warn_nitag(node.tag)
def render(self, node):
self.tb= textbox()
#result = self.template.start()
#result += self.template.frame_start()
self.rec_render_cnodes(node)
#result += self.template.frame_stop()
#result += self.template.end()
result = self.tb.rendertxt()
del self.tb
return result
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
def __init__(self, posx, width):
self.width = width
self.posx = posx
def tag_start(self):
return "frame start"
def tag_end(self):
return True
def tag_stop(self):
return "frame stop"
def tag_mergeable(self):
return False
# An awful workaround since I don't really understand the semantics behind merge.
def merge(self, frame):
pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
def __init__(self, node, style):
self.posx = utils.unit_get(node.get('x'))
self.posy = utils.unit_get(node.get('y'))
aligns = {
'drawString': 'left',
'drawRightString': 'right',
'drawCentredString': 'center'
}
align = aligns[node.localName]
self.pos = [(self.posx, self.posy, align, utils.text_get(node), style.get('td'), style.font_size_get('td'))]
def tag_start(self):
return "draw string \"%s\" @(%d,%d)..\n" %("txt",self.posx,self.posy)
def merge(self, ds):
self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
def __init__(self, node, style):
coord = [utils.unit_get(x) for x in utils.text_get(node).split(' ')]
self.ok = False
self.posx = coord[0]
self.posy = coord[1]
self.width = coord[2]-coord[0]
self.ok = coord[1]==coord[3]
self.style = style
self.style = style.get('hr')
def tag_start(self):
return "draw lines..\n"
class _rml_stylesheet(object):
def __init__(self, stylesheet, doc):
self.doc = doc
self.attrs = {}
self._tags = {
'fontSize': lambda x: ('font-size',str(utils.unit_get(x))+'px'),
'alignment': lambda x: ('text-align',str(x))
}
result = ''
for ps in stylesheet.findall('paraStyle'):
attr = {}
attrs = ps.attributes
for i in range(attrs.length):
name = attrs.item(i).localName
attr[name] = ps.get(name)
attrs = []
for a in attr:
if a in self._tags:
attrs.append("%s:%s" % self._tags[a](attr[a]))
if len(attrs):
result += "p."+attr['name']+" {"+'; '.join(attrs)+"}\n"
self.result = result
def render(self):
return ''
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.localName in self._styles:
result = self._styles[node.localName](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
self.localcontext = localcontext
self.frame_pos = -1
self.frames = []
self.template_order = []
self.page_template = {}
self.loop = 0
self._tags = {
'drawString': _rml_tmpl_draw_string,
'drawRightString': _rml_tmpl_draw_string,
'drawCentredString': _rml_tmpl_draw_string,
'lines': _rml_tmpl_draw_lines
}
self.style = _rml_draw_style()
for pt in node.findall('pageTemplate'):
frames = {}
id = pt.get('id')
self.template_order.append(id)
for tmpl in pt.findall('frame'):
posy = int(utils.unit_get(tmpl.get('y1'))) #+utils.unit_get(tmpl.get('height')))
posx = int(utils.unit_get(tmpl.get('x1')))
frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
for tmpl in node.findall('pageGraphics'):
for n in tmpl.getchildren():
if n.nodeType==n.ELEMENT_NODE:
if n.localName in self._tags:
t = self._tags[n.localName](n, self.style)
frames[(t.posy,t.posx,n.localName)] = t
else:
self.style.update(n)
keys = frames.keys()
keys.sort()
keys.reverse()
self.page_template[id] = []
for key in range(len(keys)):
if key>0 and keys[key-1][0] == keys[key][0]:
if type(self.page_template[id][-1]) == type(frames[keys[key]]):
if self.page_template[id][-1].tag_mergeable():
self.page_template[id][-1].merge(frames[keys[key]])
continue
self.page_template[id].append(frames[keys[key]])
self.template = self.template_order[0]
def _get_style(self):
return self.style
def set_next_template(self):
self.template = self.template_order[(self.template_order.index(self.template) + 1) % len(self.template_order)]
self.frame_pos = -1
def set_template(self, name):
self.template = name
self.frame_pos = -1
def frame_start(self):
result = ''
frames = self.page_template[self.template]
ok = True
while ok:
self.frame_pos += 1
if self.frame_pos>=len(frames):
self.frame_pos=0
self.loop=1
ok = False
continue
f = frames[self.frame_pos]
result+=f.tag_start()
ok = not f.tag_end()
if ok:
result+=f.tag_stop()
return result
def frame_stop(self):
frames = self.page_template[self.template]
f = frames[self.frame_pos]
result=f.tag_stop()
return result
def start(self):
return ''
def end(self):
return "template end\n"
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
self.localcontext = {} if localcontext is None else localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.result = ''
def render(self, out):
#el = self.etree.findall('docinit')
#if el:
#self.docinit(el)
#el = self.etree.findall('stylesheet')
#self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('template')
self.result =""
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self)
stories = utils._child_get(self.etree, self, 'story')
for story in stories:
if self.result:
self.result += '\f'
f = _flowable(pt_obj,story,self.localcontext)
self.result += f.render(story)
del f
else:
self.result = "<cannot render w/o template>"
self.result += '\n'
out.write( self.result)
def parseNode(rml, localcontext=None,fout=None, images=None, path='.',title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
fp = StringIO.StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None,fout=None, images=None, path='.',title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO.StringIO()
r.render(fp)
return fp.getvalue()
def trml2pdf_help():
print 'Usage: rml2txt input.rml >output.txt'
print 'Render the standard input (RML) and output a TXT file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()).encode('iso8859-7')
else:
print 'Usage: trml2txt input.rml >output.txt'
print 'Try \'trml2txt --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RiccardoPecora/MP | Lib/site-packages/numpy/compat/_inspect.py | 94 | 9117 | """Subset of inspect module from upstream python
We use this instead of upstream because upstream inspect is slow to import, and
significantly contributes to numpy import times. Importing this copy has almost
no overhead.
"""
import types
# dis is needed by getargs() below when decoding anonymous (tuple) arguments.
import dis
__all__ = ['getargspec', 'formatargspec']
# ----------------------------------------------------------- type-checking
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
im_class class object in which this method belongs
im_func function object containing implementation of method
im_self instance to which this method is bound, or None"""
return isinstance(object, types.MethodType)
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
func_code code object containing compiled function bytecode
func_defaults tuple of any default values for arguments
func_doc (same as __doc__)
func_globals global namespace in which this function was defined
func_name (same as __name__)"""
return isinstance(object, types.FunctionType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('arg is not a code object')
code = co.co_code
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
step = 0
# The following acrobatics are for anonymous (tuple) arguments.
for i in range(nargs):
if args[i][:1] in ['', '.']:
stack, remain, count = [], [], []
while step < len(code):
op = ord(code[step])
step = step + 1
if op >= dis.HAVE_ARGUMENT:
opname = dis.opname[op]
value = ord(code[step]) + ord(code[step+1])*256
step = step + 2
if opname in ['UNPACK_TUPLE', 'UNPACK_SEQUENCE']:
remain.append(value)
count.append(value)
elif opname == 'STORE_FAST':
stack.append(names[value])
# Special case for sublists of length 1: def foo((bar))
# doesn't generate the UNPACK_TUPLE bytecode, so if
# `remain` is empty here, we have such a sublist.
if not remain:
stack[0] = [stack[0]]
break
else:
remain[-1] = remain[-1] - 1
while remain[-1] == 0:
remain.pop()
size = count.pop()
stack[-size:] = [stack[-size:]]
if not remain: break
remain[-1] = remain[-1] - 1
if not remain: break
args[i] = stack[0]
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, varkw
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
"""
if ismethod(func):
func = func.im_func
if not isfunction(func):
raise TypeError('arg is not a Python function')
args, varargs, varkw = getargs(func.func_code)
return args, varargs, varkw, func.func_defaults
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names (it may contain nested lists).
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return args, varargs, varkw, frame.f_locals
def joinseq(seq):
if len(seq) == 1:
return '(' + seq[0] + ',)'
else:
return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
"""Recursively walk a sequence, stringifying each element."""
if type(object) in [types.ListType, types.TupleType]:
return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
else:
return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargspec.
The first four arguments are (args, varargs, varkw, defaults). The
other four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i in range(len(args)):
spec = strseq(args[i], formatarg, join)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(varargs))
if varkw is not None:
specs.append(formatvarkw(varkw))
return '(' + ', '.join(specs) + ')'
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
if __name__ == '__main__':
import inspect
def foo(x, y, z=None):
return None
print inspect.getargs(foo.func_code)
print getargs(foo.func_code)
print inspect.getargspec(foo)
print getargspec(foo)
print inspect.formatargspec(*inspect.getargspec(foo))
print formatargspec(*getargspec(foo))
| gpl-3.0 |
zhangxq5012/sky_engine | sky/tools/webkitpy/layout_tests/run_webkit_tests.py | 2 | 22007 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import os
import subprocess
import sys
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing
from webkitpy.layout_tests.generate_results_dashboard import GenerateDashBoard
_log = logging.getLogger(__name__)
def main(argv, stdout, stderr):
options, args = parse_args(argv)
if options.platform and 'test' in options.platform and not 'browser_test' in options.platform:
# It's a bit lame to import mocks into real code, but this allows the user
# to run tests against the test platform interactively, which is useful for
# debugging test failures.
from webkitpy.common.host_mock import MockHost
host = MockHost()
else:
host = Host()
if options.lint_test_files:
from webkitpy.layout_tests.lint_test_expectations import run_checks
return run_checks(host, options, stderr)
try:
port = host.port_factory.get(options.platform, options)
except NotImplementedError, e:
# FIXME: is this the best way to handle unsupported port names?
print >> stderr, str(e)
return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
try:
run_details = run_tests(port, options, args, stderr)
if ((run_details.exit_code not in test_run_results.ERROR_CODES or
run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and
not run_details.initial_results.keyboard_interrupted):
bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
bot_printer.print_results(run_details)
if options.enable_versioned_results:
gen_dash_board = GenerateDashBoard(port)
gen_dash_board.generate()
if run_details.exit_code != 0:
return run_details.exit_code
analyzer_result = run_analyzer(port, options, args, stderr)
return analyzer_result
# We still need to handle KeyboardInterrupt, at least for webkitpy unittest cases.
except KeyboardInterrupt:
return test_run_results.INTERRUPTED_EXIT_STATUS
except test_run_results.TestRunException as e:
print >> stderr, e.msg
return e.code
except BaseException as e:
if isinstance(e, Exception):
print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
traceback.print_exc(file=stderr)
return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
def parse_args(args):
option_group_definitions = []
option_group_definitions.append(("Platform options", platform_options()))
option_group_definitions.append(("Configuration options", configuration_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
option_group_definitions.append(("Android-specific Options", [
optparse.make_option("--adb-device",
action="append", default=[],
help="Run Android layout tests on these devices."),
# FIXME: Flip this to be off by default once we can log the device setup more cleanly.
optparse.make_option("--no-android-logging",
action="store_false", dest='android_logging', default=True,
help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"),
]))
option_group_definitions.append(("Results Options", [
optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--results-directory", help="Location of test results"),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
optparse.make_option("--new-baseline", action="store_true",
default=False, help="Save generated results as new baselines "
"into the *most-specific-platform* directory, overwriting whatever's "
"already there. Equivalent to --reset-results --add-platform-exceptions"),
optparse.make_option("--reset-results", action="store_true",
default=False, help="Reset expectations to the "
"generated results in their existing location."),
optparse.make_option("--no-new-test-results", action="store_false",
dest="new_test_results", default=True,
help="Don't create new baselines when no expected results exist"),
#FIXME: we should support a comma separated list with --pixel-test-directory as well.
optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
help="A directory where it is allowed to execute tests as pixel tests. "
"Specify multiple times to add multiple directories. "
"This option implies --pixel-tests. If specified, only those tests "
"will be executed as pixel tests that are located in one of the "
"directories enumerated with the option. Some ports may ignore this "
"option while others can have a default value that can be overridden here."),
optparse.make_option("--skip-failing-tests", action="store_true",
default=False, help="Skip tests that are expected to fail. "
"Note: When using this option, you might miss new crashes "
"in these tests."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to the driver "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative driver binary to use"),
optparse.make_option("--additional-platform-directory", action="append",
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
optparse.make_option("--additional-expectations", action="append", default=[],
help="Path to a test_expectations file that will override previous expectations. "
"Specify multiple times for multiple sets of overrides."),
optparse.make_option("--compare-port", action="store", default=None,
help="Use the specified port's baselines first"),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"are done"),
optparse.make_option("--full-results-html", action="store_true",
default=False,
help="Show all failures in results.html, rather than only regressions"),
optparse.make_option("--no-clobber-old-results", action="store_false",
dest="clobber_old_results", default=True,
help="Clobbers test results from previous runs."),
optparse.make_option("--enable-versioned-results", action="store_true",
default=False, help="Archive the test results for later access."),
optparse.make_option("--smoke", action="store_true",
help="Run just the SmokeTests"),
optparse.make_option("--no-smoke", dest="smoke", action="store_false",
help="Do not run just the SmokeTests"),
]))
option_group_definitions.append(("Testing Options", [
optparse.make_option("--build", dest="build",
action="store_true", default=True,
help="Check to ensure the build is up-to-date (default)."),
optparse.make_option("--no-build", dest="build",
action="store_false", help="Don't check to see if the build is up-to-date."),
optparse.make_option("-n", "--dry-run", action="store_true",
default=False,
help="Do everything but actually run the tests or upload results."),
optparse.make_option("--nocheck-sys-deps", action="store_true",
default=False,
help="Don't check the system dependencies (themes)"),
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"the driver; option is split on whitespace before "
"running. (Example: --wrapper='valgrind --smc-check=all')"),
optparse.make_option("-i", "--ignore-tests", action="append", default=[],
help="directories or test to ignore (may specify multiple times)"),
optparse.make_option("--ignore-flaky-tests", action="store",
help=("Control whether tests that are flaky on the bots get ignored."
"'very-flaky' == Ignore any tests that flaked more than once on the bot."
"'maybe-flaky' == Ignore any tests that flaked once on the bot."
"'unexpected' == Ignore any tests that had unexpected results on the bot.")),
optparse.make_option("--ignore-builder-category", action="store",
help=("The category of builders to use with the --ignore-flaky-tests "
"option ('layout' or 'deps').")),
optparse.make_option("--test-list", action="append",
help="read list of tests to run from file", metavar="FILE"),
optparse.make_option("--skipped", action="store", default=None,
help=("control how tests marked SKIP are run. "
"'default' == Skip tests unless explicitly listed on the command line, "
"'ignore' == Run them anyway, "
"'only' == only run the SKIP tests, "
"'always' == always skip, even if listed on the command line.")),
optparse.make_option("--time-out-ms",
help="Set the timeout for each test"),
optparse.make_option("--order", action="store", default="random-seeded",
help=("determine the order in which the test cases will be run. "
"'none' == use the order in which the tests were listed either in arguments or test list, "
"'natural' == use the natural order (default), "
"'random-seeded' == randomize the test order using a fixed seed, "
"'random' == randomize the test order.")),
optparse.make_option("--run-chunk",
help=("Run a specified chunk (n:l), the nth of len l, "
"of the layout tests")),
optparse.make_option("--run-part", help=("Run a specified part (n:m), "
"the nth of m parts, of the layout tests")),
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
"the driver is relaunched."), type="int", default=None),
optparse.make_option("--run-singly", action="store_true",
default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
optparse.make_option("--child-processes",
help="Number of drivers to run in parallel."),
# FIXME: Display default number of child processes that will run.
optparse.make_option("-f", "--fully-parallel", action="store_true",
help="run all tests in parallel", default=True),
optparse.make_option("--exit-after-n-failures", type="int", default=None,
help="Exit after the first N failures instead of running all "
"tests"),
optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
default=None, help="Exit after the first N crashes instead of "
"running all tests"),
optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
optparse.make_option("--retry-failures", action="store_true",
help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
optparse.make_option("--no-retry-failures", action="store_false",
dest="retry_failures",
help="Don't re-try any tests that produce unexpected results."),
optparse.make_option("--max-locked-shards", type="int", default=0,
help="Set the maximum number of locked shards"),
optparse.make_option("--additional-env-var", type="string", action="append", default=[],
help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
optparse.make_option("--driver-logging", action="store_true",
help="Print detailed logging of the driver/content_shell"),
optparse.make_option("--disable-breakpad", action="store_true",
help="Don't use breakpad to symbolize unexpected crashes."),
optparse.make_option("--enable-leak-detection", action="store_true",
help="Enable the leak detection of DOM objects."),
optparse.make_option("--enable-sanitizer", action="store_true",
help="Only alert on sanitizer-related errors and crashes"),
optparse.make_option("--path-to-server", action="store",
help="Path to a locally build sky_server executable."),
]))
option_group_definitions.append(("Miscellaneous Options", [
optparse.make_option("--lint-test-files", action="store_true",
default=False, help=("Makes sure the test files parse for all "
"configurations. Does not run any tests.")),
]))
# FIXME: Move these into json_results_generator.py
option_group_definitions.append(("Result JSON Options", [
optparse.make_option("--master-name", help="The name of the buildbot master."),
optparse.make_option("--builder-name", default="",
help=("The name of the builder shown on the waterfall running "
"this script e.g. WebKit.")),
optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
help=("The build number of the builder running this script.")),
optparse.make_option("--test-results-server", default="",
help=("If specified, upload results json files to this appengine "
"server.")),
optparse.make_option("--write-full-results-to",
help=("If specified, copy full_results.json from the results dir "
"to the specified path.")),
]))
option_parser = optparse.OptionParser()
for group_name, group_options in option_group_definitions:
option_group = optparse.OptionGroup(option_parser, group_name)
option_group.add_options(group_options)
option_parser.add_option_group(option_group)
return option_parser.parse_args(args)
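# Illustrative invocation (assumed example, not taken from the original source),
# showing how a typical command line maps onto the options defined above:
#
#   run-webkit-tests --child-processes=4 --order=natural --iterations=2 fast/dom
#
# would parse into options.child_processes == "4", options.order == "natural",
# options.iterations == 2, and args == ["fast/dom"].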
def _set_up_derived_options(port, options, args):
"""Sets the options values that depend on other options values."""
if not options.child_processes:
options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
str(port.default_child_processes()))
if not options.max_locked_shards:
options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
str(port.default_max_locked_shards())))
if not options.configuration:
options.configuration = port.default_configuration()
if options.pixel_tests is None:
options.pixel_tests = port.default_pixel_tests()
if not options.time_out_ms:
options.time_out_ms = str(port.default_timeout_ms())
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
if options.additional_platform_directory:
additional_platform_directories = []
for path in options.additional_platform_directory:
additional_platform_directories.append(port.host.filesystem.abspath(path))
options.additional_platform_directory = additional_platform_directories
if options.new_baseline:
options.reset_results = True
options.add_platform_exceptions = True
if options.pixel_test_directories:
options.pixel_tests = True
        verified_dirs = set()
pixel_test_directories = options.pixel_test_directories
for directory in pixel_test_directories:
# FIXME: we should support specifying the directories all the ways we support it for additional
# arguments specifying which tests and directories to run. We should also move the logic for that
# to Port.
filesystem = port.host.filesystem
if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
_log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
else:
                verified_dirs.add(directory)
        options.pixel_test_directories = list(verified_dirs)
if options.run_singly:
options.batch_size = 1
options.verbose = True
if not args and not options.test_list and options.smoke is None:
options.smoke = port.default_smoke_test_only()
if options.smoke:
if not args and not options.test_list and options.retry_failures is None:
# Retry failures by default if we're doing just a smoke test (no additional tests).
options.retry_failures = True
if not options.test_list:
options.test_list = []
options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests'))
if not options.skipped:
options.skipped = 'always'
if not options.skipped:
options.skipped = 'default'
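# For example (assumed illustration): with --time-out-ms=6000 the derived
# options.slow_time_out_ms becomes "30000" (five times the regular timeout), and
# setting options.new_baseline forces both reset_results and
# add_platform_exceptions on.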
def run_tests(port, options, args, logging_stream):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
try:
printer = printing.Printer(port, options, logging_stream, logger=logger)
_set_up_derived_options(port, options, args)
manager = Manager(port, options, printer)
printer.print_config(port.results_directory())
run_details = manager.run(args)
_log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
return run_details
finally:
printer.cleanup()
def run_analyzer(port, options, args, logging_stream):
test_dir = os.path.dirname(os.path.abspath(__file__))
sky_tools_dir = os.path.dirname(os.path.dirname(test_dir))
analyzer_path = os.path.join(sky_tools_dir, 'skyanalyzer')
src_dir = os.path.dirname(os.path.dirname(sky_tools_dir))
analyzer_target_path = os.path.join(src_dir, 'examples/stocks/lib/main.dart')
analyzer_args = [
analyzer_path,
analyzer_target_path
]
try:
output = subprocess.check_output(analyzer_args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print >> logging_stream, "Analyzer found new issues:"
print >> logging_stream, e.output
return e.returncode
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
| bsd-3-clause |
jrha/aquilon | tests/broker/test_update_address.py | 2 | 7804 | #!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update address command."""
import unittest
if __name__ == "__main__":
from broker import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
class TestUpdateAddress(TestBrokerCommand):
def test_100_update_reverse(self):
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com",
comments="Test comment")
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com",
"--reverse_ptr", "arecord14.aqd-unittest.ms.com",
"--comments", "Test comment"]
self.noouttest(command)
self.dsdb_verify()
def test_105_verify_arecord15(self):
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Comments: Test comment", command)
self.matchoutput(out, "Reverse PTR: arecord14.aqd-unittest.ms.com",
command)
def test_105_search_ptr(self):
command = ["search", "dns",
"--reverse_ptr", "arecord14.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "arecord15.aqd-unittest.ms.com", command)
def test_105_search_override(self):
command = ["search", "dns", "--reverse_override"]
out = self.commandtest(command)
self.matchoutput(out, "arecord15.aqd-unittest.ms.com", command)
def test_110_clear_ptr_override(self):
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com",
"--reverse_ptr", "arecord15.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_115_verify_arecord15(self):
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Reverse", command)
def test_115_verify_search(self):
command = ["search", "dns", "--reverse_override"]
out = self.commandtest(command)
self.matchclean(out, "arecord15.aqd-unittest.ms.com", command)
def test_120_update_ip(self):
old_ip = self.net.unknown[0].usable[15]
ip = self.net.unknown[0].usable[-1]
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com", ip=ip)
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip]
self.noouttest(command)
self.dsdb_verify()
def test_125_verify_arecord15(self):
ip = self.net.unknown[0].usable[-1]
command = ["show", "fqdn", "--fqdn", "arecord15.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "IP: %s" % ip, command)
def test_129_fix_ip(self):
        # Change the IP address back so as not to confuse other parts of the test suite
old_ip = self.net.unknown[0].usable[-1]
ip = self.net.unknown[0].usable[15]
self.dsdb_expect_update("arecord15.aqd-unittest.ms.com", ip=ip)
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip]
self.noouttest(command)
self.dsdb_verify()
def test_130_update_dyndhcp_noop(self):
command = ["update", "address",
"--fqdn", "dynamic-4-2-4-20.aqd-unittest.ms.com",
"--reverse_ptr", "dynamic-4-2-4-20.aqd-unittest.ms.com"]
self.noouttest(command)
self.dsdb_verify(empty=True)
def test_135_verify_dyndhcp(self):
command = ["show", "fqdn", "--fqdn",
"dynamic-4-2-4-20.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchclean(out, "Reverse", command)
def test_140_restricted_reverse(self):
command = ["update", "address",
"--fqdn", "arecord17.aqd-unittest.ms.com",
"--reverse_ptr", "reverse2.restrict.aqd-unittest.ms.com"]
out, err = self.successtest(command)
self.assertEmptyOut(out, command)
self.matchoutput(err,
"WARNING: Will create a reference to "
"reverse2.restrict.aqd-unittest.ms.com, but trying to "
"resolve it resulted in an error: Name or service "
"not known",
command)
self.dsdb_verify(empty=True)
def test_141_verify_reverse(self):
command = ["search", "dns", "--fullinfo",
"--fqdn", "arecord17.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out,
"Reverse PTR: reverse2.restrict.aqd-unittest.ms.com",
command)
self.matchclean(out, "reverse.restrict.aqd-unittest.ms.com", command)
command = ["search", "dns", "--record_type", "reserved_name"]
out = self.commandtest(command)
self.matchclean(out, "reverse.restrict", command)
self.matchoutput(out, "reverse2.restrict.aqd-unittest.ms.com", command)
def test_200_update_dyndhcp(self):
command = ["update", "address",
"--fqdn", "dynamic-4-2-4-20.aqd-unittest.ms.com",
"--reverse_ptr", "unittest20.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out, "The reverse PTR record cannot be set for DNS "
"records used for dynamic DHCP.", command)
def test_200_ip_conflict(self):
ip = self.net.unknown[0].usable[14]
command = ["update", "address",
"--fqdn", "arecord15.aqd-unittest.ms.com", "--ip", ip]
out = self.badrequesttest(command)
self.matchoutput(out, "IP address %s is already used by DNS record "
"arecord14.aqd-unittest.ms.com." % ip, command)
def test_200_update_primary(self):
command = ["update", "address",
"--fqdn", "unittest20.aqd-unittest.ms.com",
"--ip", self.net.unknown[0].usable[-1]]
out = self.badrequesttest(command)
self.matchoutput(out, "DNS Record unittest20.aqd-unittest.ms.com is "
"a primary name, and its IP address cannot be "
"changed.", command)
def test_200_update_used(self):
command = ["update", "address",
"--fqdn", "unittest20-e1.aqd-unittest.ms.com",
"--ip", self.net.unknown[0].usable[-1]]
out = self.badrequesttest(command)
self.matchoutput(out, "DNS Record unittest20-e1.aqd-unittest.ms.com is "
"already used by the following interfaces, and its "
"IP address cannot be changed: "
"unittest20.aqd-unittest.ms.com/eth1.",
command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateAddress)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
2014c2g2/2014c2 | w2/static/Brython2.0.0-20140209-164925/Lib/xml/etree/__init__.py | 1200 | 1604 | # $Id: __init__.py 3375 2008-02-13 08:05:08Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
| gpl-2.0 |
milad-soufastai/ansible-modules-extras | windows/win_owner.py | 63 | 1658 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2015, Hans-Joachim Kliemeck <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_owner
version_added: "2.1"
short_description: Set owner
description:
- Set owner of files or directories
options:
path:
description:
- Path to be used for changing owner
required: true
user:
description:
- Name to be used for changing owner
required: true
recurse:
description:
- Indicates if the owner should be changed recursively
required: false
choices:
- no
- yes
default: no
author: Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = '''
# Playbook example
---
- name: Change owner of Path
win_owner:
path: 'C:\\apache\\'
user: apache
recurse: yes
- name: Set the owner of root directory
win_owner:
path: 'C:\\apache\\'
user: SYSTEM
recurse: no
'''
RETURN = '''
''' | gpl-3.0 |
naturalness/unnaturalcode | unnaturalcode/check_eclipse_syntax.py | 2 | 9733 | #!/usr/bin/python
# Copyright 2017 Dhvani Patel
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
# Takes the path to a file of Java code and checks it for compile errors
# NOTE: FOR ECLIPSE
import os
import subprocess
import sys
import tempfile
from compile_error import CompileError
# Method for finding the index of the nth occurrence of a character/substring in a string
def find_nth(haystack, needle, n):
start = haystack.find(needle)
while start >= 0 and n > 1:
start = haystack.find(needle, start+len(needle))
n -= 1
return start
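# Illustrative behaviour (assumed examples):
#   find_nth("a,b,c", ",", 2) == 3    (index of the second comma)
#   find_nth("a,b,c", ";", 1) == -1   (needle not present)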
# Main method
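# Returns None when the file compiles cleanly; otherwise returns a tuple of
# (total line count, error indices, error line numbers, suggested tokens, error
# types), where each error type is 'i' (insert), 's' (substitute), 'd' (delete)
# or '' (unclassified). (Summary assumed from the code below.)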
def checkEclipseSyntax(src, flag_source):
with open(src) as f:
for i, l in enumerate(f):
pass
numTotLines = i + 1
with open (src, "r") as myfile:
data = myfile.read()
#print data
myFile = open("ToCheckEc.java", "w")
myFile.write(data)
myFile.close()
if flag_source == False:
proc = subprocess.Popen(['java', '-jar', '../Downloads/ecj-4.7.jar', 'ToCheckEc.java', '-maxProblems', '500', '-source', '1.8', '-nowarn'], stderr=subprocess.PIPE)
elif flag_source == True:
proc = subprocess.Popen(['java', '-jar', '../Downloads/ecj-4.7.jar', 'ToCheckEc.java', '-maxProblems', '500', '-nowarn'], stderr=subprocess.PIPE)
streamdata, err = proc.communicate()
rc = proc.returncode
if rc == 0:
# No errors, all good
os.remove("ToCheckEc.java")
return None
else:
# Error, disect data for constructor
#print err
err = err[:len(err)-1]
#print err
lastLine = err.rfind("\n")
#print lastLine
#print "split"
#print len(err)
lastErrorNum = err[lastLine:]
cutOff = find_nth(lastErrorNum, '(', 1)
lastError = lastErrorNum[cutOff+1:lastErrorNum.index('error')-1]
numError = int(lastError)
lineNums = []
insToks = []
indRepeats = []
typeErrors = []
flagError = False
for ind in range(numError):
flagError = True
#print numError
fileInd = find_nth(err, "(at line ", ind+1)
temp = err[fileInd:]
#print "OK"
#print temp
before = len(insToks)
#print before
synErrInd = find_nth(temp, "Syntax error", 1)
flagInd = find_nth(temp, "----------\n", 1)
#print flagError
#print flagInd
if synErrInd != -1 and synErrInd < flagInd:
actLine = temp[synErrInd:]
tokInsInd = find_nth(actLine, ", insert", 1)
#print tokInsInd
#print "dhvani"
if tokInsInd != -1 and (tokInsInd+synErrInd) < flagInd:
#print "HERE TOO"
cut = find_nth(actLine, "\" ", 1)
typeErrors.append('i')
toksIns = actLine[tokInsInd+10:cut]
#print toksIns
insToks.append(toksIns)
flagError = True
stringInd = find_nth(temp, "String literal is not properly closed by a double-quote", 1)
if stringInd != -1 and stringInd < flagInd:
typeErrors.append('i')
toksIns = "\""
insToks.append(toksIns)
flagError = True
subErrInd = find_nth(temp, "Syntax error on token", 1)
if subErrInd != -1 and subErrInd < flagInd:
cutLine = temp[subErrInd:]
fixTok = find_nth(cutLine, "expected after this token", 1)
if fixTok != -1 and (subErrInd + fixTok) < flagInd:
cutInd = find_nth(cutLine,"\", ", 1)
toksSub = cutLine[cutInd+3:fixTok-1]
typeErrors.append('i')
insToks.append(toksSub)
flagError = True
if fixTok == -1:
fixTokCheck = find_nth(cutLine, "expected before this token", 1)
if fixTokCheck != -1 and (fixTokCheck + subErrInd) < flagInd:
cutInd = find_nth(cutLine,"\", ", 1)
toksSub = cutLine[cutInd+3:fixTokCheck-1]
typeErrors.append('i')
insToks.append(toksSub)
flagError = True
if fixTokCheck == -1:
checkSub = find_nth(cutLine, "expected", 1)
if checkSub != -1 and (checkSub + subErrInd)< flagInd:
#print "HERE"
cutInd = find_nth(cutLine,"\", ", 1)
toksSub = cutLine[cutInd+3:checkSub-1]
typeErrors.append('s')
insToks.append(toksSub)
#print toksSub
flagError = True
delInd = find_nth(cutLine, ", delete this token", 1)
if delInd != -1 and (delInd + subErrInd) < flagInd:
delTok = temp[subErrInd+23:subErrInd+delInd-1]
typeErrors.append('d')
insToks.append(delTok)
flagError = True
mulErrInd = find_nth(temp, "Syntax error on tokens, delete these tokens", 1)
if mulErrInd != -1 and mulErrInd < flagInd:
typeErrors.append('d')
insToks.append('')
#print toksSub
flagError = True
fakeInd = find_nth(temp, "cannot be resolved", 1)
fakeTwoInd = find_nth(temp, "is undefined", 1)
if fakeInd != -1 and fakeInd < flagInd:
flagError = False
if fakeTwoInd != -1 and fakeTwoInd < flagInd:
flagError = False
#print flagError
sourceCheckInd = find_nth(temp, "are only available if source level is 1.5 or greater", 1)
if sourceCheckInd != -1 and sourceCheckInd < flagInd:
#print "here"
#print temp
flagError = False
#print before
#print insToks
checkAsserInd = temp.find("must be defined in its own file")
#print checkAsserInd
#print flagInd
if checkAsserInd == -1 or checkAsserInd > flagInd:
#print "HERE-------------------------------------------------------------"
if len(insToks) != before+1:
bruhFakeCheck = find_nth(temp, "type", 1)
if bruhFakeCheck == -1:
bruhFakeCheck = find_nth(temp, "Type", 1)
if bruhFakeCheck != -1 and bruhFakeCheck < flagInd:
#print "?"
realCheck = find_nth(temp, "is out of range", 1)
realTwoCheck = find_nth(temp, "Incorrect number of arguments", 1)
comeCheck = find_nth(temp, "void is an invalid type for the", 1)
anotCheck = find_nth(temp, "only final is permitted", 1)
randCheck = find_nth(temp, "invalid TypeDeclaration", 1)
andCheck = find_nth(temp, "must provide either dimension expressions or an array initializer", 1)
synCheck = find_nth(temp, "Syntax error on token", 1)
if realCheck != -1 and realCheck < flagInd:
flagError = True
elif realTwoCheck != -1 and realTwoCheck < flagInd:
flagError = True
elif comeCheck != -1 and comeCheck < flagInd:
flagError = True
elif anotCheck != -1 and anotCheck < flagInd:
flagError = True
elif randCheck != -1 and randCheck < flagInd:
flagError = True
elif andCheck != -1 and andCheck < flagInd:
flagError = True
elif synCheck != -1 and synCheck < flagInd:
flagError = True
else:
flagError = False
#print flagError
anotherFlag = find_nth(temp, "Return type", 1)
if anotherFlag != -1 and anotherFlag < flagInd:
flagError = True
if flagError == True:
print "dhvani"
typeErrors.append('')
insToks.append('')
#print flagError
if flagError == True:
cutColInd = find_nth(temp, ")", 1)
line = err[fileInd+9:cutColInd+fileInd]
lineNums.append(int(line))
#print insToks
#print insToks
#print lineNums
#print "----OUT----"
checkInd = err.find("must be defined in its own file")
#print msgNo
#print lineNums
if checkInd != -1:
check = err[:checkInd]
lastCheck = check.rfind("(at line ")
tempR = err[lastCheck:]
cutColInd = find_nth(tempR, ")", 1)
lineRemov = err[lastCheck+9:cutColInd+lastCheck]
rid = int(lineRemov)
goOver = lineNums[:]
flag = False
for x in goOver:
if x == rid and flag == False:
lineNums.remove(rid)
flag = True
checkIndAgain = find_nth(err, 'must be defined in its own file', 2)
count = 2
while checkIndAgain != -1:
check = err[:checkIndAgain]
lastCheck = check.rfind("(at line ")
tempR = err[lastCheck:]
cutColInd = find_nth(tempR, ")", 1)
lineRemov = err[lastCheck+9:cutColInd+lastCheck]
rid = int(lineRemov)
goOver = lineNums[:]
flag = False
for x in goOver:
if x == rid and flag == False:
lineNums.remove(rid)
flag = True
count += 1
checkIndAgain = find_nth(err, 'must be defined in its own file', count)
msgNo = []
for x in range(len(lineNums)):
msgNo.append(x+1)
#print msgNo
#print lineNums
if len(msgNo) == 0 and len(lineNums) == 0:
os.remove("ToCheckEc.java")
#return numTotLines, [0], [0], [''], ['']
#print "Here"
#print flag_source
if flag_source == True:
return numTotLines, [0], [0], [''], ['']
else:
#print "important"
return checkEclipseSyntax(src, True)
else:
#errorObj = CompileError(fileName, line, column, None, text, errorname)
#print err
#print msgNo
#print lineNums
#print typeErrors
#print insToks
#print len(msgNo)
#print len(lineNums)
#print len(insToks)
#print len(typeErrors)
os.remove("ToCheckEc.java")
assert len(msgNo) == len(lineNums) == len(typeErrors) == len(insToks)
return numTotLines, msgNo, lineNums, insToks, typeErrors
| agpl-3.0 |
seaotterman/tensorflow | tensorflow/tools/compatibility/tf_upgrade_test.py | 48 | 6045 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.tools.compatibility import tf_upgrade
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 1.0.
We also test whether a converted file is executable. test_file_v0_11.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = tf_upgrade.TensorFlowCodeUpgrader()
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.mul(a, b)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.mul` to `tf.multiply`"))
def testRename(self):
text = "tf.mul(a, tf.sub(b, c))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.multiply(a, tf.subtract(b, c))\n")
def testRenamePack(self):
text = "tf.pack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.stack(a)\n")
text = "tf.unpack(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.unstack(a)\n")
def testReorder(self):
text = "tf.concat(a, b)\ntf.split(a, b, c)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n"
"tf.split(axis=a, num_or_size_splits=b, value=c)\n")
def testConcatReorderWithKeywordArgs(self):
text = "tf.concat(concat_dim=a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
text = "tf.concat(values=b, concat_dim=a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(values=b, axis=a)\n")
text = "tf.concat(a, values=b)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.concat(axis=a, values=b)\n")
def testConcatReorderNested(self):
text = "tf.concat(a, tf.concat(c, d))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.concat(axis=a, values=tf.concat(axis=c, values=d))\n")
def testInitializers(self):
text = ("tf.zeros_initializer;tf.zeros_initializer ()\n"
"tf.ones_initializer;tf.ones_initializer ()\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text, "tf.zeros_initializer();tf.zeros_initializer ()\n"
"tf.ones_initializer();tf.ones_initializer ()\n")
def testKeyword(self):
text = "tf.reduce_any(a, reduction_indices=[1, 2])\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.reduce_any(a, axis=[1, 2])\n")
def testComplexExpression(self):
text = "(foo + bar)[a].word()"
_ = self._upgrade(text)
def testReverse(self):
text = "tf.reverse(a, b)\n"
_, unused_report, errors, new_text = self._upgrade(text)
    self.assertEqual(new_text, text)
self.assertEqual(errors, ["test.py:1: tf.reverse requires manual check."])
def testListComprehension(self):
def _test(input, output):
_, unused_report, errors, new_text = self._upgrade(input)
self.assertEqual(new_text, output)
_test("tf.concat(0, \t[x for x in y])\n",
"tf.concat(axis=0, \tvalues=[x for x in y])\n")
_test("tf.concat(0,[x for x in y])\n",
"tf.concat(axis=0,values=[x for x in y])\n")
_test("tf.concat(0,[\nx for x in y])\n",
"tf.concat(axis=0,values=[\nx for x in y])\n")
_test("tf.concat(0,[\n \tx for x in y])\n",
"tf.concat(axis=0,values=[\n \tx for x in y])\n")
# TODO(aselle): Explicitly not testing command line interface and process_tree
# for now, since this is a one off utility.
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.mul(a, b)\n"
upgraded = "tf.multiply(a, b)\n"
temp_file.write(original)
temp_file.close()
upgrader = tf_upgrade.TensorFlowCodeUpgrader()
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
| apache-2.0 |
bgris/ODL_bgris | lib/python3.5/tkinter/simpledialog.py | 23 | 11424 | #
# An Introduction to Tkinter
#
# Copyright (c) 1997 by Fredrik Lundh
#
# This copyright applies to Dialog, askinteger, askfloat and askstring
#
# [email protected]
# http://www.pythonware.com
#
"""This modules handles dialog boxes.
It contains the following public symbols:
SimpleDialog -- A simple but flexible modal dialog box
Dialog -- a base class for dialogs
askinteger -- get an integer from the user
askfloat -- get a float from the user
askstring -- get a string from the user
"""
from tkinter import *
from tkinter import messagebox
import tkinter # used at _QueryDialog for tkinter._default_root
class SimpleDialog:
def __init__(self, master,
text='', buttons=[], default=None, cancel=None,
title=None, class_=None):
if class_:
self.root = Toplevel(master, class_=class_)
else:
self.root = Toplevel(master)
if title:
self.root.title(title)
self.root.iconname(title)
self.message = Message(self.root, text=text, aspect=400)
self.message.pack(expand=1, fill=BOTH)
self.frame = Frame(self.root)
self.frame.pack()
self.num = default
self.cancel = cancel
self.default = default
self.root.bind('<Return>', self.return_event)
for num in range(len(buttons)):
s = buttons[num]
b = Button(self.frame, text=s,
command=(lambda self=self, num=num: self.done(num)))
if num == default:
b.config(relief=RIDGE, borderwidth=8)
b.pack(side=LEFT, fill=BOTH, expand=1)
self.root.protocol('WM_DELETE_WINDOW', self.wm_delete_window)
self._set_transient(master)
def _set_transient(self, master, relx=0.5, rely=0.3):
widget = self.root
widget.withdraw() # Remain invisible while we figure out the geometry
widget.transient(master)
widget.update_idletasks() # Actualize geometry information
if master.winfo_ismapped():
m_width = master.winfo_width()
m_height = master.winfo_height()
m_x = master.winfo_rootx()
m_y = master.winfo_rooty()
else:
m_width = master.winfo_screenwidth()
m_height = master.winfo_screenheight()
m_x = m_y = 0
w_width = widget.winfo_reqwidth()
w_height = widget.winfo_reqheight()
x = m_x + (m_width - w_width) * relx
y = m_y + (m_height - w_height) * rely
if x+w_width > master.winfo_screenwidth():
x = master.winfo_screenwidth() - w_width
elif x < 0:
x = 0
if y+w_height > master.winfo_screenheight():
y = master.winfo_screenheight() - w_height
elif y < 0:
y = 0
widget.geometry("+%d+%d" % (x, y))
widget.deiconify() # Become visible at the desired location
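        # e.g. (assumed): with the defaults relx=0.5, rely=0.3 the dialog ends up
        # horizontally centered over its master and about a third of the way down.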
def go(self):
self.root.wait_visibility()
self.root.grab_set()
self.root.mainloop()
self.root.destroy()
return self.num
def return_event(self, event):
if self.default is None:
self.root.bell()
else:
self.done(self.default)
def wm_delete_window(self):
if self.cancel is None:
self.root.bell()
else:
self.done(self.cancel)
def done(self, num):
self.num = num
self.root.quit()
class Dialog(Toplevel):
'''Class to open dialogs.
This class is intended as a base class for custom dialogs
'''
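    # Minimal illustrative subclass (assumed sketch of the hooks documented below):
    #
    #   class MyDialog(Dialog):
    #       def body(self, master):
    #           self.entry = Entry(master)
    #           self.entry.grid(row=0)
    #           return self.entry     # widget that gets initial focus
    #       def apply(self):
    #           self.result = self.entry.get()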
def __init__(self, parent, title = None):
'''Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
'''
Toplevel.__init__(self, parent)
self.withdraw() # remain invisible for now
# If the master is not viewable, don't
# make the child transient, or else it
# would be opened withdrawn
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
if self.parent is not None:
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.deiconify() # become visible now
self.initial_focus.focus_set()
# wait for window to appear on screen before calling grab_set
self.wait_visibility()
self.grab_set()
self.wait_window(self)
def destroy(self):
'''Destroy the window'''
self.initial_focus = None
Toplevel.destroy(self)
#
# construction hooks
def body(self, master):
'''create dialog body.
return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
'''
pass
def buttonbox(self):
'''add standard button box.
override if you do not want the standard buttons
'''
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
#
# standard button semantics
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
if self.parent is not None:
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
'''validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
'''
return 1 # override
def apply(self):
'''process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
'''
pass # override
# --------------------------------------------------------------------
# convenience dialogues
class _QueryDialog(Dialog):
def __init__(self, title, prompt,
initialvalue=None,
minvalue = None, maxvalue = None,
parent = None):
if not parent:
parent = tkinter._default_root
self.prompt = prompt
self.minvalue = minvalue
self.maxvalue = maxvalue
self.initialvalue = initialvalue
Dialog.__init__(self, parent, title)
def destroy(self):
self.entry = None
Dialog.destroy(self)
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name="entry")
self.entry.grid(row=1, padx=5, sticky=W+E)
if self.initialvalue is not None:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def validate(self):
try:
result = self.getresult()
except ValueError:
messagebox.showwarning(
"Illegal value",
self.errormessage + "\nPlease try again",
parent = self
)
return 0
if self.minvalue is not None and result < self.minvalue:
messagebox.showwarning(
"Too small",
"The allowed minimum value is %s. "
"Please try again." % self.minvalue,
parent = self
)
return 0
if self.maxvalue is not None and result > self.maxvalue:
messagebox.showwarning(
"Too large",
"The allowed maximum value is %s. "
"Please try again." % self.maxvalue,
parent = self
)
return 0
self.result = result
return 1
class _QueryInteger(_QueryDialog):
errormessage = "Not an integer."
def getresult(self):
return self.getint(self.entry.get())
def askinteger(title, prompt, **kw):
'''get an integer from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is an integer
'''
d = _QueryInteger(title, prompt, **kw)
return d.result
class _QueryFloat(_QueryDialog):
errormessage = "Not a floating point value."
def getresult(self):
return self.getdouble(self.entry.get())
def askfloat(title, prompt, **kw):
'''get a float from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a float
'''
d = _QueryFloat(title, prompt, **kw)
return d.result
class _QueryString(_QueryDialog):
def __init__(self, *args, **kw):
if "show" in kw:
self.__show = kw["show"]
del kw["show"]
else:
self.__show = None
_QueryDialog.__init__(self, *args, **kw)
def body(self, master):
entry = _QueryDialog.body(self, master)
if self.__show is not None:
entry.configure(show=self.__show)
return entry
def getresult(self):
return self.entry.get()
def askstring(title, prompt, **kw):
'''get a string from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a string
'''
d = _QueryString(title, prompt, **kw)
return d.result
if __name__ == '__main__':
def test():
root = Tk()
def doit(root=root):
d = SimpleDialog(root,
text="This is a test dialog. "
"Would this have been an actual dialog, "
"the buttons below would have been glowing "
"in soft pink light.\n"
"Do you believe this?",
buttons=["Yes", "No", "Cancel"],
default=0,
cancel=2,
title="Test Dialog")
print(d.go())
print(askinteger("Spam", "Egg count", initialvalue=12*12))
print(askfloat("Spam", "Egg weight\n(in tons)", minvalue=1,
maxvalue=100))
print(askstring("Spam", "Egg label"))
t = Button(root, text='Test', command=doit)
t.pack()
q = Button(root, text='Quit', command=t.quit)
q.pack()
t.mainloop()
test()
| gpl-3.0 |
mapbased/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py | 116 | 10213 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import logging
import os.path
from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults
from webkitpy.common.config import urls
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.grammar import plural, pluralize, join_with_separators
_log = logging.getLogger(__name__)
class FlakyTestReporter(object):
def __init__(self, tool, bot_name):
self._tool = tool
self._bot_name = bot_name
# FIXME: Use the real port object
self._bot_info = BotInfo(tool, tool.deprecated_port().name())
def _author_emails_for_test(self, flaky_test):
test_path = path_for_layout_test(flaky_test)
commit_infos = self._tool.checkout().recent_commit_infos_for_files([test_path])
# This ignores authors which are not committers because we don't have their bugzilla_email.
return set([commit_info.author().bugzilla_email() for commit_info in commit_infos if commit_info.author()])
def _bugzilla_email(self):
# FIXME: This is kinda a funny way to get the bugzilla email,
# we could also just create a Credentials object directly
# but some of the Credentials logic is in bugzilla.py too...
self._tool.bugs.authenticate()
return self._tool.bugs.username
# FIXME: This should move into common.config
_bot_emails = set([
"[email protected]", # commit-queue
"[email protected]", # old commit-queue
"[email protected]", # style-queue, sheriff-bot, CrLx/Gtk EWS
"[email protected]", # Win EWS
# Mac EWS currently uses [email protected], but that's not normally a bot
])
def _lookup_bug_for_flaky_test(self, flaky_test):
bugs = self._tool.bugs.queries.fetch_bugs_matching_search(search_string=flaky_test)
if not bugs:
return None
# Match any bugs which are from known bots or the email this bot is using.
allowed_emails = self._bot_emails | set([self._bugzilla_email])
bugs = filter(lambda bug: bug.reporter_email() in allowed_emails, bugs)
if not bugs:
return None
if len(bugs) > 1:
# FIXME: There are probably heuristics we could use for finding
# the right bug instead of the first, like open vs. closed.
_log.warn("Found %s %s matching '%s' filed by a bot, using the first." % (pluralize('bug', len(bugs)), [bug.id() for bug in bugs], flaky_test))
return bugs[0]
def _view_source_url_for_test(self, test_path):
return urls.view_source_url("LayoutTests/%s" % test_path)
def _create_bug_for_flaky_test(self, flaky_test, author_emails, latest_flake_message):
format_values = {
'test': flaky_test,
'authors': join_with_separators(sorted(author_emails)),
'flake_message': latest_flake_message,
'test_url': self._view_source_url_for_test(flaky_test),
'bot_name': self._bot_name,
}
title = "Flaky Test: %(test)s" % format_values
description = """This is an automatically generated bug from the %(bot_name)s.
%(test)s has been flaky on the %(bot_name)s.
%(test)s was authored by %(authors)s.
%(test_url)s
%(flake_message)s
The bots will update this with information from each new failure.
If you believe this bug to be fixed or invalid, feel free to close. The bots will re-open if the flake re-occurs.
If you would like to track this test fix with another bug, please close this bug as a duplicate. The bots will follow the duplicate chain when making future comments.
""" % format_values
master_flake_bug = 50856 # MASTER: Flaky tests found by the commit-queue
return self._tool.bugs.create_bug(title, description,
component="Tools / Tests",
cc=",".join(author_emails),
blocked="50856")
# This is over-engineered, but it makes for pretty bug messages.
def _optional_author_string(self, author_emails):
if not author_emails:
return ""
heading_string = plural('author') if len(author_emails) > 1 else 'author'
authors_string = join_with_separators(sorted(author_emails))
return " (%s: %s)" % (heading_string, authors_string)
def _latest_flake_message(self, flaky_result, patch):
failure_messages = [failure.message() for failure in flaky_result.failures]
flake_message = "The %s just saw %s flake (%s) while processing attachment %s on bug %s." % (self._bot_name, flaky_result.test_name, ", ".join(failure_messages), patch.id(), patch.bug_id())
return "%s\n%s" % (flake_message, self._bot_info.summary_text())
def _results_diff_path_for_test(self, test_path):
# FIXME: This is a big hack. We should get this path from results.json
# except that old-run-webkit-tests doesn't produce a results.json
# so we just guess at the file path.
(test_path_root, _) = os.path.splitext(test_path)
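        # e.g. (assumed illustration): "fast/dom/foo.html" -> "fast/dom/foo-diffs.txt"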
return "%s-diffs.txt" % test_path_root
def _follow_duplicate_chain(self, bug):
while bug.is_closed() and bug.duplicate_of():
bug = self._tool.bugs.fetch_bug(bug.duplicate_of())
return bug
def _update_bug_for_flaky_test(self, bug, latest_flake_message):
self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message)
# This method is needed because our archive paths include a leading tmp/layout-test-results
def _find_in_archive(self, path, archive):
for archived_path in archive.namelist():
# Archives are currently created with full paths.
if archived_path.endswith(path):
return archived_path
return None
def _attach_failure_diff(self, flake_bug_id, flaky_test, results_archive_zip):
results_diff_path = self._results_diff_path_for_test(flaky_test)
# Check to make sure that the path makes sense.
# Since we're not actually getting this path from the results.html
# there is a chance it's wrong.
bot_id = self._tool.status_server.bot_id or "bot"
archive_path = self._find_in_archive(results_diff_path, results_archive_zip)
if archive_path:
results_diff = results_archive_zip.read(archive_path)
description = "Failure diff from %s" % bot_id
self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_diff, description, filename="failure.diff")
else:
_log.warn("%s does not exist in results archive, uploading entire archive." % results_diff_path)
description = "Archive of layout-test-results from %s" % bot_id
# results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading.
results_archive_file = results_archive_zip.fp
# Rewind the file object to start (since Mechanize won't do that automatically)
# See https://bugs.webkit.org/show_bug.cgi?id=54593
results_archive_file.seek(0)
self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_archive_file, description, filename="layout-test-results.zip")
def report_flaky_tests(self, patch, flaky_test_results, results_archive):
message = "The %s encountered the following flaky tests while processing attachment %s:\n\n" % (self._bot_name, patch.id())
for flaky_result in flaky_test_results:
flaky_test = flaky_result.test_name
bug = self._lookup_bug_for_flaky_test(flaky_test)
latest_flake_message = self._latest_flake_message(flaky_result, patch)
author_emails = self._author_emails_for_test(flaky_test)
if not bug:
_log.info("Bug does not already exist for %s, creating." % flaky_test)
flake_bug_id = self._create_bug_for_flaky_test(flaky_test, author_emails, latest_flake_message)
else:
bug = self._follow_duplicate_chain(bug)
# FIXME: Ideally we'd only make one comment per flake, not two. But that's not possible
# in all cases (e.g. when reopening), so for now file attachment and comment are separate.
self._update_bug_for_flaky_test(bug, latest_flake_message)
flake_bug_id = bug.id()
self._attach_failure_diff(flake_bug_id, flaky_test, results_archive)
message += "%s bug %s%s\n" % (flaky_test, flake_bug_id, self._optional_author_string(author_emails))
message += "The %s is continuing to process your patch." % self._bot_name
self._tool.bugs.post_comment_to_bug(patch.bug_id(), message)
| bsd-3-clause |
setten/pymatgen | pymatgen/alchemy/tests/test_transmuters.py | 4 | 4522 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
'''
Created on Mar 5, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Mar 5, 2012"
import unittest
import os
from pymatgen.alchemy.transmuters import CifTransmuter, PoscarTransmuter
from pymatgen.alchemy.filters import ContainsSpecieFilter
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation, RemoveSpeciesTransformation, \
OrderDisorderedStructureTransformation
from pymatgen.transformations.advanced_transformations import \
SuperTransformation
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CifTransmuterTest(unittest.TestCase):
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn", "Fe2+": "Mn2+"}))
tsc = CifTransmuter.from_filenames([os.path.join(test_dir,
"MultiStructure.cif")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "Li", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
class PoscarTransmuterTest(unittest.TestCase):
def test_init(self):
trans = []
trans.append(SubstitutionTransformation({"Fe": "Mn"}))
tsc = PoscarTransmuter.from_filenames([os.path.join(test_dir,
"POSCAR"),
os.path.join(test_dir,
"POSCAR")],
trans)
self.assertEqual(len(tsc), 2)
expected_ans = set(["Mn", "O", "P"])
for s in tsc:
els = set([el.symbol
for el in s.final_structure.composition.elements])
self.assertEqual(expected_ans, els)
def test_transmuter(self):
tsc = PoscarTransmuter.from_filenames(
[os.path.join(test_dir, "POSCAR")])
tsc.append_transformation(RemoveSpeciesTransformation('O'))
self.assertEqual(len(tsc[0].final_structure), 8)
tsc.append_transformation(SubstitutionTransformation({"Fe":
{"Fe2+": 0.25,
"Mn3+": .75},
"P": "P5+"}))
tsc.append_transformation(OrderDisorderedStructureTransformation(),
extend_collection=50)
self.assertEqual(len(tsc), 4)
t = SuperTransformation([SubstitutionTransformation({"Fe2+": "Mg2+"}),
SubstitutionTransformation({"Fe2+": "Zn2+"}),
SubstitutionTransformation({"Fe2+": "Be2+"})])
tsc.append_transformation(t, extend_collection=True)
self.assertEqual(len(tsc), 12)
for x in tsc:
self.assertEqual(len(x), 5, 'something might be wrong with the number of transformations in the history') #should be 4 trans + starting structure
#test the filter
tsc.apply_filter(ContainsSpecieFilter(['Zn2+', 'Be2+', 'Mn4+'],
strict_compare=True, AND=False))
self.assertEqual(len(tsc), 8)
self.assertEqual(tsc.transformed_structures[0].as_dict()[
'history'][-1]['@class'], 'ContainsSpecieFilter')
tsc.apply_filter(ContainsSpecieFilter(['Be2+']))
self.assertEqual(len(tsc), 4)
#Test set_parameter and add_tag.
tsc.set_parameter("para1", "hello")
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['para1'], 'hello')
tsc.add_tags(["world", "universe"])
self.assertEqual(tsc.transformed_structures[0]
.as_dict()['other_parameters']['tags'],
["world", "universe"])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| mit |
jostep/tensorflow | tensorflow/contrib/image/python/kernel_tests/image_ops_test.py | 45 | 10515 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.image.python.ops import image_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
class ImageOpsTest(test_util.TensorFlowTestCase):
def test_zeros(self):
with self.test_session():
for dtype in _DTYPES:
for shape in [(5, 5), (24, 24), (2, 24, 24, 3)]:
for angle in [0, 1, np.pi / 2.0]:
image = array_ops.zeros(shape, dtype)
self.assertAllEqual(
image_ops.rotate(image, angle).eval(),
np.zeros(shape, dtype.as_numpy_dtype()))
def test_rotate_even(self):
with self.test_session():
for dtype in _DTYPES:
image = array_ops.reshape(
math_ops.cast(math_ops.range(36), dtype), (6, 6))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([0.0, np.pi / 4.0, np.pi / 2.0],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]],
[[0, 3, 4, 11, 17, 0], [2, 3, 9, 16, 23, 23],
[1, 8, 15, 21, 22, 29], [6, 13, 20, 21, 27, 34],
[12, 18, 19, 26, 33, 33], [0, 18, 24, 31, 32, 0]],
[[5, 11, 17, 23, 29, 35], [4, 10, 16, 22, 28, 34],
[3, 9, 15, 21, 27, 33], [2, 8, 14, 20, 26, 32],
[1, 7, 13, 19, 25, 31], [0, 6, 12, 18, 24, 30]]])
def test_rotate_odd(self):
with self.test_session():
for dtype in _DTYPES:
image = array_ops.reshape(
math_ops.cast(math_ops.range(25), dtype), (5, 5))
image_rep = array_ops.tile(image[None, :, :, None], [3, 1, 1, 1])
angles = constant_op.constant([np.pi / 4.0, 1.0, -np.pi / 2.0],
dtypes.float32)
image_rotated = image_ops.rotate(image_rep, angles)
self.assertAllEqual(image_rotated[:, :, :, 0].eval(),
[[[0, 3, 8, 9, 0], [1, 7, 8, 13, 19],
[6, 6, 12, 18, 18], [5, 11, 16, 17, 23],
[0, 15, 16, 21, 0]],
[[0, 3, 9, 14, 0], [2, 7, 8, 13, 19],
[1, 6, 12, 18, 23], [5, 11, 16, 17, 22],
[0, 10, 15, 21, 0]],
[[20, 15, 10, 5, 0], [21, 16, 11, 6, 1],
[22, 17, 12, 7, 2], [23, 18, 13, 8, 3],
[24, 19, 14, 9, 4]]])
def test_compose(self):
with self.test_session():
for dtype in _DTYPES:
image = constant_op.constant(
[[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]], dtype=dtype)
# Rotate counter-clockwise by pi / 2.
rotation = image_ops.angles_to_projective_transforms(np.pi / 2, 4, 4)
# Translate right by 1 (the transformation matrix is always inverted,
# hence the -1).
translation = constant_op.constant([1, 0, -1,
0, 1, 0,
0, 0],
dtype=dtypes.float32)
composed = image_ops.compose_transforms(rotation, translation)
image_transformed = image_ops.transform(image, composed)
self.assertAllEqual(image_transformed.eval(),
[[0, 0, 0, 0],
[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 1, 1, 1]])
def test_bilinear(self):
with self.test_session():
image = constant_op.constant(
[[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
dtypes.float32)
# The following result matches:
# >>> scipy.ndimage.rotate(image, 45, order=1, reshape=False)
# which uses spline interpolation of order 1, equivalent to bilinear
# interpolation.
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.000, 0.000, 0.343, 0.000, 0.000],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.343, 0.914, 0.000, 0.914, 0.343],
[0.000, 0.586, 0.914, 0.586, 0.000],
[0.000, 0.000, 0.343, 0.000, 0.000]],
atol=0.001)
self.assertAllClose(
image_ops.rotate(image, np.pi / 4.0, interpolation="NEAREST").eval(),
[[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
def test_bilinear_uint8(self):
with self.test_session():
image = constant_op.constant(
np.asarray(
[[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 255, 0.0, 255, 0.0],
[0.0, 255, 255, 255, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]],
np.uint8),
dtypes.uint8)
# == np.rint((expected image above) * 255)
self.assertAllEqual(
image_ops.rotate(image, np.pi / 4.0, interpolation="BILINEAR").eval(),
[[0.0, 0.0, 87., 0.0, 0.0],
[0.0, 149, 233, 149, 0.0],
[87., 233, 0.0, 233, 87.],
[0.0, 149, 233, 149, 0.0],
[0.0, 0.0, 87., 0.0, 0.0]])
def _test_grad(self, shape_to_test):
with self.test_session():
test_image_shape = shape_to_test
test_image = np.random.randn(*test_image_shape)
test_image_tensor = constant_op.constant(
test_image, shape=test_image_shape)
test_transform = image_ops.angles_to_projective_transforms(
np.pi / 2, 4, 4)
output_shape = test_image_shape
output = image_ops.transform(test_image_tensor, test_transform)
left_err = gradient_checker.compute_gradient_error(
test_image_tensor,
test_image_shape,
output,
output_shape,
x_init_value=test_image)
self.assertLess(left_err, 1e-10)
def test_grad(self):
self._test_grad([16, 16])
self._test_grad([4, 12, 12])
self._test_grad([3, 4, 12, 12])
class BipartiteMatchTest(test_util.TensorFlowTestCase):
def _BipartiteMatchTest(self, distance_mat, distance_mat_shape,
num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match):
distance_mat_np = np.array(distance_mat, dtype=np.float32).reshape(
distance_mat_shape)
expected_row_to_col_match_np = np.array(expected_row_to_col_match,
dtype=np.int32)
expected_col_to_row_match_np = np.array(expected_col_to_row_match,
dtype=np.int32)
with self.test_session():
distance_mat_tf = constant_op.constant(distance_mat_np,
shape=distance_mat_shape)
location_to_prior, prior_to_location = image_ops.bipartite_match(
distance_mat_tf, num_valid_rows)
location_to_prior_np = location_to_prior.eval()
prior_to_location_np = prior_to_location.eval()
self.assertAllEqual(location_to_prior_np, expected_row_to_col_match_np)
self.assertAllEqual(prior_to_location_np, expected_col_to_row_match_np)
def testBipartiteMatch(self):
distance_mat = [0.5, 0.8, 0.1,
0.3, 0.2, 0.15]
num_valid_rows = 2
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows less than num-of-rows-in-distance-mat.
num_valid_rows = 1
expected_row_to_col_match = [2, -1]
expected_col_to_row_match = [-1, -1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
# The case of num_valid_rows being 0.
num_valid_rows = 0
expected_row_to_col_match = [-1, -1]
expected_col_to_row_match = [-1, -1, -1]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
    # The case of num_valid_rows being -1 (a negative value means all rows are used).
num_valid_rows = -1
# The expected results are the same as num_valid_rows being 2.
expected_row_to_col_match = [2, 1]
expected_col_to_row_match = [-1, 1, 0]
self._BipartiteMatchTest(distance_mat, [2, 3], num_valid_rows,
expected_row_to_col_match,
expected_col_to_row_match)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
yun-percy/Android_Shell_Tools | sublime_text_3/sublime_plugin.py | 10 | 18883 | import sublime
import threading
import imp
import importlib
import os
import sys
import zipfile
import sublime_api
import traceback
api_ready = False
application_command_classes = []
window_command_classes = []
text_command_classes = []
all_command_classes = [application_command_classes, window_command_classes, text_command_classes]
all_callbacks = {'on_new': [], 'on_clone': [], 'on_load': [], 'on_pre_close': [], 'on_close': [],
'on_pre_save': [], 'on_post_save': [], 'on_modified': [],
'on_selection_modified': [],'on_activated': [], 'on_deactivated': [],
'on_query_context': [], 'on_query_completions': [],
'on_text_command': [], 'on_window_command': [],
'on_post_text_command': [], 'on_post_window_command': [],
'on_modified_async': [],
'on_selection_modified_async': [],
'on_pre_save_async': [],
'on_post_save_async': [],
'on_activated_async': [],
'on_deactivated_async': [],
'on_new_async': [],
'on_load_async': [],
'on_clone_async': []}
def unload_module(module):
if "plugin_unloaded" in module.__dict__:
module.plugin_unloaded()
# Check unload_handler too, for backwards compat
if "unload_handler" in module.__dict__:
module.unload_handler()
# Unload the old plugins
if "plugins" in module.__dict__:
for p in module.plugins:
for cmd_cls_list in all_command_classes:
try:
cmd_cls_list.remove(p)
except ValueError:
pass
for c in all_callbacks.values():
try:
c.remove(p)
except ValueError:
pass
def unload_plugin(modulename):
print("unloading plugin", modulename)
was_loaded = modulename in sys.modules
if was_loaded:
m = sys.modules[modulename]
unload_module(m)
def reload_plugin(modulename):
print("reloading plugin", modulename)
if modulename in sys.modules:
m = sys.modules[modulename]
unload_module(m)
m = imp.reload(m)
else:
m = importlib.import_module(modulename)
module_plugins = []
on_activated_targets = []
for type_name in dir(m):
try:
t = m.__dict__[type_name]
if t.__bases__:
is_plugin = False
if issubclass(t, ApplicationCommand):
application_command_classes.append(t)
is_plugin = True
if issubclass(t, WindowCommand):
window_command_classes.append(t)
is_plugin = True
if issubclass(t, TextCommand):
text_command_classes.append(t)
is_plugin = True
if is_plugin:
module_plugins.append(t)
if issubclass(t, EventListener):
obj = t()
for p in all_callbacks.items():
if p[0] in dir(obj):
p[1].append(obj)
if "on_activated" in dir(obj):
on_activated_targets.append(obj)
module_plugins.append(obj)
except AttributeError:
pass
if len(module_plugins) > 0:
m.plugins = module_plugins
if api_ready:
if "plugin_loaded" in m.__dict__:
try:
m.plugin_loaded()
except:
traceback.print_exc()
# Synthesize any required on_activated calls
for el in on_activated_targets:
w = sublime.active_window()
if w:
v = w.active_view()
if v:
try:
el.on_activated(v)
except:
traceback.print_exc()
def create_application_commands():
cmds = []
for class_ in application_command_classes:
cmds.append(class_())
sublime_api.notify_application_commands(cmds)
def create_window_commands(window_id):
window = sublime.Window(window_id)
cmds = []
for class_ in window_command_classes:
cmds.append(class_(window))
return cmds
def create_text_commands(view_id):
view = sublime.View(view_id)
cmds = []
for class_ in text_command_classes:
cmds.append(class_(view))
return cmds
def on_api_ready():
global api_ready
api_ready = True
for m in list(sys.modules.values()):
if "plugin_loaded" in m.__dict__:
try:
m.plugin_loaded()
except:
traceback.print_exc()
# Synthesize an on_activated call
w = sublime.active_window()
if w:
view_id = sublime_api.window_active_view(w.window_id)
if view_id != 0:
try:
on_activated(view_id)
except:
traceback.print_exc()
def on_new(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_new']:
try:
callback.on_new(v)
except:
traceback.print_exc()
def on_new_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_new_async']:
try:
callback.on_new_async(v)
except:
traceback.print_exc()
def on_clone(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_clone']:
try:
callback.on_clone(v)
except:
traceback.print_exc()
def on_clone_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_clone_async']:
try:
callback.on_clone_async(v)
except:
traceback.print_exc()
def on_load(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_load']:
try:
callback.on_load(v)
except:
traceback.print_exc()
def on_load_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_load_async']:
try:
callback.on_load_async(v)
except:
traceback.print_exc()
def on_pre_close(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_pre_close']:
try:
callback.on_pre_close(v)
except:
traceback.print_exc()
def on_close(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_close']:
try:
callback.on_close(v)
except:
traceback.print_exc()
def on_pre_save(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_pre_save']:
try:
callback.on_pre_save(v)
except:
traceback.print_exc()
def on_pre_save_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_pre_save_async']:
try:
callback.on_pre_save_async(v)
except:
traceback.print_exc()
def on_post_save(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_post_save']:
try:
callback.on_post_save(v)
except:
traceback.print_exc()
def on_post_save_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_post_save_async']:
try:
callback.on_post_save_async(v)
except:
traceback.print_exc()
def on_modified(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_modified']:
try:
callback.on_modified(v)
except:
traceback.print_exc()
def on_modified_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_modified_async']:
try:
callback.on_modified_async(v)
except:
traceback.print_exc()
def on_selection_modified(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_selection_modified']:
try:
callback.on_selection_modified(v)
except:
traceback.print_exc()
def on_selection_modified_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_selection_modified_async']:
try:
callback.on_selection_modified_async(v)
except:
traceback.print_exc()
def on_activated(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_activated']:
try:
callback.on_activated(v)
except:
traceback.print_exc()
def on_activated_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_activated_async']:
try:
callback.on_activated_async(v)
except:
traceback.print_exc()
def on_deactivated(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_deactivated']:
try:
callback.on_deactivated(v)
except:
traceback.print_exc()
def on_deactivated_async(view_id):
v = sublime.View(view_id)
for callback in all_callbacks['on_deactivated_async']:
try:
callback.on_deactivated_async(v)
except:
traceback.print_exc()
def on_query_context(view_id, key, operator, operand, match_all):
v = sublime.View(view_id)
for callback in all_callbacks['on_query_context']:
try:
val = callback.on_query_context(v, key, operator, operand, match_all)
if val:
return True
except:
traceback.print_exc()
return False
def normalise_completion(c):
if len(c) == 1:
return (c[0], "", "")
elif len(c) == 2:
return (c[0], "", c[1])
else:
return c
def on_query_completions(view_id, prefix, locations):
v = sublime.View(view_id)
completions = []
flags = 0
for callback in all_callbacks['on_query_completions']:
try:
res = callback.on_query_completions(v, prefix, locations)
if isinstance(res, tuple):
completions += [normalise_completion(c) for c in res[0]]
flags |= res[1]
elif isinstance(res, list):
completions += [normalise_completion(c) for c in res]
except:
traceback.print_exc()
return (completions,flags)
def on_text_command(view_id, name, args):
v = sublime.View(view_id)
for callback in all_callbacks['on_text_command']:
try:
res = callback.on_text_command(v, name, args)
if isinstance(res, tuple):
return res
elif res:
return (res, None)
except:
traceback.print_exc()
return ("", None)
def on_window_command(window_id, name, args):
window = sublime.Window(window_id)
for callback in all_callbacks['on_window_command']:
try:
res = callback.on_window_command(window, name, args)
if isinstance(res, tuple):
return res
elif res:
return (res, None)
except:
traceback.print_exc()
return ("", None)
def on_post_text_command(view_id, name, args):
v = sublime.View(view_id)
for callback in all_callbacks['on_post_text_command']:
try:
callback.on_post_text_command(v, name, args)
except:
traceback.print_exc()
def on_post_window_command(window_id, name, args):
window = sublime.Window(window_id)
for callback in all_callbacks['on_post_window_command']:
try:
callback.on_post_window_command(window, name, args)
except:
traceback.print_exc()
class Command(object):
def name(self):
clsname = self.__class__.__name__
name = clsname[0].lower()
last_upper = False
for c in clsname[1:]:
if c.isupper() and not last_upper:
name += '_'
name += c.lower()
else:
name += c
last_upper = c.isupper()
if name.endswith("_command"):
name = name[0:-8]
return name
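    # For illustration: a hypothetical plugin class named "DoSomethingCommand"
    # would be exposed to the command system as "do_something" by name() above.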
def is_enabled_(self, args):
ret = None
try:
if args:
if 'event' in args:
del args['event']
ret = self.is_enabled(**args)
else:
ret = self.is_enabled()
except TypeError:
ret = self.is_enabled()
if not isinstance(ret, bool):
raise ValueError("is_enabled must return a bool", self)
return ret
def is_enabled(self):
return True
def is_visible_(self, args):
ret = None
try:
if args:
ret = self.is_visible(**args)
else:
ret = self.is_visible()
except TypeError:
ret = self.is_visible()
if not isinstance(ret, bool):
raise ValueError("is_visible must return a bool", self)
return ret
def is_visible(self):
return True
def is_checked_(self, args):
ret = None
try:
if args:
ret = self.is_checked(**args)
else:
ret = self.is_checked()
except TypeError:
ret = self.is_checked()
if not isinstance(ret, bool):
raise ValueError("is_checked must return a bool", self)
return ret
def is_checked(self):
return False
def description_(self, args):
try:
if args != None:
return self.description(**args)
else:
return self.description()
except TypeError as e:
return ""
def description(self):
return ""
class ApplicationCommand(Command):
def run_(self, edit_token, args):
if args:
if 'event' in args:
del args['event']
return self.run(**args)
else:
return self.run()
def run(self):
pass
class WindowCommand(Command):
def __init__(self, window):
self.window = window
def run_(self, edit_token, args):
if args:
if 'event' in args:
del args['event']
return self.run(**args)
else:
return self.run()
def run(self):
pass
class TextCommand(Command):
def __init__(self, view):
self.view = view
def run_(self, edit_token, args):
if args:
if 'event' in args:
del args['event']
edit = self.view.begin_edit(edit_token, self.name(), args)
try:
return self.run(edit, **args)
finally:
self.view.end_edit(edit)
else:
edit = self.view.begin_edit(edit_token, self.name())
try:
return self.run(edit)
finally:
self.view.end_edit(edit)
def run(self, edit):
pass
class EventListener(object):
pass
class MultizipImporter(object):
def __init__(self):
self.loaders = []
self.file_loaders = []
def find_module(self, fullname, path = None):
if not path:
for l in self.loaders:
if l.name == fullname:
return l
for l in self.loaders:
if path == [l.zippath]:
if l.has(fullname):
return l
return None
class ZipLoader(object):
def __init__(self, zippath):
self.zippath = zippath
self.name = os.path.splitext(os.path.basename(zippath))[0]
self.contents = {"":""}
self.packages = {""}
z = zipfile.ZipFile(zippath, 'r')
files = [i.filename for i in z.infolist()]
for f in files:
base, ext = os.path.splitext(f)
if ext != ".py":
continue
paths = base.split('/')
if len(paths) > 0 and paths[len(paths) - 1] == "__init__":
paths.pop()
self.packages.add('.'.join(paths))
try:
self.contents['.'.join(paths)] = z.read(f).decode('utf-8')
except UnicodeDecodeError:
print(f, "in", zippath, "is not utf-8 encoded, unable to load plugin")
continue
while len(paths) > 1:
paths.pop()
parent = '.'.join(paths)
if parent not in self.contents:
self.contents[parent] = ""
self.packages.add(parent)
z.close()
def has(self, fullname):
key = '.'.join(fullname.split('.')[1:])
if key in self.contents:
return True
override_file = os.path.join(override_path, os.sep.join(fullname.split('.')) + '.py')
if os.path.isfile(override_file):
return True
override_package = os.path.join(override_path, os.sep.join(fullname.split('.')))
if os.path.isdir(override_package):
return True
return False
def load_module(self, fullname):
if fullname in sys.modules:
mod = sys.modules[fullname]
else:
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.zippath + "/" + fullname
mod.__name__ = fullname
mod.__path__ = [self.zippath]
mod.__loader__ = self
key = '.'.join(fullname.split('.')[1:])
if key in self.contents:
source = self.contents[key]
source_path = key + " in " + self.zippath
is_pkg = key in self.packages
try:
override_file = os.path.join(override_path, os.sep.join(fullname.split('.')) + '.py')
override_package_init = os.path.join(os.path.join(override_path, os.sep.join(fullname.split('.'))), '__init__.py')
if os.path.isfile(override_file):
with open(override_file, 'r') as f:
source = f.read()
source_path = override_file
elif os.path.isfile(override_package_init):
with open(override_package_init, 'r') as f:
source = f.read()
source_path = override_package_init
is_pkg = True
except:
pass
if is_pkg:
mod.__package__ = mod.__name__
else:
mod.__package__ = fullname.rpartition('.')[0]
exec(compile(source, source_path, 'exec'), mod.__dict__)
return mod
override_path = None
multi_importer = MultizipImporter()
sys.meta_path.insert(0, multi_importer)
def update_compressed_packages(pkgs):
multi_importer.loaders = [ZipLoader(p) for p in pkgs]
def set_override_path(path):
global override_path
override_path = path
| mit |
niranjfantain/django-obfuscator | django_obfuscator/django_obfuscator/settings.py | 1 | 4350 | """
Copyright 2015, Fantain Sports Private Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Django settings for django_obfuscator project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eu0zo1(w=2q+d2uf=wmul=f+dzvglr2q4ufv%1bevup&8jj@5r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'data_obfuscator',
'testobfuscator',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_obfuscator.urls'
WSGI_APPLICATION = 'django_obfuscator.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
# Include the default Django email handler for errors
# This is what you'd get without configuring logging at all.
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'level': 'ERROR',
'filters': ['require_debug_false'],
# But the emails are plain text by default - HTML is nicer
'include_html': True,
},
# Log to a text file that can be rotated by logrotate
'logfile': {
'class': 'logging.handlers.WatchedFileHandler',
'filename': os.path.join(BASE_DIR, 'logs', 'django-log')
},
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Again, default Django configuration to email unhandled exceptions
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# Might as well log any errors anywhere else in Django
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
        # Your own app - the logger below targets this project's
        # "data_obfuscator" application.
'data_obfuscator': {
'handlers': ['console'],
'level': 'DEBUG', # Or maybe INFO or WARNING
'propagate':True
},
},
}
| apache-2.0 |
python/psf-salt | tasks/salt.py | 1 | 4528 | from __future__ import absolute_import, division, print_function
import os
import invoke
import fabric.api
import fabric.contrib.files
from .utils import cd, ssh_host
SALT_MASTER = "192.168.5.1"
@invoke.task(name="sync-changes")
def sync_changes():
# Push our changes to GitHub
# TODO: Determine what origin to use?
invoke.run("git push origin master", echo=True)
if os.path.isdir("pillar/prod/secrets"):
with cd("pillar/prod/secrets"):
# Push our changes into the secret repository
invoke.run("git push origin master", echo=True)
# SSH into the salt master and pull our changes
with ssh_host("salt.iad1.psf.io"):
with fabric.api.cd("/srv/salt"):
fabric.api.sudo("git pull --ff-only origin master")
with fabric.api.cd("/srv/pillar/prod/secrets"):
fabric.api.sudo("git pull --ff-only origin master")
@invoke.task
def bootstrap(host, codename="trusty", pre=[sync_changes]):
# If the host does not contain '.', we'll assume it's of the form
# [host].iad1.psf.io.
if "." not in host:
host += ".iad1.psf.io"
# SSH into the root user of this server and bootstrap the server.
with ssh_host("root@" + host):
# Make sure this host hasn't already been bootstrapped.
if fabric.contrib.files.exists("/etc/salt/minion.d/local.conf"):
raise RuntimeError("{} is already bootstrapped.".format(host))
fabric.api.run("wget -O - https://repo.saltstack.com/apt/ubuntu/14.04/amd64/2018.3/SALTSTACK-GPG-KEY.pub | apt-key add -")
if codename == "trusty":
fabric.api.run("echo 'deb http://repo.saltstack.com/apt/ubuntu/14.04/amd64/2018.3 trusty main' > /etc/apt/sources.list.d/saltstack.list")
elif codename == "xenial":
fabric.api.run("echo 'deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2018.3 xenial main' > /etc/apt/sources.list.d/saltstack.list")
else:
raise RuntimeError("{} is not supported!".format(codename))
# Then we need to update our local apt
fabric.api.run("apt-get update -qy")
# Then, upgrade all of the packages that are currently on this
# machine.
fabric.api.run("apt-get upgrade -qy")
fabric.api.run("apt-get dist-upgrade -qy")
# We don't want the nova-agent installed.
# This doesn't appear to be installed on Xenial anymore?
if codename != "xenial":
fabric.api.run("apt-get purge nova-agent -qy")
# Install salt-minion and python-apt so we can manage things with
# salt.
fabric.api.run("apt-get install -qy salt-minion")
# Drop the /etc/salt/minion.d/local.conf onto the server so that it
# can connect with our salt master.
fabric.contrib.files.upload_template(
"conf/minion.conf",
"/etc/salt/minion.d/local.conf",
context={
"master": SALT_MASTER,
},
use_jinja=True,
mode=0o0644,
)
# Run salt-call state.highstate, this will fail the first time because
# the Master hasn't accepted our key yet.
fabric.api.run("salt-call state.highstate", warn_only=True)
# Get the minion ID of this server
minion_id = fabric.api.run("cat /etc/salt/minion_id")
# SSH into our salt master and accept the key for this server.
with ssh_host("salt.iad1.psf.io"):
fabric.api.sudo("salt-key -ya {}".format(minion_id))
# Finally SSH into our server one more time to run salt-call
# state.highstate for real this time.
with ssh_host("root@" + host):
fabric.api.run("salt-call state.highstate")
# Reboot the server to make sure any upgrades have been loaded.
fabric.api.reboot()
@invoke.task(default=True, pre=[sync_changes])
def highstate(hosts, dc="iad1"):
# Until invoke supports *args we need to hack around the lack of support
# for now.
hosts = [h.strip() for h in hosts.split(",") if h.strip()]
# Ensure we have some hosts
if not hosts:
raise ValueError("Must specify hosts for highstate")
# Loop over all the hosts and if they do not have a ., then we'll add
# .psf.io to them.
hosts = [h if "." in h else h + "." + dc + ".psf.io" for h in hosts]
# Loop over all the hosts and call salt-call state.highstate on them.
for host in hosts:
with ssh_host(host):
fabric.api.sudo("salt-call state.highstate")
| mit |
kchodorow/tensorflow | tensorflow/contrib/linalg/python/kernel_tests/linear_operator_test.py | 24 | 6943 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import linalg as linalg_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
linalg = linalg_lib
rng = np.random.RandomState(123)
class LinearOperatorShape(linalg.LinearOperator):
"""LinearOperator that implements the methods ._shape and _shape_tensor."""
def __init__(self,
shape,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
self._stored_shape = shape
super(LinearOperatorShape, self).__init__(
dtype=dtypes.float32,
graph_parents=None,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square)
def _shape(self):
return tensor_shape.TensorShape(self._stored_shape)
def _shape_tensor(self):
return constant_op.constant(self._stored_shape, dtype=dtypes.int32)
class LinearOperatorApplyOnly(linalg.LinearOperator):
"""LinearOperator that simply wraps a [batch] matrix and implements apply."""
def __init__(self,
matrix,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
self._matrix = ops.convert_to_tensor(matrix, name="matrix")
super(LinearOperatorApplyOnly, self).__init__(
dtype=matrix.dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square)
def _shape(self):
return self._matrix.get_shape()
def _shape_tensor(self):
return array_ops.shape(self._matrix)
def _apply(self, x, adjoint=False):
return math_ops.matmul(self._matrix, x, adjoint_a=adjoint)
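# Note: for a wrapped [batch] matrix `m`, apply(x) above reduces to
# math_ops.matmul(m, x), or matmul(m, x, adjoint_a=True) when adjoint=True.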
class LinearOperatorTest(test.TestCase):
def test_all_shape_properties_defined_by_the_one_property_shape(self):
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape)
self.assertAllEqual(4, operator.tensor_rank)
self.assertAllEqual((1, 2), operator.batch_shape)
self.assertAllEqual(4, operator.domain_dimension)
self.assertAllEqual(3, operator.range_dimension)
def test_all_shape_methods_defined_by_the_one_method_shape(self):
with self.test_session():
shape = (1, 2, 3, 4)
operator = LinearOperatorShape(shape)
self.assertAllEqual(shape, operator.shape_tensor().eval())
self.assertAllEqual(4, operator.tensor_rank_tensor().eval())
self.assertAllEqual((1, 2), operator.batch_shape_tensor().eval())
self.assertAllEqual(4, operator.domain_dimension_tensor().eval())
self.assertAllEqual(3, operator.range_dimension_tensor().eval())
def test_is_x_properties(self):
operator = LinearOperatorShape(
shape=(2, 2),
is_non_singular=False,
is_self_adjoint=True,
is_positive_definite=False)
self.assertFalse(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint)
self.assertFalse(operator.is_positive_definite)
def test_generic_to_dense_method_non_square_matrix_static(self):
matrix = rng.randn(2, 3, 4)
operator = LinearOperatorApplyOnly(matrix)
with self.test_session():
operator_dense = operator.to_dense()
self.assertAllEqual((2, 3, 4), operator_dense.get_shape())
self.assertAllClose(matrix, operator_dense.eval())
def test_generic_to_dense_method_non_square_matrix_tensor(self):
matrix = rng.randn(2, 3, 4)
matrix_ph = array_ops.placeholder(dtypes.float64)
operator = LinearOperatorApplyOnly(matrix_ph)
with self.test_session():
operator_dense = operator.to_dense()
self.assertAllClose(
matrix, operator_dense.eval(feed_dict={matrix_ph: matrix}))
def test_is_square_set_to_true_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 4, 4))
self.assertTrue(operator.is_square)
def test_is_square_set_to_false_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 3, 4))
self.assertFalse(operator.is_square)
def test_is_square_set_incorrectly_to_false_raises(self):
with self.assertRaisesRegexp(ValueError, "but.*was square"):
_ = LinearOperatorShape(shape=(2, 4, 4), is_square=False).is_square
def test_is_square_set_inconsistent_with_other_hints_raises(self):
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorApplyOnly(matrix, is_non_singular=True, is_square=False)
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorApplyOnly(
matrix, is_positive_definite=True, is_square=False)
def test_non_square_operators_raise_on_determinant_and_solve(self):
operator = LinearOperatorShape((2, 3))
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.log_abs_determinant()
with self.assertRaisesRegexp(NotImplementedError, "not be square"):
operator.solve(rng.rand(2, 2))
with self.assertRaisesRegexp(ValueError, "is always square"):
matrix = array_ops.placeholder(dtypes.float32)
LinearOperatorApplyOnly(
matrix, is_positive_definite=True, is_square=False)
def test_is_square_manual_set_works(self):
matrix = array_ops.placeholder(dtypes.float32)
# Default is None.
operator = LinearOperatorApplyOnly(matrix)
self.assertEqual(None, operator.is_square)
# Set to True
operator = LinearOperatorApplyOnly(matrix, is_square=True)
self.assertTrue(operator.is_square)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kaiix/depot_tools | tests/git_rebase_update_test.py | 5 | 13292 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_rebase_update.py"""
import os
import sys
DEPOT_TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, DEPOT_TOOLS_ROOT)
from testing_support import coverage_utils
from testing_support import git_test_utils
class GitRebaseUpdateTest(git_test_utils.GitRepoReadWriteTestBase):
REPO_SCHEMA = """
A B C D E F G
B H I J K
J L
"""
@classmethod
def getRepoContent(cls, commit):
# Every commit X gets a file X with the content X
return {commit: {'data': commit}}
@classmethod
def setUpClass(cls):
super(GitRebaseUpdateTest, cls).setUpClass()
import git_rebase_update, git_new_branch, git_reparent_branch, git_common
import git_rename_branch
cls.reup = git_rebase_update
cls.rp = git_reparent_branch
cls.nb = git_new_branch
cls.mv = git_rename_branch
cls.gc = git_common
cls.gc.TEST_MODE = True
def setUp(self):
super(GitRebaseUpdateTest, self).setUp()
# Include branch_K, branch_L to make sure that ABCDEFG all get the
# same commit hashes as self.repo. Otherwise they get committed with the
# wrong timestamps, due to commit ordering.
# TODO(iannucci): Make commit timestamps deterministic in left to right, top
# to bottom order, not in lexi-topographical order.
origin_schema = git_test_utils.GitRepoSchema("""
A B C D E F G M N O
B H I J K
J L
""", self.getRepoContent)
self.origin = origin_schema.reify()
self.origin.git('checkout', 'master')
self.origin.git('branch', '-d', *['branch_'+l for l in 'KLG'])
self.repo.git('remote', 'add', 'origin', self.origin.repo_path)
self.repo.git('config', '--add', 'remote.origin.fetch',
'+refs/tags/*:refs/tags/*')
self.repo.git('update-ref', 'refs/remotes/origin/master', 'tag_E')
self.repo.git('branch', '--set-upstream-to', 'branch_G', 'branch_K')
self.repo.git('branch', '--set-upstream-to', 'branch_K', 'branch_L')
self.repo.git('branch', '--set-upstream-to', 'origin/master', 'branch_G')
self.repo.to_schema_refs += ['origin/master']
def tearDown(self):
self.origin.nuke()
super(GitRebaseUpdateTest, self).tearDown()
def testRebaseUpdate(self):
self.repo.git('checkout', 'branch_K')
self.repo.run(self.nb.main, ['foobar'])
self.assertEqual(self.repo.git('rev-parse', 'HEAD').stdout,
self.repo.git('rev-parse', 'origin/master').stdout)
with self.repo.open('foobar', 'w') as f:
f.write('this is the foobar file')
self.repo.git('add', 'foobar')
self.repo.git_commit('foobar1')
with self.repo.open('foobar', 'w') as f:
f.write('totes the Foobar file')
self.repo.git_commit('foobar2')
self.repo.git('checkout', 'branch_K')
self.repo.run(self.nb.main, ['--upstream-current', 'sub_K'])
with self.repo.open('K', 'w') as f:
f.write('This depends on K')
self.repo.git_commit('sub_K')
self.repo.run(self.nb.main, ['old_branch'])
self.repo.git('reset', '--hard', self.repo['A'])
with self.repo.open('old_file', 'w') as f:
f.write('old_files we want to keep around')
self.repo.git('add', 'old_file')
self.repo.git_commit('old_file')
self.repo.git('config', 'branch.old_branch.dormant', 'true')
self.repo.git('checkout', 'origin/master')
self.assertSchema("""
A B H I J K sub_K
J L
B C D E foobar1 foobar2
E F G
A old_file
""")
self.assertEquals(self.repo['A'], self.origin['A'])
self.assertEquals(self.repo['E'], self.origin['E'])
with self.repo.open('bob', 'wb') as f:
f.write('testing auto-freeze/thaw')
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Cannot rebase-update', output)
self.repo.run(self.nb.main, ['empty_branch'])
self.repo.run(self.nb.main, ['--upstream-current', 'empty_branch2'])
self.repo.git('checkout', 'branch_K')
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Rebasing: branch_G', output)
self.assertIn('Rebasing: branch_K', output)
self.assertIn('Rebasing: branch_L', output)
self.assertIn('Rebasing: foobar', output)
self.assertIn('Rebasing: sub_K', output)
self.assertIn('Deleted branch branch_G', output)
self.assertIn('Deleted branch empty_branch', output)
self.assertIn('Deleted branch empty_branch2', output)
self.assertIn('Reparented branch_K to track origin/master', output)
self.assertSchema("""
A B C D E F G M N O H I J K sub_K
K L
O foobar1 foobar2
A old_file
""")
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('branch_K up-to-date', output)
self.assertIn('branch_L up-to-date', output)
self.assertIn('foobar up-to-date', output)
self.assertIn('sub_K up-to-date', output)
with self.repo.open('bob') as f:
self.assertEquals('testing auto-freeze/thaw', f.read())
self.assertEqual(self.repo.git('status', '--porcelain').stdout, '?? bob\n')
self.repo.git('checkout', 'origin/master')
_, err = self.repo.capture_stdio(self.rp.main, [])
self.assertIn('Must specify new parent somehow', err)
_, err = self.repo.capture_stdio(self.rp.main, ['foobar'])
self.assertIn('Must be on the branch', err)
self.repo.git('checkout', 'branch_K')
_, err = self.repo.capture_stdio(self.rp.main, ['origin/master'])
self.assertIn('Cannot reparent a branch to its existing parent', err)
output, _ = self.repo.capture_stdio(self.rp.main, ['foobar'])
self.assertIn('Rebasing: branch_K', output)
self.assertIn('Rebasing: sub_K', output)
self.assertIn('Rebasing: branch_L', output)
self.assertSchema("""
A B C D E F G M N O foobar1 foobar2 H I J K L
K sub_K
A old_file
""")
self.repo.git('checkout', 'sub_K')
output, _ = self.repo.capture_stdio(self.rp.main, ['foobar'])
self.assertIn('Squashing failed', output)
self.assertTrue(self.repo.run(self.gc.in_rebase))
self.repo.git('rebase', '--abort')
self.assertIsNone(self.repo.run(self.gc.thaw))
self.assertSchema("""
A B C D E F G M N O foobar1 foobar2 H I J K L
A old_file
K sub_K
""")
self.assertEqual(self.repo.git('status', '--porcelain').stdout, '?? bob\n')
branches = self.repo.run(set, self.gc.branches())
self.assertEqual(branches, {'branch_K', 'master', 'sub_K', 'root_A',
'branch_L', 'old_branch', 'foobar'})
self.repo.git('checkout', 'branch_K')
self.repo.run(self.mv.main, ['special_K'])
branches = self.repo.run(set, self.gc.branches())
self.assertEqual(branches, {'special_K', 'master', 'sub_K', 'root_A',
'branch_L', 'old_branch', 'foobar'})
self.repo.git('checkout', 'origin/master')
_, err = self.repo.capture_stdio(self.mv.main, ['special_K', 'cool branch'])
self.assertIn('fatal: \'cool branch\' is not a valid branch name.', err)
self.repo.run(self.mv.main, ['special_K', 'cool_branch'])
branches = self.repo.run(set, self.gc.branches())
# This check fails with git 2.4 (see crbug.com/487172)
self.assertEqual(branches, {'cool_branch', 'master', 'sub_K', 'root_A',
'branch_L', 'old_branch', 'foobar'})
_, branch_tree = self.repo.run(self.gc.get_branch_tree)
self.assertEqual(branch_tree['sub_K'], 'foobar')
def testRebaseConflicts(self):
# Pretend that branch_L landed
self.origin.git('checkout', 'master')
with self.origin.open('L', 'w') as f:
f.write('L')
self.origin.git('add', 'L')
self.origin.git_commit('L')
# Add a commit to branch_K so that things fail
self.repo.git('checkout', 'branch_K')
with self.repo.open('M', 'w') as f:
f.write('NOPE')
self.repo.git('add', 'M')
self.repo.git_commit('K NOPE')
# Add a commits to branch_L which will work when squashed
self.repo.git('checkout', 'branch_L')
self.repo.git('reset', 'branch_L~')
with self.repo.open('L', 'w') as f:
f.write('NOPE')
self.repo.git('add', 'L')
self.repo.git_commit('L NOPE')
with self.repo.open('L', 'w') as f:
f.write('L')
self.repo.git('add', 'L')
self.repo.git_commit('L YUP')
# start on a branch which will be deleted
self.repo.git('checkout', 'branch_G')
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('branch.branch_K.dormant true', output)
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Rebase in progress', output)
self.repo.git('checkout', '--theirs', 'M')
self.repo.git('rebase', '--skip')
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Failed! Attempting to squash', output)
self.assertIn('Deleted branch branch_G', output)
self.assertIn('Deleted branch branch_L', output)
self.assertIn('\'branch_G\' was merged', output)
self.assertIn('checking out \'origin/master\'', output)
def testRebaseConflictsKeepGoing(self):
# Pretend that branch_L landed
self.origin.git('checkout', 'master')
with self.origin.open('L', 'w') as f:
f.write('L')
self.origin.git('add', 'L')
self.origin.git_commit('L')
# Add a commit to branch_K so that things fail
self.repo.git('checkout', 'branch_K')
with self.repo.open('M', 'w') as f:
f.write('NOPE')
self.repo.git('add', 'M')
self.repo.git_commit('K NOPE')
# Add a commits to branch_L which will work when squashed
self.repo.git('checkout', 'branch_L')
self.repo.git('reset', 'branch_L~')
with self.repo.open('L', 'w') as f:
f.write('NOPE')
self.repo.git('add', 'L')
self.repo.git_commit('L NOPE')
with self.repo.open('L', 'w') as f:
f.write('L')
self.repo.git('add', 'L')
self.repo.git_commit('L YUP')
# start on a branch which will be deleted
self.repo.git('checkout', 'branch_G')
self.repo.git('config', 'branch.branch_K.dormant', 'false')
output, _ = self.repo.capture_stdio(self.reup.main, ['-k'])
self.assertIn('--keep-going set, continuing with next branch.', output)
self.assertIn('could not be cleanly rebased:', output)
self.assertIn(' branch_K', output)
def testTrackTag(self):
self.origin.git('tag', 'lkgr', self.origin['M'])
self.repo.git('tag', 'lkgr', self.repo['D'])
self.repo.git('config', 'branch.branch_G.remote', '.')
self.repo.git('config', 'branch.branch_G.merge', 'refs/tags/lkgr')
self.assertIn(
'fatal: \'foo bar\' is not a valid branch name',
self.repo.capture_stdio(self.nb.main, ['--lkgr', 'foo bar'])[1])
self.repo.run(self.nb.main, ['--lkgr', 'foobar'])
with self.repo.open('foobar', 'w') as f:
f.write('this is the foobar file')
self.repo.git('add', 'foobar')
self.repo.git_commit('foobar1')
with self.repo.open('foobar', 'w') as f:
f.write('totes the Foobar file')
self.repo.git_commit('foobar2')
self.assertSchema("""
A B H I J K
J L
B C D E F G
D foobar1 foobar2
""")
self.assertEquals(self.repo['A'], self.origin['A'])
self.assertEquals(self.repo['G'], self.origin['G'])
output, _ = self.repo.capture_stdio(self.reup.main)
self.assertIn('Fetching', output)
self.assertIn('Rebasing: branch_G', output)
self.assertIn('Rebasing: branch_K', output)
self.assertIn('Rebasing: branch_L', output)
self.assertIn('Rebasing: foobar', output)
self.assertEquals(self.repo.git('rev-parse', 'lkgr').stdout.strip(),
self.origin['M'])
self.assertSchema("""
A B C D E F G M N O
M H I J K L
M foobar1 foobar2
""")
_, err = self.repo.capture_stdio(self.rp.main, ['tag F'])
self.assertIn('fatal: invalid reference', err)
output, _ = self.repo.capture_stdio(self.rp.main, ['tag_F'])
self.assertIn('to track tag_F [tag] (was lkgr [tag])', output)
self.assertSchema("""
A B C D E F G M N O
M H I J K L
F foobar1 foobar2
""")
output, _ = self.repo.capture_stdio(self.rp.main, ['--lkgr'])
self.assertIn('to track lkgr [tag] (was tag_F [tag])', output)
self.assertSchema("""
A B C D E F G M N O
M H I J K L
M foobar1 foobar2
""")
output, _ = self.repo.capture_stdio(self.rp.main, ['--root'])
self.assertIn('to track origin/master (was lkgr [tag])', output)
self.assertSchema("""
A B C D E F G M N O foobar1 foobar2
M H I J K L
""")
if __name__ == '__main__':
sys.exit(coverage_utils.covered_main((
os.path.join(DEPOT_TOOLS_ROOT, 'git_rebase_update.py'),
os.path.join(DEPOT_TOOLS_ROOT, 'git_new_branch.py'),
os.path.join(DEPOT_TOOLS_ROOT, 'git_reparent_branch.py'),
os.path.join(DEPOT_TOOLS_ROOT, 'git_rename_branch.py')
)))
| bsd-3-clause |
ipazc/oculus-crawl | main/search_engine/howold_images.py | 1 | 2402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from main.search_engine.search_engine import SEARCH_ENGINES, SearchEngine
import urllib
import logging
from bs4 import BeautifulSoup
__author__ = "Ivan de Paz Centeno"
class HowOldImages(SearchEngine):
"""
Search engine that retrieves information of images in the howold webpage.
"""
def retrieve(self, search_request):
"""
Performs a retrieval from HowOld given the search request info.
:param search_request: A search request instance filled with the keywords and the options for the desired
search. The following options are currently accepted:
:return:
"""
# This way we cache the transport core.
if not self.transport_core or search_request.get_transport_core_proto() != self.transport_core.__class__:
self.transport_core = search_request.get_transport_core_proto()()
logging.info("Transport core created from proto.")
logging.debug("Retrieving image links from request {}.".format(search_request))
return self._retrieve_image_links_data(search_request.get_words(), search_request.get_options())
def _retrieve_image_links_data(self, search_words, search_options):
url = "https://how-old.net/?q={}".format(
urllib.parse.quote_plus(search_words))
logging.info("Built url ({}) for request.".format(url))
self.transport_core.get(url)
self.transport_core.wait_for_elements_from_class("ImageSelector")
logging.info("Get done. Loading elements JSON")
image_list_html = self.transport_core.get_elements_html_by_id("imageList", innerHTML=False)[0]
img_tag_list = BeautifulSoup(image_list_html, 'html.parser').find().find_all("img")
json_elements = [self._build_json_for(image_tag, search_words) for image_tag in img_tag_list]
logging.info("Retrieved {} elements".format(len(json_elements)))
return json_elements
def _build_json_for(self, image_tag, search_words):
url = image_tag['src']
image_size = self._get_url_size(url)
return {'url': image_tag['src'], 'width': image_size[0], 'height': image_size[1], 'desc': '',
'searchwords':search_words,
'source': 'howold'}
# Register the class to enable deserialization.
SEARCH_ENGINES[str(HowOldImages)] = HowOldImages
| gpl-3.0 |
suncycheng/intellij-community | python/lib/Lib/distutils/command/build_scripts.py | 97 | 5528 | """distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_scripts.py 59668 2008-01-02 18:59:36Z guido.van.rossum $"
import sys, os, re
from stat import ST_MODE
from distutils import sysconfig
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
# check if Python is called on the first line with this expression
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
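# For example, this matches shebang lines like "#!/usr/bin/python2.7" or
# "#! /usr/bin/env python -u"; any trailing interpreter options such as " -u"
# are captured by the single group.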
class build_scripts (Command):
description = "\"build\" scripts (copy and fixup #! line)"
user_options = [
('build-dir=', 'd', "directory to \"build\" (copy) to"),
('force', 'f', "forcibly build everything (ignore file timestamps"),
('executable=', 'e', "specify final destination interpreter path"),
]
boolean_options = ['force']
def initialize_options (self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None
self.outfiles = None
def finalize_options (self):
self.set_undefined_options('build',
('build_scripts', 'build_dir'),
('force', 'force'),
('executable', 'executable'))
self.scripts = self.distribution.scripts
def get_source_files(self):
return self.scripts
def run (self):
if not self.scripts:
return
self.copy_scripts()
def copy_scripts (self):
"""Copy each script listed in 'self.scripts'; if it's marked as a
Python script in the Unix way (first line matches 'first_line_re',
ie. starts with "\#!" and contains "python"), then adjust the first
line to refer to the current Python interpreter as we copy.
"""
self.mkpath(self.build_dir)
outfiles = []
for script in self.scripts:
adjust = 0
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, "r")
except IOError:
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line:
self.warn("%s is an empty file (skipping)" % script)
continue
match = first_line_re.match(first_line)
if match:
adjust = 1
post_interp = match.group(1) or ''
if adjust:
log.info("copying and adjusting %s -> %s", script,
self.build_dir)
if not sysconfig.python_build:
executable = self.executable
else:
executable = os.path.join(
sysconfig.get_config_var("BINDIR"),
"python" + sysconfig.get_config_var("EXE"))
executable = fix_jython_executable(executable, post_interp)
if not self.dry_run:
outf = open(outfile, "w")
outf.write("#!%s%s\n" %
(executable,
post_interp))
outf.writelines(f.readlines())
outf.close()
if f:
f.close()
else:
if f:
f.close()
self.copy_file(script, outfile)
if hasattr(os, 'chmod'):
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 07777
newmode = (oldmode | 0555) & 07777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
# copy_scripts ()
# class build_scripts
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
    except (IOError, OSError):
return executable
return magic == '#!'
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn("WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
| apache-2.0 |
francocurotto/GraphSLAM | src/python-helpers/commons/g2o2lab.py | 1 | 1819 | '''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from slamFunctions import *
from slamData import slamData
def g2o2lab(guessPath, optPath, resDir):
"""
Converts SLAM data from g2o format to alternative format (lab).
Parameters
----------
guessPath: string
initial guess file in g2o format
optPath: string
optimized result file in g2o format
    resDir: string
        directory to output lab format
"""
guessData = slamData(guessPath)
optData = slamData(optPath)
fd = open(resDir + 'deadReckoning.dat', 'w')
for i in range(len(guessData.poseX)):
fd.write(str(i) + " " + str(guessData.poseX[i]) + " " + str(guessData.poseY[i]) + " " + str(guessData.poseA[i]) + "\n")
fd.close()
fp = open(resDir + 'particlePose.dat', 'w')
for i in range(len(optData.poseX)):
fp.write(str(i) + " 0 " + str(optData.poseX[i]) + " " + str(optData.poseY[i]) + " " + str(optData.poseA[i]) + " 1 \n")
    fp.close()
fl = open(resDir + "landmarkEst.dat", 'w')
for i in range(len(optData.landmarkX)):
fl.write(str(1) + " " + str(0) + " " + str(optData.landmarkX[i]) + " " + str(optData.landmarkY[i]) + " 1 0 1 1\n")
fl.close()
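# Illustrative usage (file names are hypothetical):
#   g2o2lab("guess.g2o", "optimized.g2o", "res_lab/")
# writes deadReckoning.dat, particlePose.dat and landmarkEst.dat into resDir.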
| gpl-3.0 |
beiko-lab/gengis | bin/Lib/site-packages/scipy/linalg/tests/test_decomp_cholesky.py | 1 | 7125 | from __future__ import division, print_function, absolute_import
from numpy.testing import TestCase, assert_array_almost_equal
from numpy import array, transpose, dot, conjugate, zeros_like
from numpy.random import rand
from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \
cho_factor, cho_solve
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
return rand(*size)
class TestCholesky(TestCase):
def test_simple(self):
a = [[8,2,3],[2,9,3],[3,3,6]]
c = cholesky(a)
assert_array_almost_equal(dot(transpose(c),c),a)
c = transpose(c)
a = dot(c,transpose(c))
assert_array_almost_equal(cholesky(a,lower=1),c)
def test_check_finite(self):
a = [[8,2,3],[2,9,3],[3,3,6]]
c = cholesky(a, check_finite=False)
assert_array_almost_equal(dot(transpose(c),c),a)
c = transpose(c)
a = dot(c,transpose(c))
assert_array_almost_equal(cholesky(a,lower=1, check_finite=False),c)
def test_simple_complex(self):
m = array([[3+1j,3+4j,5],[0,2+2j,2+7j],[0,0,7+4j]])
a = dot(transpose(conjugate(m)),m)
c = cholesky(a)
a1 = dot(transpose(conjugate(c)),c)
assert_array_almost_equal(a,a1)
c = transpose(c)
a = dot(c,transpose(conjugate(c)))
assert_array_almost_equal(cholesky(a,lower=1),c)
def test_random(self):
n = 20
for k in range(2):
m = random([n,n])
for i in range(n):
m[i,i] = 20*(.1+m[i,i])
a = dot(transpose(m),m)
c = cholesky(a)
a1 = dot(transpose(c),c)
assert_array_almost_equal(a,a1)
c = transpose(c)
a = dot(c,transpose(c))
assert_array_almost_equal(cholesky(a,lower=1),c)
def test_random_complex(self):
n = 20
for k in range(2):
m = random([n,n])+1j*random([n,n])
for i in range(n):
m[i,i] = 20*(.1+abs(m[i,i]))
a = dot(transpose(conjugate(m)),m)
c = cholesky(a)
a1 = dot(transpose(conjugate(c)),c)
assert_array_almost_equal(a,a1)
c = transpose(c)
a = dot(c,transpose(conjugate(c)))
assert_array_almost_equal(cholesky(a,lower=1),c)
class TestCholeskyBanded(TestCase):
"""Tests for cholesky_banded() and cho_solve_banded."""
def test_check_finite(self):
# Symmetric positive definite banded matrix `a`
a = array([[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.5, 0.0],
[0.0, 0.5, 4.0, 0.2],
[0.0, 0.0, 0.2, 4.0]])
# Banded storage form of `a`.
ab = array([[-1.0, 1.0, 0.5, 0.2],
[4.0, 4.0, 4.0, 4.0]])
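        # In this upper (lower=False) banded layout the last row holds the main
        # diagonal of `a` and the row above holds its superdiagonal shifted
        # right by one; the leading -1.0 is unused padding.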
c = cholesky_banded(ab, lower=False, check_finite=False)
ufac = zeros_like(a)
ufac[list(range(4)),list(range(4))] = c[-1]
ufac[(0,1,2),(1,2,3)] = c[0,1:]
assert_array_almost_equal(a, dot(ufac.T, ufac))
b = array([0.0, 0.5, 4.2, 4.2])
x = cho_solve_banded((c, False), b, check_finite=False)
assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
def test_upper_real(self):
# Symmetric positive definite banded matrix `a`
a = array([[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.5, 0.0],
[0.0, 0.5, 4.0, 0.2],
[0.0, 0.0, 0.2, 4.0]])
# Banded storage form of `a`.
ab = array([[-1.0, 1.0, 0.5, 0.2],
[4.0, 4.0, 4.0, 4.0]])
c = cholesky_banded(ab, lower=False)
ufac = zeros_like(a)
ufac[list(range(4)),list(range(4))] = c[-1]
ufac[(0,1,2),(1,2,3)] = c[0,1:]
assert_array_almost_equal(a, dot(ufac.T, ufac))
b = array([0.0, 0.5, 4.2, 4.2])
x = cho_solve_banded((c, False), b)
assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
def test_upper_complex(self):
# Hermitian positive definite banded matrix `a`
a = array([[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.5, 0.0],
[0.0, 0.5, 4.0, -0.2j],
[0.0, 0.0, 0.2j, 4.0]])
# Banded storage form of `a`.
ab = array([[-1.0, 1.0, 0.5, -0.2j],
[4.0, 4.0, 4.0, 4.0]])
c = cholesky_banded(ab, lower=False)
ufac = zeros_like(a)
ufac[list(range(4)),list(range(4))] = c[-1]
ufac[(0,1,2),(1,2,3)] = c[0,1:]
assert_array_almost_equal(a, dot(ufac.conj().T, ufac))
b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0])
x = cho_solve_banded((c, False), b)
assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
def test_lower_real(self):
# Symmetric positive definite banded matrix `a`
a = array([[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.5, 0.0],
[0.0, 0.5, 4.0, 0.2],
[0.0, 0.0, 0.2, 4.0]])
# Banded storage form of `a`.
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 0.5, 0.2, -1.0]])
c = cholesky_banded(ab, lower=True)
lfac = zeros_like(a)
lfac[list(range(4)),list(range(4))] = c[0]
lfac[(1,2,3),(0,1,2)] = c[1,:3]
assert_array_almost_equal(a, dot(lfac, lfac.T))
b = array([0.0, 0.5, 4.2, 4.2])
x = cho_solve_banded((c, True), b)
assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
def test_lower_complex(self):
# Hermitian positive definite banded matrix `a`
a = array([[4.0, 1.0, 0.0, 0.0],
[1.0, 4.0, 0.5, 0.0],
[0.0, 0.5, 4.0, -0.2j],
[0.0, 0.0, 0.2j, 4.0]])
# Banded storage form of `a`.
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 0.5, 0.2j, -1.0]])
c = cholesky_banded(ab, lower=True)
lfac = zeros_like(a)
lfac[list(range(4)),list(range(4))] = c[0]
lfac[(1,2,3),(0,1,2)] = c[1,:3]
assert_array_almost_equal(a, dot(lfac, lfac.conj().T))
b = array([0.0, 0.5j, 3.8j, 3.8])
x = cho_solve_banded((c, True), b)
assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0])
class TestOverwrite(object):
def test_cholesky(self):
assert_no_overwrite(cholesky, [(3,3)])
def test_cho_factor(self):
assert_no_overwrite(cho_factor, [(3,3)])
def test_cho_solve(self):
x = array([[2,-1,0], [-1,2,-1], [0,-1,2]])
xcho = cho_factor(x)
assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)])
def test_cholesky_banded(self):
assert_no_overwrite(cholesky_banded, [(2,3)])
def test_cho_solve_banded(self):
x = array([[0, -1, -1], [2, 2, 2]])
xcho = cholesky_banded(x)
assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b),
[(3,)])
| gpl-3.0 |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/pprint.py | 74 | 14861 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import re
import sys as _sys
from collections import OrderedDict as _OrderedDict
from io import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
compact=False):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth,
compact=compact)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None, *, compact=False):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth,
compact=compact).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
    The wrapped object will fall back to a Py2.x style comparison for
unorderable types (sorting first comparing the type name and then by
the obj ids). Does not work recursively, so dict.items() must have
_safe_key applied to both the key and the value.
"""
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
rv = self.obj.__lt__(other.obj)
except TypeError:
rv = NotImplemented
if rv is NotImplemented:
rv = (str(type(self.obj)), id(self.obj)) < \
(str(type(other.obj)), id(other.obj))
return rv
def _safe_tuple(t):
"Helper function for comparing 2-tuples"
return _safe_key(t[0]), _safe_key(t[1])
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None, *,
compact=False):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
compact
If true, several items will be combined in one line.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
self._compact = bool(compact)
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = type(object)
max_width = self._width - 1 - indent - allowance
sepLines = len(rep) > max_width
write = stream.write
if sepLines:
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict):
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
if issubclass(typ, _OrderedDict):
items = list(object.items())
else:
items = sorted(object.items(), key=_safe_tuple)
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self._format(ent, stream, indent + len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, tuple):
write('(')
endchar = ')'
else:
if not length:
write(rep)
return
if typ is set:
write('{')
endchar = '}'
else:
write(typ.__name__)
write('({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
self._format_items(object, stream,
indent + self._indent_per_level,
allowance + 1, context, level)
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
if issubclass(typ, str) and len(object) > 0 and r is str.__repr__:
def _str_parts(s):
"""
Return a list of string literals comprising the repr()
of the given string using literal concatenation.
"""
lines = s.splitlines(True)
for i, line in enumerate(lines):
rep = repr(line)
if len(rep) <= max_width:
yield rep
else:
# A list of alternating (non-space, space) strings
parts = re.split(r'(\s+)', line) + ['']
current = ''
for i in range(0, len(parts), 2):
part = parts[i] + parts[i+1]
candidate = current + part
if len(repr(candidate)) > max_width:
if current:
yield repr(current)
current = part
else:
current = candidate
if current:
yield repr(current)
for i, rep in enumerate(_str_parts(object)):
if i > 0:
write('\n' + ' '*indent)
write(rep)
return
write(rep)
def _format_items(self, items, stream, indent, allowance, context, level):
write = stream.write
delimnl = ',\n' + ' ' * indent
delim = ''
width = max_width = self._width - indent - allowance + 2
for ent in items:
if self._compact:
rep = self._repr(ent, context, level)
w = len(rep) + 2
if width < w:
width = max_width
if delim:
delim = delimnl
if width >= w:
width -= w
write(delim)
delim = ', '
write(rep)
continue
write(delim)
delim = delimnl
self._format(ent, stream, indent, allowance, context, level)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=_safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % ", ".join(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % ", ".join(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (type(object).__name__, id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print("_safe_repr:", t2 - t1)
print("pformat:", t3 - t2)
if __name__ == "__main__":
_perfcheck()
| gpl-3.0 |
bdang2012/taiga-front | scripts/manage_translations.py | 20 | 4103 | #!/usr/bin/env python
#
# NOTE: This script is based on taiga-back manage_translations.py script
# (https://github.com/taigaio/taiga-back/blob/master/scripts/manage_translations.py)
#
# This python file contains utility scripts to manage taiga translations.
# It has to be run inside the taiga-front git root directory (over the taiga-back env).
#
# The following commands are available:
#
# * fetch: fetch translations from transifex.com
#
# * commit: update resources in transifex.com with the local files
#
# Each command supports the --languages and --resources options to limit their
# operation to the specified languages or resources. For example, to fetch the
# Spanish translations of the locale resource, run:
#
# $ python scripts/manage_translations.py fetch --languages=es --resources=locale
import os
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from subprocess import PIPE, Popen, call
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
return "taiga-front.{}".format(name)
def fetch(resources=None, languages=None):
"""
Fetch translations from Transifex.
"""
if not resources:
if languages is None:
call("tx pull -f --minimum-perc=5", shell=True)
else:
for lang in languages:
call("tx pull -f -l {lang}".format(lang=lang), shell=True)
else:
for resource in resources:
if languages is None:
call("tx pull -r {res} -f --minimum-perc=5".format(res=_tx_resource_for_name(resource)),
shell=True)
else:
for lang in languages:
call("tx pull -r {res} -f -l {lang}".format(res=_tx_resource_for_name(resource), lang=lang),
shell=True)
def commit(resources=None, languages=None):
"""
    Commit messages to Transifex.
"""
if not resources:
if languages is None:
call("tx push -s -l en", shell=True)
else:
for lang in languages:
call("tx push -t -l {lang}".format(lang=lang), shell=True)
else:
for resource in resources:
# Transifex push
if languages is None:
call("tx push -r {res} -s -l en".format(res=_tx_resource_for_name(resource)), shell=True)
else:
for lang in languages:
call("tx push -r {res} -t -l {lang}".format(res= _tx_resource_for_name(resource), lang=lang), shell=True)
if __name__ == "__main__":
try:
devnull = open(os.devnull)
Popen(["tx"], stdout=devnull, stderr=devnull).communicate()
except OSError as e:
if e.errno == os.errno.ENOENT:
print("""
You need transifex-client, install it.
1. Install transifex-client, use
$ pip install --upgrade transifex-client==0.11.1.beta
2. Create ~/.transifexrc file:
      $ vim ~/.transifexrc
[https://www.transifex.com]
hostname = https://www.transifex.com
token =
username = <YOUR_USERNAME>
      password = <YOUR_PASSWORD>
""")
exit(1)
RUNABLE_SCRIPTS = {
"commit": "send .json file to transifex ('en' by default).",
"fetch": "get .json files from transifex.",
}
parser = ArgumentParser(description="manage translations in taiga-front between the repo and transifex.",
formatter_class=RawTextHelpFormatter)
parser.add_argument("cmd", nargs=1,
help="\n".join(["{0} - {1}".format(c, h) for c, h in RUNABLE_SCRIPTS.items()]))
parser.add_argument("-r", "--resources", action="append",
help="limit operation to the specified resources")
parser.add_argument("-l", "--languages", action="append",
help="limit operation to the specified languages")
options = parser.parse_args()
if options.cmd[0] in RUNABLE_SCRIPTS.keys():
eval(options.cmd[0])(options.resources, options.languages)
else:
print("Available commands are: {}".format(", ".join(RUNABLE_SCRIPTS.keys())))
| agpl-3.0 |
lochiiconnectivity/boto | boto/support/layer1.py | 15 | 20582 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.support import exceptions
class SupportConnection(AWSQueryConnection):
"""
AWS Support
The AWS Support API reference is intended for programmers who need
detailed information about the AWS Support actions and data types.
    This service enables you to manage your AWS Support cases
programmatically. It is built on the AWS Query API programming
model and provides HTTP methods that take parameters and return
results in JSON format.
The AWS Support service also exposes a set of `Trusted Advisor`_
features. You can retrieve a list of checks you can run on your
resources, specify checks to run and refresh, and check the status
of checks you have submitted.
The following list describes the AWS Support case management
actions:
+ **Service names, issue categories, and available severity
    levels.** The actions `DescribeServices`_ and
`DescribeSeverityLevels`_ enable you to obtain AWS service names,
service codes, service categories, and problem severity levels.
You use these values when you call the `CreateCase`_ action.
+ **Case Creation, case details, and case resolution**. The
actions `CreateCase`_, `DescribeCases`_, and `ResolveCase`_ enable
you to create AWS Support cases, retrieve them, and resolve them.
+ **Case communication**. The actions
`DescribeCaseCommunications`_ and `AddCommunicationToCase`_ enable
you to retrieve and add communication to AWS Support cases.
The following list describes the actions available from the AWS
Support service for Trusted Advisor:
    + `DescribeTrustedAdvisorChecks`_ returns the list of checks that you can run against your AWS
resources.
+ Using the CheckId for a specific check returned by
    DescribeTrustedAdvisorChecks, you can call
`DescribeTrustedAdvisorCheckResult`_ and obtain a new result for the check you specified.
+ Using `DescribeTrustedAdvisorCheckSummaries`_, you can get
summaries for a set of Trusted Advisor checks.
+ `RefreshTrustedAdvisorCheck`_ enables you to request that
Trusted Advisor run the check again.
    + `DescribeTrustedAdvisorCheckRefreshStatuses`_ gets statuses on the checks you are running.
For authentication of requests, the AWS Support uses `Signature
Version 4 Signing Process`_.
See the AWS Support Developer Guide for information about how to
    use this service to create and manage your support cases,
and how to call Trusted Advisor for results of checks on your
resources.
"""
APIVersion = "2013-04-15"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "support.us-east-1.amazonaws.com"
ServiceName = "Support"
TargetPrefix = "AWSSupport_20130415"
ResponseError = JSONResponseError
_faults = {
"CaseIdNotFound": exceptions.CaseIdNotFound,
"CaseCreationLimitExceeded": exceptions.CaseCreationLimitExceeded,
"InternalServerError": exceptions.InternalServerError,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
kwargs['host'] = region.endpoint
AWSQueryConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_communication_to_case(self, communication_body, case_id=None,
cc_email_addresses=None):
"""
This action adds additional customer communication to an AWS
Support case. You use the CaseId value to identify the case to
which you want to add communication. You can list a set of
email addresses to copy on the communication using the
CcEmailAddresses value. The CommunicationBody value contains
the text of the communication.
This action's response indicates the success or failure of the
request.
This action implements a subset of the behavior on the AWS
Support `Your Support Cases`_ web form.
:type case_id: string
:param case_id:
:type communication_body: string
:param communication_body:
:type cc_email_addresses: list
:param cc_email_addresses:
"""
params = {'communicationBody': communication_body, }
if case_id is not None:
params['caseId'] = case_id
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
return self.make_request(action='AddCommunicationToCase',
body=json.dumps(params))
def create_case(self, subject, service_code, category_code,
communication_body, severity_code=None,
cc_email_addresses=None, language=None, issue_type=None):
"""
Creates a new case in the AWS Support Center. This action is
modeled on the behavior of the AWS Support Center `Open a new
case`_ page. Its parameters require you to specify the
following information:
#. **ServiceCode.** Represents a code for an AWS service. You
obtain the ServiceCode by calling `DescribeServices`_.
#. **CategoryCode**. Represents a category for the service
defined for the ServiceCode value. You also obtain the
        category code for a service by calling `DescribeServices`_.
Each AWS service defines its own set of category codes.
#. **SeverityCode**. Represents a value that specifies the
urgency of the case, and the time interval in which your
service level agreement specifies a response from AWS Support.
You obtain the SeverityCode by calling
`DescribeSeverityLevels`_.
#. **Subject**. Represents the **Subject** field on the AWS
Support Center `Open a new case`_ page.
#. **CommunicationBody**. Represents the **Description** field
on the AWS Support Center `Open a new case`_ page.
#. **Language**. Specifies the human language in which AWS
Support handles the case. The API currently supports English
and Japanese.
#. **CcEmailAddresses**. Represents the AWS Support Center
**CC** field on the `Open a new case`_ page. You can list
email addresses to be copied on any correspondence about the
case. The account that opens the case is already identified by
passing the AWS Credentials in the HTTP POST method or in a
method or function call from one of the programming languages
supported by an `AWS SDK`_.
The AWS Support API does not currently support the ability to
add attachments to cases. You can, however, call
`AddCommunicationToCase`_ to add information to an open case.
A successful `CreateCase`_ request returns an AWS Support case
        number. Case numbers are used by the `DescribeCases`_ request to
        retrieve existing AWS Support cases.
:type subject: string
:param subject:
:type service_code: string
:param service_code:
:type severity_code: string
:param severity_code:
:type category_code: string
:param category_code:
:type communication_body: string
:param communication_body:
:type cc_email_addresses: list
:param cc_email_addresses:
:type language: string
:param language:
:type issue_type: string
:param issue_type:
"""
params = {
'subject': subject,
'serviceCode': service_code,
'categoryCode': category_code,
'communicationBody': communication_body,
}
if severity_code is not None:
params['severityCode'] = severity_code
if cc_email_addresses is not None:
params['ccEmailAddresses'] = cc_email_addresses
if language is not None:
params['language'] = language
if issue_type is not None:
params['issueType'] = issue_type
return self.make_request(action='CreateCase',
body=json.dumps(params))
def describe_cases(self, case_id_list=None, display_id=None,
after_time=None, before_time=None,
include_resolved_cases=None, next_token=None,
max_results=None, language=None):
"""
This action returns a list of cases that you specify by
passing one or more CaseIds. In addition, you can filter the
cases by date by setting values for the AfterTime and
BeforeTime request parameters.
The response returns the following in JSON format:
#. One or more `CaseDetails`_ data types.
        #. One or more NextToken objects, strings that specify where
        to paginate the returned records represented by CaseDetails.
:type case_id_list: list
:param case_id_list:
:type display_id: string
:param display_id:
:type after_time: string
:param after_time:
:type before_time: string
:param before_time:
:type include_resolved_cases: boolean
:param include_resolved_cases:
:type next_token: string
:param next_token:
:type max_results: integer
:param max_results:
:type language: string
:param language:
"""
params = {}
if case_id_list is not None:
params['caseIdList'] = case_id_list
if display_id is not None:
params['displayId'] = display_id
if after_time is not None:
params['afterTime'] = after_time
if before_time is not None:
params['beforeTime'] = before_time
if include_resolved_cases is not None:
params['includeResolvedCases'] = include_resolved_cases
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
if language is not None:
params['language'] = language
return self.make_request(action='DescribeCases',
body=json.dumps(params))
def describe_communications(self, case_id, before_time=None,
after_time=None, next_token=None,
max_results=None):
"""
This action returns communications regarding the support case.
You can use the AfterTime and BeforeTime parameters to filter
by date. The CaseId parameter enables you to identify a
specific case by its CaseId number.
The MaxResults and NextToken parameters enable you to control
the pagination of the result set. Set MaxResults to the number
of cases you want displayed on each page, and use NextToken to
specify the resumption of pagination.
:type case_id: string
:param case_id:
:type before_time: string
:param before_time:
:type after_time: string
:param after_time:
:type next_token: string
:param next_token:
:type max_results: integer
:param max_results:
"""
params = {'caseId': case_id, }
if before_time is not None:
params['beforeTime'] = before_time
if after_time is not None:
params['afterTime'] = after_time
if next_token is not None:
params['nextToken'] = next_token
if max_results is not None:
params['maxResults'] = max_results
return self.make_request(action='DescribeCommunications',
body=json.dumps(params))
def describe_services(self, service_code_list=None, language=None):
"""
Returns the current list of AWS services and a list of service
categories that applies to each one. You then use service
names and categories in your `CreateCase`_ requests. Each AWS
service has its own set of categories.
The service codes and category codes correspond to the values
that are displayed in the **Service** and **Category** drop-
down lists on the AWS Support Center `Open a new case`_ page.
The values in those fields, however, do not necessarily match
the service codes and categories returned by the
`DescribeServices` request. Always use the service codes and
categories obtained programmatically. This practice ensures
that you always have the most recent set of service and
category codes.
:type service_code_list: list
:param service_code_list:
:type language: string
:param language:
"""
params = {}
if service_code_list is not None:
params['serviceCodeList'] = service_code_list
if language is not None:
params['language'] = language
return self.make_request(action='DescribeServices',
body=json.dumps(params))
def describe_severity_levels(self, language=None):
"""
This action returns the list of severity levels that you can
assign to an AWS Support case. The severity level for a case
is also a field in the `CaseDetails`_ data type included in
any `CreateCase`_ request.
:type language: string
:param language:
"""
params = {}
if language is not None:
params['language'] = language
return self.make_request(action='DescribeSeverityLevels',
body=json.dumps(params))
def resolve_case(self, case_id=None):
"""
Takes a CaseId and returns the initial state of the case along
with the state of the case after the call to `ResolveCase`_
completed.
:type case_id: string
:param case_id:
"""
params = {}
if case_id is not None:
params['caseId'] = case_id
return self.make_request(action='ResolveCase',
body=json.dumps(params))
def describe_trusted_advisor_check_refresh_statuses(self, check_ids):
"""
Returns the status of all refresh requests Trusted Advisor
checks called using `RefreshTrustedAdvisorCheck`_.
:type check_ids: list
:param check_ids:
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckRefreshStatuses',
body=json.dumps(params))
def describe_trusted_advisor_check_result(self, check_id, language=None):
"""
This action responds with the results of a Trusted Advisor
check. Once you have obtained the list of available Trusted
Advisor checks by calling `DescribeTrustedAdvisorChecks`_, you
specify the CheckId for the check you want to retrieve from
AWS Support.
The response for this action contains a JSON-formatted
        `TrustedAdvisorCheckResult`_ object, which is a container for the
        following three objects:
#. `TrustedAdvisorCategorySpecificSummary`_
#. `TrustedAdvisorResourceDetail`_
#. `TrustedAdvisorResourcesSummary`_
In addition, the response contains the following fields:
#. **Status**. Overall status of the check.
#. **Timestamp**. Time at which Trusted Advisor last ran the
check.
#. **CheckId**. Unique identifier for the specific check
returned by the request.
:type check_id: string
:param check_id:
:type language: string
:param language:
"""
params = {'checkId': check_id, }
if language is not None:
params['language'] = language
return self.make_request(action='DescribeTrustedAdvisorCheckResult',
body=json.dumps(params))
def describe_trusted_advisor_check_summaries(self, check_ids):
"""
This action enables you to get the latest summaries for
Trusted Advisor checks that you specify in your request. You
submit the list of Trusted Advisor checks for which you want
summaries. You obtain these CheckIds by submitting a
`DescribeTrustedAdvisorChecks`_ request.
The response body contains an array of
`TrustedAdvisorCheckSummary`_ objects.
:type check_ids: list
:param check_ids:
"""
params = {'checkIds': check_ids, }
return self.make_request(action='DescribeTrustedAdvisorCheckSummaries',
body=json.dumps(params))
def describe_trusted_advisor_checks(self, language):
"""
This action enables you to get a list of the available Trusted
Advisor checks. You must specify a language code. English
("en") and Japanese ("jp") are currently supported. The
response contains a list of `TrustedAdvisorCheckDescription`_
objects.
:type language: string
:param language:
"""
params = {'language': language, }
return self.make_request(action='DescribeTrustedAdvisorChecks',
body=json.dumps(params))
def refresh_trusted_advisor_check(self, check_id):
"""
This action enables you to query the service to request a
refresh for a specific Trusted Advisor check. Your request
body contains a CheckId for which you are querying. The
response body contains a `RefreshTrustedAdvisorCheckResult`_
object containing Status and TimeUntilNextRefresh fields.
:type check_id: string
:param check_id:
"""
params = {'checkId': check_id, }
return self.make_request(action='RefreshTrustedAdvisorCheck',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read()
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
prakhya/linux_sai | Documentation/sphinx/kernel_include.py | 949 | 8570 | #!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
kernel-include
~~~~~~~~~~~~~~
Implementation of the ``kernel-include`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``kernel-include`` reST-directive is a replacement for the ``include``
    directive. The ``kernel-include`` directive expands environment variables in
    the path name and allows files to be included from arbitrary locations.
.. hint::
Including files from arbitrary locations (e.g. from ``/etc``) is a
security risk for builders. This is why the ``include`` directive from
      docutils *prohibits* pathnames pointing to locations *above* the filesystem
tree where the reST document with the include directive is placed.
Substrings of the form $name or ${name} are replaced by the value of
environment variable name. Malformed variable names and references to
non-existing variables are left unchanged.
"""
# ==============================================================================
# imports
# ==============================================================================
import os.path
from docutils import io, nodes, statemachine
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
# ==============================================================================
def setup(app):
# ==============================================================================
app.add_directive("kernel-include", KernelInclude)
return dict(
version = __version__,
parallel_read_safe = True,
parallel_write_safe = True
)
# ==============================================================================
class KernelInclude(Include):
# ==============================================================================
u"""KernelInclude (``kernel-include``) directive"""
def run(self):
path = os.path.realpath(
os.path.expandvars(self.arguments[0]))
        # to get a bit of security back, prohibit /etc:
if path.startswith(os.sep + "etc"):
raise self.severe(
'Problems with "%s" directive, prohibited path: %s'
% (self.name, path))
self.arguments[0] = path
#return super(KernelInclude, self).run() # won't work, see HINTs in _run()
return self._run()
def _run(self):
"""Include a file as part of the content of this reST file."""
        # HINT: I had to copy&paste the whole Include.run method. I'm not happy
# with this, but due to security reasons, the Include.run method does
# not allow absolute or relative pathnames pointing to locations *above*
# the filesystem tree where the reST document is placed.
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = directives.path(self.arguments[0])
if path.startswith('<') and path.endswith('>'):
path = os.path.join(self.standard_include_path, path[1:-1])
path = os.path.normpath(os.path.join(source_dir, path))
# HINT: this is the only line I had to change / commented out:
#path = utils.relative_path(None, path)
path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get(
'tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe('Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
lines = include_file.readlines()
rawtext = ''.join(lines[startline:endline])
else:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe('Problem with "%s" directive:\n%s' %
(self.name, ErrorString(error)))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
after_text = self.options.get('start-after', None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[after_index + len(after_text):]
before_text = self.options.get('end-before', None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[:before_index]
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
if 'literal' in self.options:
# Convert tabs to spaces, if `tab_width` is positive.
if tab_width >= 0:
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
literal_block = nodes.literal_block(rawtext, source=path,
classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if 'number-lines' in self.options:
try:
startline = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer '
'start value')
endline = startline + len(include_lines)
if text.endswith('\n'):
text = text[:-1]
tokens = NumberLines([([], text)], startline, endline)
for classes, value in tokens:
if classes:
literal_block += nodes.inline(value, value,
classes=classes)
else:
literal_block += nodes.Text(value, value)
else:
literal_block += nodes.Text(text, text)
return [literal_block]
if 'code' in self.options:
self.options['source'] = path
codeblock = CodeBlock(self.name,
[self.options.pop('code')], # arguments
self.options,
include_lines, # content
self.lineno,
self.content_offset,
self.block_text,
self.state,
self.state_machine)
return codeblock.run()
self.state_machine.insert_input(include_lines, path)
return []
| gpl-2.0 |
digideskio/brackets-shell | gyp/pylib/gyp/generator/gypd.py | 912 | 3325 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
| mit |
LEEClab/LS_METRICS | previous_versions/LSMetrics_v1_0_0_garbage.py | 1 | 28905 | # LSMetrics v 1.0.0 Garbage
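# Imports assumed by the functions in this excerpt (this "garbage" file is a
# fragment of the full module, which loads the GRASS scripting API as `grass`):
import os
import grass.script as grass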
#-------------------------------------------
def createBinarios_single(ListMapBins, prepareBIODIM):
"""
    This function reclassifies an input map into a binary map, according to reclassification rules passed by
a text file
"""
readtxt = selectdirectory()
grass.run_command('g.region', rast=ListMapBins)
grass.run_command('r.reclass', input=ListMapBins, output=ListMapBins+'_HABMAT',
rules=readtxt, overwrite = True)
if prepareBIODIM:
mapsList = grass.list_grouped ('rast', pattern='(*)') ['userbase']
else:
mapsList = grass.list_grouped ('rast', pattern='(*)') ['PERMANENT']
return readtxt
#-------------------------------------------
def createBinarios(ListMapBins, prepareBIODIM):
"""
    This function reclassifies a series of input maps into binary maps, according to reclassification rules passed by
a text file
"""
readtxt = selectdirectory()
for i in ListMapBins:
grass.run_command('g.region',rast=i)
grass.run_command('r.reclass',input=i,output=i+'_HABMAT',rules=readtxt, overwrite = True)
if prepareBIODIM:
mapsList=grass.list_grouped ('rast', pattern='(*)') ['userbase']
else:
mapsList=grass.list_grouped ('rast', pattern='(*)') ['current_mapset']
return readtxt
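# Example of the reclassification rules file that selectdirectory() is expected
# to return for the two functions above (illustrative; the class codes depend
# on the legend of the input land-cover map). Standard GRASS r.reclass syntax:
#
#   1 2 5 = 1    habitat classes
#   *     = 0    everything else (matrix)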
#-------------------------------------------
def create_habmat_single(ListMapBins_in, prefix, dirout, list_habitat_classes, prepareBIODIM, calcStatistics):
"""
Function for a single map
    This function reclassifies an input map into a binary habitat/matrix map,
    given the list of classes that represent habitat (all other classes are treated as matrix)
"""
ListMapBins = prefix+ListMapBins_in
    # option 1: read a rules text file and reclassify with r.reclass
    # THIS STILL NEEDS TO BE ORGANIZED!!
#readtxt=selectdirectory()
#grass.run_command('g.region',rast=ListMapBins)
#grass.run_command('r.reclass',input=ListMapBins,output=ListMapBins+'_HABMAT',rules=readtxt, overwrite = True)
    # option 2: define which classes are habitat; all other classes become matrix
if(len(list_habitat_classes) > 0):
conditional = ''
cc = 0
for j in list_habitat_classes:
if cc > 0:
conditional = conditional+' || '
conditional = conditional+ListMapBins_in+' == '+j
cc += 1
expression = ListMapBins+'_HABMAT = if('+conditional+', 1, 0)'
grass.run_command('g.region', rast=ListMapBins_in)
grass.mapcalc(expression, overwrite = True, quiet = True)
        grass.run_command('r.null', map=ListMapBins+'_HABMAT', null='0') # is this really needed?? not sure
else:
        print 'You did not type which class is habitat!! Map not generated' # this should raise an error instead; could be done with try/except
if prepareBIODIM:
        create_TXTinputBIODIM([ListMapBins+'_HABMAT'], dirout, "simulados_HABMAT")
else:
grass.run_command('g.region', rast=ListMapBins+'_HABMAT')
grass.run_command('r.out.gdal', input=ListMapBins+'_HABMAT', out=ListMapBins+'_HABMAT.tif',overwrite = True)
if calcStatistics:
createtxt(ListMapBins+'_HABMAT', dirout, ListMapBins+'_HABMAT')
#-------------------------------------------
def exportPNG(mapinp=[]):
"""
This function exports a series of raster maps as png images
"""
lista_png=[]
for i in mapinp:
grass.run_command('r.out.png',input=i,out=i)
lista_png.append(i+'.png')
return lista_png
#----------------------------------------------------------------------------------
# Metrics for patch size/area/ID (PATCH)
def patchSingle(Listmapspatch_in, prefix,dirout,prepareBIODIM,calcStatistics,removeTrash):
"""
Function for a single map
This function calculates area per patch in a map (PATCH), considering structural connectivity
(no fragmentation or dilatation):
- generates and exports maps with Patch ID and Area of each patch
    - generates statistics - Area per patch (if calcStatistics == True)
"""
Listmapspatch = prefix+Listmapspatch_in
grass.run_command('g.region', rast=Listmapspatch_in)
grass.run_command('r.clump', input=Listmapspatch_in, output=Listmapspatch+"_patch_clump", overwrite = True)
    ########## does this next line change anything?? clump * habitat/non-habitat
expression12=Listmapspatch+"_patch_clump_mata = "+Listmapspatch+"_patch_clump*"+Listmapspatch_in
grass.mapcalc(expression12, overwrite = True, quiet = True)
expression13=Listmapspatch+"_patch_clump_mata_limpa_pid = if("+Listmapspatch+"_patch_clump_mata > 0, "+Listmapspatch+"_patch_clump_mata, null())"
grass.mapcalc(expression13, overwrite = True, quiet = True)
nametxtreclass=rulesreclass(Listmapspatch+"_patch_clump_mata_limpa_pid", dirout)
grass.run_command('r.reclass', input=Listmapspatch+"_patch_clump_mata_limpa_pid", output=Listmapspatch+"_patch_clump_mata_limpa_AreaHA", rules=nametxtreclass, overwrite = True)
os.remove(nametxtreclass)
if prepareBIODIM:
#grass.run_command('r.out.gdal',input=Listmapspatch+"_patch_clump_mata_limpa",out=Listmapspatch+"_patch_PID.tif")
create_TXTinputBIODIM([Listmapspatch+"_patch_clump_mata_limpa_pid"], "simulados_HABMAT_grassclump_PID", dirout)
create_TXTinputBIODIM([Listmapspatch+"_patch_clump_mata_limpa_AreaHA"], "simulados_HABMAT_grassclump_AREApix", dirout)
else:
grass.run_command('g.region', rast=Listmapspatch+"_patch_clump_mata_limpa_AreaHA")
grass.run_command('r.out.gdal', input=Listmapspatch+"_patch_clump_mata_limpa_AreaHA", out=Listmapspatch+"_patch_AreaHA.tif",overwrite = True)
if calcStatistics:
createtxt(Listmapspatch+"_patch_clump_mata_limpa_pid", dirout, Listmapspatch+"_patch_AreaHA")
if removeTrash:
if prepareBIODIM:
txts = [Listmapspatch+"_patch_clump", Listmapspatch+"_patch_clump_mata"]
else:
txts = [Listmapspatch+"_patch_clump", Listmapspatch+"_patch_clump_mata"] #, Listmapspatch+"_patch_clump_mata_limpa_pid"]
for txt in txts:
grass.run_command('g.remove', type="raster", name=txt, flags='f')
#-------------------------------------------
def areaFragSingle(map_HABITAT_Single, prefix, dirout, list_esc_areaFrag,
prepareBIODIM, calcStatistics, removeTrash):
"""
Function for a single map
This function fragments patches (FRAG), excluding corridors and edges given input scales (distances), and:
- generates and exports maps with Patch ID and Area of each "fragmented" patch
    - generates statistics - Area per patch (if calcStatistics == True)
"""
ListmapsFrag = prefix+map_HABITAT_Single
grass.run_command('g.region', rast=map_HABITAT_Single)
Lista_escalafragM, listmeters = escala_frag(map_HABITAT_Single,list_esc_areaFrag)
x=0
for a in Lista_escalafragM:
meters=int(listmeters[x])
#print escalafragM
grass.run_command('r.neighbors', input=map_HABITAT_Single, output=ListmapsFrag+"_ero_"+`meters`+'m', method='minimum', size=a, overwrite = True)
grass.run_command('r.neighbors', input=ListmapsFrag+"_ero_"+`meters`+'m', output=ListmapsFrag+"_dila_"+`meters`+'m', method='maximum', size=a, overwrite = True)
expression1=ListmapsFrag+"_FRAG"+`meters`+"m_mata = if("+ListmapsFrag+"_dila_"+`meters`+'m'+" > 0, "+ListmapsFrag+"_dila_"+`meters`+'m'+", null())"
grass.mapcalc(expression1, overwrite = True, quiet = True)
expression2=ListmapsFrag+"_FRAG"+`meters`+"m_mata_lpo = if("+map_HABITAT_Single+" >= 0, "+ListmapsFrag+"_FRAG"+`meters`+"m_mata, null())"
grass.mapcalc(expression2, overwrite = True, quiet = True)
grass.run_command('r.clump', input=ListmapsFrag+"_FRAG"+`meters`+"m_mata_lpo", output=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid", overwrite = True)
grass.run_command('g.region', rast=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid")
nametxtreclass=rulesreclass(ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid", dirout)
grass.run_command('r.reclass', input=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid", output=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_AreaHA", rules=nametxtreclass, overwrite = True)
os.remove(nametxtreclass)
        # identifying branches, stepping stones, and corridors
expression3='temp_BSSC=if(isnull('+ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_AreaHA"+'),'+map_HABITAT_Single+')'
grass.mapcalc(expression3, overwrite = True, quiet = True)
expression1="MapaBinario=temp_BSSC"
grass.mapcalc(expression1, overwrite = True, quiet = True)
grass.run_command('g.region',rast="MapaBinario")
expression2="A=MapaBinario"
grass.mapcalc(expression2, overwrite = True, quiet = True)
grass.run_command('g.region',rast="MapaBinario")
expression3="MapaBinario_A=if(A[0,0]==0 && A[0,-1]==1 && A[1,-1]==0 && A[1,0]==1,1,A)"
grass.mapcalc(expression3, overwrite = True, quiet = True)
expression4="A=MapaBinario_A"
grass.mapcalc(expression4, overwrite = True, quiet = True)
expression5="MapaBinario_AB=if(A[0,0]==0 && A[-1,0]==1 && A[-1,1]==0 && A[0,1]==1,1,A)"
grass.mapcalc(expression5, overwrite = True, quiet = True)
expression6="A=MapaBinario_AB"
grass.mapcalc(expression6, overwrite = True, quiet = True)
expression7="MapaBinario_ABC=if(A[0,0]==0 && A[0,1]==1 && A[1,1]==0 && A[1,0]==1,1,A)"
grass.mapcalc(expression7, overwrite = True, quiet = True)
expression8="A=MapaBinario_ABC"
grass.mapcalc(expression8, overwrite = True, quiet = True)
expression9="MapaBinario_ABCD=if(A[0,0]==0 && A[1,0]==1 && A[1,1]==0 && A[0,1]==1,1,A)"
grass.mapcalc(expression9, overwrite = True, quiet = True)
expression4='MapaBinario_ABCD1=if(MapaBinario_ABCD==0,null(),1)'
grass.mapcalc(expression4, overwrite = True, quiet = True)
grass.run_command('r.clump', input='MapaBinario_ABCD1', output="MapaBinario_ABCD1_pid", overwrite = True)
grass.run_command('r.neighbors', input='MapaBinario_ABCD1_pid', output='MapaBinario_ABCD1_pid_mode', method='mode', size=3, overwrite = True)
grass.run_command('r.cross', input=ListmapsFrag+"_FRAG"+`meters`+'m_mata_clump_pid,MapaBinario_ABCD1_pid_mode',out=ListmapsFrag+"_FRAG"+`meters`+'m_mata_clump_pid_cross_corredor',overwrite = True)
        cross_TB = grass.read_command('r.stats', input=ListmapsFrag+"_FRAG"+`meters`+'m_mata_clump_pid_cross_corredor', flags='l') # read the labelled cross-tabulation between patch IDs and corridor IDs
print cross_TB
txt=open("table_cross.txt",'w')
txt.write(cross_TB)
txt.close()
reclass_frag_cor('MapaBinario_ABCD1_pid', dirout)
expression10='MapaBinario_ABCD1_pid_reclass_sttepings=if(isnull(MapaBinario_ABCD1_pid_reclass)&&temp_BSSC==1,3,MapaBinario_ABCD1_pid_reclass)'
grass.mapcalc(expression10, overwrite = True, quiet = True)
expression11='MapaBinario_ABCD1_pid_reclass_sttepings2=if(temp_BSSC==1,MapaBinario_ABCD1_pid_reclass_sttepings,null())'
grass.mapcalc(expression11, overwrite = True, quiet = True)
if prepareBIODIM:
#grass.run_command('r.out.gdal',input=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid",out=ListmapsFrag+"_FRAG"+`meters`+"m_PID.tif")
create_TXTinputBIODIM([ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid"], "simulados_HABMAT_FRAC_"+`meters`+"m_PID", dirout)
create_TXTinputBIODIM([ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_AreaHA"], "simulados_HABMAT_FRAC_"+`meters`+"m_AREApix", dirout)
else:
grass.run_command('g.region', rast=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_AreaHA")
grass.run_command('r.out.gdal', input=ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_AreaHA", out=ListmapsFrag+"_FRAG"+`meters`+"m_AreaHA.tif",overwrite = True)
if calcStatistics:
createtxt(ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid", dirout, ListmapsFrag+"_FRAG"+`meters`+"m_AreaHA")
if removeTrash:
if prepareBIODIM:
txts = [ListmapsFrag+"_ero_"+`meters`+'m', ListmapsFrag+"_dila_"+`meters`+'m', ListmapsFrag+"_FRAG"+`meters`+"m_mata", ListmapsFrag+"_FRAG"+`meters`+"m_mata_lpo"]
else:
txts = [ListmapsFrag+"_ero_"+`meters`+'m', ListmapsFrag+"_dila_"+`meters`+'m', ListmapsFrag+"_FRAG"+`meters`+"m_mata", ListmapsFrag+"_FRAG"+`meters`+"m_mata_lpo"] #, ListmapsFrag+"_FRAG"+`meters`+"m_mata_clump_pid"]
for txt in txts:
grass.run_command('g.remove', type="raster", name=txt, flags='f')
x=x+1
#-------------------------------------------
def areaconSingle(mapHABITAT_Single, prefix, dirout, escala_frag_con,
prepareBIODIM, calcStatistics, removeTrash):
    """
    Function for a single map
    This function calculates functional patch area in a map (CON), considering functional connectivity
    (dilatation of edges given input scales/distances), and:
    - generates and exports maps with Patch ID and Area of each patch
    - generates statistics - Area per patch (if calcStatistics == True)
    """
    os.chdir(dirout)
Listmapspatch = prefix+mapHABITAT_Single
grass.run_command('g.region', rast=mapHABITAT_Single)
listescalafconM, listmeters = escala_con(mapHABITAT_Single, escala_frag_con)
x=0
for a in listescalafconM:
meters = int(listmeters[x])
grass.run_command('r.neighbors', input=mapHABITAT_Single, output=Listmapspatch+"_dila_"+`meters`+'m_orig', method='maximum', size=a, overwrite = True)
expression=Listmapspatch+"_dila_"+`meters`+'m_orig_temp = if('+Listmapspatch+"_dila_"+`meters`+'m_orig == 0, null(), '+Listmapspatch+"_dila_"+`meters`+'m_orig)'
grass.mapcalc(expression, overwrite = True, quiet = True)
grass.run_command('r.clump', input=Listmapspatch+"_dila_"+`meters`+'m_orig_temp', output=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid', overwrite = True)
espressao1=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata = '+mapHABITAT_Single+'*'+Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid'
grass.mapcalc(espressao1, overwrite = True, quiet = True)
espressao2=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid = if('+Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata > 0, '+Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata, null())'
grass.mapcalc(espressao2, overwrite = True, quiet = True)
nametxtreclass=rulesreclass(Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid', dirout)
grass.run_command('r.reclass', input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid', output=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_AreaHA', rules=nametxtreclass, overwrite = True)
os.remove(nametxtreclass)
if prepareBIODIM:
#grass.run_command('r.out.gdal',input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid',out=Listmapspatch+"_dila_"+`meters`+'m_clean_PID.tif')
create_TXTinputBIODIM([Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid'], dirout, "simulados_HABMAT_grassclump_dila_"+`meters`+"m_clean_PID")
create_TXTinputBIODIM([Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_AreaHA'], dirout, "simulados_HABMAT_grassclump_dila_"+`meters`+"m_clean_AREApix")
            ########### computing the complete area, exporting it and also the complete PID - do we really need to generate a complete area as well?
nametxtreclass=rulesreclass(Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid',dirout)
grass.run_command('r.reclass', input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid', output=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_complete_AreaHA', rules=nametxtreclass, overwrite = True)
os.remove(nametxtreclass)
#grass.run_command('r.out.gdal', input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_complete_AreaHA', out=Listmapspatch+"_dila_"+`meters`+'m_complete_AreaHA.tif')
#grass.run_command('r.out.gdal', input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid', out=Listmapspatch+"_dila_"+`meters`+'m_complete_PID.tif')
create_TXTinputBIODIM([Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid'], dirout, "simulados_HABMAT_grassclump_dila_"+`meters`+"m_complete_PID")
create_TXTinputBIODIM([Listmapspatch+"_dila_"+`meters`+'m_orig_clump_complete_AreaHA'], dirout, "simulados_HABMAT_grassclump_dila_"+`meters`+"m_complete_AREApix")
else:
grass.run_command('g.region', rast=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_AreaHA')
grass.run_command('r.out.gdal', input=Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_AreaHA', out=Listmapspatch+"_dila_"+`meters`+'m_clean_AreaHA.tif',overwrite = True)
if calcStatistics:
createtxt(Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata_limpa_pid', dirout, Listmapspatch+"_dila_"+`meters`+"m_clean_AreaHA") # clean
createtxt(Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid', dirout, Listmapspatch+"_dila_"+`meters`+"m_complete_AreaHA") # complete
if removeTrash:
if prepareBIODIM:
txts = [Listmapspatch+"_dila_"+`meters`+'m_orig', Listmapspatch+"_dila_"+`meters`+'m_orig_temp', Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata']
else:
txts = [Listmapspatch+"_dila_"+`meters`+'m_orig', Listmapspatch+"_dila_"+`meters`+'m_orig_temp', Listmapspatch+"_dila_"+`meters`+'m_orig_clump_pid', Listmapspatch+"_dila_"+`meters`+'m_orig_clump_mata']
for txt in txts:
grass.run_command('g.remove', type="raster", name=txt, flags='f')
x=x+1
#-------------------------------------------
# funcao de pcts
def PCTs_single(mapbin_HABITAT,escales):
for i in escales:
esc=int(i)
outputname=mapbin_HABITAT+"_PCT_esc_"+`esc`
windowsize=getsizepx(mapbin_HABITAT, esc)
grass.run_command('g.region', rast=mapbin_HABITAT)
grass.run_command('r.neighbors',input=mapbin_HABITAT,out="temp_PCT",method='average',size=windowsize,overwrite = True )
expression1=outputname+'=temp_PCT*100'
grass.mapcalc(expression1, overwrite = True, quiet = True)
grass.run_command('r.out.gdal', input=outputname, out=outputname+'.tif', overwrite = True)
grass.run_command('g.remove', type="raster", name='temp_PCT', flags='f')
#-------------------------------------------
def PCTs(Listmap_HABITAT,escales):
for i in escales:
esc=int(i)
for mapHABT in Listmap_HABITAT:
outputname=mapHABT+"_PCT_esc_"+`esc`
windowsize=getsizepx(mapHABT, esc)
grass.run_command('g.region', rast=mapHABT)
grass.run_command('r.neighbors',input=mapHABT,out="temp_PCT",method='average',size=windowsize,overwrite = True )
expression1=outputname+'=temp_PCT*100'
grass.mapcalc(expression1, overwrite = True, quiet = True)
grass.run_command('r.out.gdal', input=outputname, out=outputname+'.tif', overwrite = True)
grass.run_command('g.remove', type="raster", name='temp_PCT', flags='f')
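# Illustrative calls for the two percentage-of-habitat functions above (map
# names and scales, in meters, are placeholders):
#
#   PCTs_single('landscape_HABMAT', escales=[500, 1000])
#   PCTs(['lands1_HABMAT', 'lands2_HABMAT'], escales=[500])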
#-------------------------------------------
def dist_edge_Single(Listmapsdist_in, prefix,prepareBIODIM, dirout,removeTrash):
"""
Function for a single map
This function calculates the distance of each pixel to habitat edges, considering
negative values (inside patches) and positive values (into the matrix). Also:
- generates and exports maps of distance to edge (DIST)
"""
Listmapsdist = prefix+Listmapsdist_in
grass.run_command('g.region', rast=Listmapsdist_in)
expression1=Listmapsdist+'_invert = if('+Listmapsdist_in+' == 0, 1, null())'
grass.mapcalc(expression1, overwrite = True, quiet = True)
grass.run_command('r.grow.distance', input=Listmapsdist+'_invert', distance=Listmapsdist+'_invert_forest_neg_eucldist',overwrite = True)
expression2=Listmapsdist+'_invert_matrix = if('+Listmapsdist_in+' == 0, null(), 1)'
grass.mapcalc(expression2, overwrite = True, quiet = True)
grass.run_command('r.grow.distance', input=Listmapsdist+'_invert_matrix', distance=Listmapsdist+'_invert_matrix_pos_eucldist',overwrite = True)
expression3=Listmapsdist+'_dist = '+Listmapsdist+'_invert_matrix_pos_eucldist-'+Listmapsdist+'_invert_forest_neg_eucldist'
grass.mapcalc(expression3, overwrite = True, quiet = True)
if prepareBIODIM:
create_TXTinputBIODIM([Listmapsdist+'_dist'], dirout, "simulados_HABMAT_DIST")
else:
grass.run_command('r.out.gdal', input=Listmapsdist+'_dist', out=Listmapsdist+'_DIST.tif', overwrite = True)
if removeTrash:
txts = [Listmapsdist+'_invert', Listmapsdist+'_invert_forest_neg_eucldist', Listmapsdist+'_invert_matrix', Listmapsdist+'_invert_matrix_pos_eucldist']
for txt in txts:
grass.run_command('g.remove', type="raster", name=txt, flags='f')
#-------------------------------------------
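# Illustrative call (hypothetical names and paths; assumes a binary habitat
# map where habitat pixels are 1 and matrix pixels are 0):
#   dist_edge_Single('HABMAT_bin', 'lsm_', False, '/tmp/output', True)
# This would create 'lsm_HABMAT_bin_dist' with negative distances inside
# patches and positive distances in the matrix, exported as a GeoTIFF when
# prepareBIODIM is False.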
def create_EDGE_single(ListmapsED_in, escale_ed, dirs, prefix,calcStatistics,removeTrash,escale_pct):
"""
Function for a single map
This function separates habitat area into edge and interior/core regions, given a scale/distance defined as edge, and:
- generates and exports maps with each region
- generates statistics - Area per region (matrix/edge/core) (if calcStatistics == True)
"""
os.chdir(dirs)
ListmapsED = prefix+ListmapsED_in
grass.run_command('g.region', rast=ListmapsED_in)
listsize, listmeters = escala_frag(ListmapsED_in, escale_ed)
cont_escale=0
for i in listsize:
apoioname = int(listmeters[cont_escale])
formatnumber='0000'+`apoioname`
formatnumber=formatnumber[-4:]
outputname_meco=ListmapsED+'_MECO_'+formatnumber+'m' # output name for the edge-core-matrix map
outputname_core=ListmapsED+'_CORE_'+formatnumber+'m' # output name for the core map
outputname_edge=ListmapsED+'_EDGE_'+formatnumber+'m' # output name for the edge map
grass.run_command('r.neighbors', input=ListmapsED_in, output=ListmapsED+"_eroED_"+`apoioname`+'m', method='minimum', size=i, overwrite = True)
inputs=ListmapsED+"_eroED_"+`apoioname`+'m,'+ListmapsED_in
out=ListmapsED+'_EDGE'+`apoioname`+'m_temp1'
grass.run_command('r.series', input=inputs, out=out, method='sum', overwrite = True)
espressaoEd=ListmapsED+'_EDGE'+`apoioname`+'m_temp2 = int('+ListmapsED+'_EDGE'+`apoioname`+'m_temp1)' # creating an integer map
mapcalcED(espressaoEd)
espressaoclip=outputname_meco+'= if('+ListmapsED_in+' >= 0, '+ListmapsED+'_EDGE'+`apoioname`+'m_temp2, null())'
mapcalcED(espressaoclip)
espressaocore=outputname_core+'= if('+outputname_meco+'==2,1,0)'
grass.mapcalc(espressaocore, overwrite = True, quiet = True)
espressaoedge=outputname_edge+'= if('+outputname_meco+'==1,1,0)'
grass.mapcalc(espressaoedge, overwrite = True, quiet = True)
grass.run_command('r.out.gdal', input=outputname_meco, out=outputname_meco+'.tif', overwrite = True)
grass.run_command('r.out.gdal', input=outputname_edge, out=outputname_edge+'.tif', overwrite = True)
grass.run_command('r.out.gdal', input=outputname_core, out=outputname_core+'.tif', overwrite = True)
print '>>>>>>>>>>>>>>>>>>>>',escale_pct
if len(escale_pct)>0:
for pct in escale_pct:
pctint=int(pct)
formatnumber='0000'+`pctint`
formatnumber=formatnumber[-4:]
outputname_edge_pct=outputname_edge+'_PCT_esc_'+formatnumber
size=getsizepx(outputname_edge, pctint)
grass.run_command('r.neighbors', input=outputname_edge, output="temp_pct", method='average', size=size, overwrite = True)
espressaoedge=outputname_edge_pct+'=temp_pct*100'
grass.mapcalc(espressaoedge, overwrite = True, quiet = True)
grass.run_command('r.out.gdal', input=outputname_edge_pct, out=outputname_edge_pct+'.tif', overwrite = True)
grass.run_command('g.remove', type="raster", name='temp_pct', flags='f')
if calcStatistics:
createtxt(ListmapsED+'_EDGE'+`apoioname`+'m', dirs, out)
if removeTrash:
grass.run_command('g.remove', type="raster", name=ListmapsED+"_eroED_"+`apoioname`+'m,'+ListmapsED+'_EDGE'+`apoioname`+'m_temp1,'+ListmapsED+'_EDGE'+`apoioname`+'m_temp2', flags='f')
cont_escale=cont_escale+1
#-------------------------------------------
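# Illustrative call (hypothetical names and values): classify a binary habitat
# map into matrix/edge/core using a 120 m edge depth, and also export a
# percentage-of-edge map at a 500 m scale:
#   create_EDGE_single('HABMAT_bin', [120], '/tmp/output', 'lsm_', False, True, [500])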
#----------------------------------------------------------------------------------
# functions for Shannon diversity
def createUiqueList(tab_fid00_arry_subset_list,dim):
tab_fid00_arry_subset_list_apoio=[]
for i in xrange(dim):
temp1=tab_fid00_arry_subset_list[i][:]
for j in temp1:
if j != -9999 :
tab_fid00_arry_subset_list_apoio.append(j)
return tab_fid00_arry_subset_list_apoio
def Shannon(st):
st = st
stList = list(st)
alphabet = list(Set(st)) # list of symbols in the string
# calculate the frequency of each symbol in the string
freqList = []
for symbol in alphabet:
ctr = 0
for sym in stList:
if sym == symbol:
ctr += 1
freqList.append(float(ctr) / len(stList))
# Shannon entropy
ent = 0.0
for freq in freqList:
ent = ent + freq * math.log(freq, 2)
ent = -ent
#print int(math.ceil(ent))
return ent
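# Worked example (for illustration only): for the list ['a', 'a', 'b', 'b']
# each symbol has frequency 0.5, so the entropy is
#   -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0
# and Shannon(['a', 'a', 'b', 'b']) returns 1.0.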
def removeBlancsapce(ls):
ls2=[]
for i in ls:
if i != "":
ls2.append(i)
return ls2
def setNodata(arry,nrow,ncol,nodata):
for i in xrange(nrow):
for j in xrange(ncol):
arry[i][j]=nodata
return arry
#----------------------------------------------------------------------------------
def shannon_diversity(landuse_map,dirout,Raio_Analise):
for raio in Raio_Analise:
raio_int=int(raio)
os.chdir(dirout) #
grass.run_command('g.region',rast=landuse_map)
grass.run_command('r.out.ascii',input=landuse_map,output='landuse_map.asc',null_value=-9999,flags='h')
landusemap_arry=np.loadtxt('landuse_map.asc')
NRows,Ncols=landusemap_arry.shape
region_info = grass.parse_command('g.region', rast=landuse_map, flags='m') # getting the region resolution
cell_size = float(region_info['ewres'])
north=float(region_info['n'])
south=float(region_info['s'])
east=float(region_info['e'])
west=float(region_info['w'])
rows=int(region_info['rows'])
cols=int(region_info['cols'])
Nodata=-9999
JanelaLinha=(raio_int/cell_size)
new_array = np.zeros(shape=(NRows,Ncols))
new_array=setNodata(new_array,NRows,Ncols,Nodata)
JanelaLinha= int(JanelaLinha)
#
for i in xrange(JanelaLinha,NRows-JanelaLinha):
for j in xrange(JanelaLinha,Ncols-JanelaLinha):
landusemap_arry_subset=landusemap_arry[i-JanelaLinha:i+JanelaLinha,j-JanelaLinha:j+JanelaLinha]
landusemap_arry_subset_list=landusemap_arry_subset.tolist()
landusemap_arry_subset_list=createUiqueList(landusemap_arry_subset_list,len(landusemap_arry_subset_list))
landusemap_arry_subset_list=map(str,landusemap_arry_subset_list)
new_array[i][j]=round(Shannon(landusemap_arry_subset_list),6)
txt=open("landuse_map_shannon.asc",'w')
L_parameters_Info_asc=['north: ',`north`+'\nsouth: ',`south`+'\neast: ',`east`+'\nwest: ',`west`+'\nrows: ',`rows`+'\ncols: '+`cols`+'\n']
check_ultm=1 # counter used to detect the last header entry
for i in L_parameters_Info_asc:
if check_ultm==len(L_parameters_Info_asc):
txt.write(i)
else:
txt.write(i+' ')
check_ultm=check_ultm+1
for i in range(NRows):
for j in range(Ncols):
txt.write(str(new_array[i][j])+' ')
txt.write('\n')
txt.close()
grass.run_command('r.in.ascii',input="landuse_map_shannon.asc",output=landuse_map+"_Shanno_Div_Esc_"+`raio_int`,overwrite=True,null_value=-9999)
grass.run_command('r.colors',map=landuse_map+"_Shanno_Div_Esc_"+`raio_int`,color='differences')
os.remove('landuse_map_shannon.asc')
os.remove('landuse_map.asc')
#-------------------------------------------
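# Illustrative call (hypothetical map name, directory and radius): compute a
# moving-window Shannon diversity map from a land-use raster using a 500 m
# analysis radius, writing <landuse>_Shanno_Div_Esc_500 back into the mapset:
#   shannon_diversity('landuse_2010', '/tmp/output', [500])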
#-------------------------------------------
#-------------------------------------------
#------------------------------------------- | gpl-2.0 |
mollyproject/mollyproject | molly/apps/search/tests.py | 1 | 6280 | import sys
import inspect
from django.utils import unittest
from django.conf import settings
from django.utils.importlib import import_module
from molly.utils.views import BaseView
from molly.utils.breadcrumbs import NullBreadcrumb
class Argspec(tuple):
args = property(lambda self: self[0])
varargs = property(lambda self: self[1])
keywords = property(lambda self: self[2])
defaults = property(lambda self: self[3])
def getargspec(*args, **kwargs):
return Argspec(inspect.getargspec(*args, **kwargs))
class GenericSearchTestCase(unittest.TestCase):
def testViewSignatures(self):
for app_name in settings.INSTALLED_APPS:
try:
views = import_module(app_name+'.views')
except ImportError:
continue
for view_name in dir(views):
view = getattr(views, view_name)
if not isinstance(view, type):
continue
if not BaseView in view.__mro__:
continue
metadata_sig = None
breadcrumb_sig = None
handler_sigs = []
initial_context_sig = None
for func_name in dir(view):
func = getattr(view, func_name)
if func_name == 'get_metadata':
metadata_sig = getargspec(func)
elif func_name == 'initial_context':
initial_context_sig = getargspec(func)
elif func_name.startswith('handle_') and func_name[7:].upper() == func_name[7:]:
handler_sigs.append( (func_name, getargspec(func)) )
elif func_name == 'breadcrumb':
if func is BaseView.breadcrumb:
breadcrumb_sig = True
continue
# If it's not gone through BreadcrumbFactory
elif type(func) == type(BaseView.breadcrumb):
breadcrumb_sig = getargspec(func)
else:
breadcrumb_sig = getargspec(func.breadcrumb_func)
else:
continue
if not handler_sigs:
continue
if not breadcrumb_sig:
self.fail('%s.%s does not define a breadcrumb' % (app_name, view_name))
# Keep track of the first handler sig to compare things to
fhn, fhs = handler_sigs[0]
self.assertEqual(
fhs.args[:3],
['self','request','context'],
"View handler %s.views.%s.%s must take (self, request, context) as its first three arguments" % (
app_name, view_name, fhn,
)
)
for handler_name, argspec in handler_sigs:
if handler_name != 'handle_HEAD':
self.assertEqual(
fhs.args, argspec.args,
'View handler signatures differ for %s.views.%s: %s and %s' % (
app_name, view_name, fhn, handler_name
),
)
#self.assertEqual(
# argspec.varargs, None,
# "View handler %s.views.%s.%s takes *%s when it shouldn't" % (
# app_name, view_name, handler_name, argspec.varargs
# ),
#)
#self.assertEqual(
# argspec.keywords, None,
# "View handler %s.views.%s.%s takes **%s when it shouldn't" % (
# app_name, view_name, handler_name, argspec.keywords
# ),
#)
if not (initial_context_sig.varargs or initial_context_sig.keywords):
self.assertEqual(
initial_context_sig.args,
fhs.args[:2] + fhs.args[3:],
"initial_context for %s.views.%s has a signature inconsistent with the handlers" % (
app_name, view_name,
)
)
if metadata_sig:
self.assertEqual(
metadata_sig.args,
fhs.args[:2] + fhs.args[3:],
"get_metadata for %s.views.%s has a signature inconsistent with the handlers" % (
app_name, view_name,
)
)
self.assertEqual(
metadata_sig.varargs, None,
"get_metadata() for %s.views.%s takes *%s when it shouldn't" % (
app_name, view_name, metadata_sig.varargs
),
)
self.assertEqual(
metadata_sig.keywords, None,
"get_metadata() for %s.views.%s takes **%s when it shouldn't" % (
app_name, view_name, metadata_sig.keywords
),
)
if breadcrumb_sig != True:
if breadcrumb_sig[0][0] != 'self':
fhs = (fhs[0][1:], fhs[1], fhs[2], fhs[3])
self.assertEqual(
breadcrumb_sig, fhs,
"breadcrumb signature for %s.%s differs from its view handlers (%s, %s)" % (
app_name, view_name, breadcrumb_sig, fhs
)
)
else:
self.assertEqual(
breadcrumb_sig, fhs,
"breadcrumb signature for %s.%s differs from its view handlers (%s, %s)" % (
app_name, view_name, breadcrumb_sig, fhs
)
) | apache-2.0 |
akhmadMizkat/odoo | addons/crm/crm_stage.py | 4 | 2537 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv, fields
AVAILABLE_PRIORITIES = [
('0', 'Normal'),
('1', 'Low'),
('2', 'High'),
('3', 'Very High'),
]
class crm_stage(osv.Model):
""" Model for case stages. This models the main stages of a document
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.stage"
_description = "Stage of case"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
'requirements': fields.text('Requirements', help="Enter here the internal requirements for this stage (ex: Offer sent to customer). It will appear as a tooltip over the stage's name."),
'team_id': fields.many2one('crm.team', 'Team',
ondelete='set null',
help='Specific team that uses this stage. Other teams will not be able to see or use this stage.'),
'legend_priority': fields.text(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'fold': fields.boolean('Folded in Pipeline',
help='This stage is folded in the kanban view when '
'there are no records in that stage to display.'),
}
_defaults = {
'sequence': 1,
'probability': 10.0,
'fold': False,
}
def default_get(self, cr, uid, fields, context=None):
if context and context.get('default_team_id') and not context.get('crm_team_mono', False):
context = dict(context)
context.pop('default_team_id')
return super(crm_stage, self).default_get(cr, uid, fields, context=context)
| gpl-3.0 |
ajgallegog/gem5_arm | src/arch/x86/isa/insts/x87/control/clear_exceptions.py | 91 | 2159 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
# FCLEX
# FNCLEX
'''
| bsd-3-clause |
PHOTOX/fuase | ase/ase/test/cmr/reactions_test.py | 6 | 2955 | from ase.test import NotAvailable
import warnings
# cmr calls all available methods in ase.atoms detected by the module inspect.
# Therefore also deprecated methods are called - and we choose to silence those warnings.
warnings.filterwarnings('ignore', 'ase.atoms.*deprecated',)
# if CMR_SETTINGS_FILE is missing, cmr raises simply
# Exception("CMR is not configured properly. Please create the settings file with cmr --create-settings.")
try:
import cmr
except (Exception, ImportError):
raise NotAvailable('CMR is required')
from cmr.ui import DirectoryReader
from cmr.test.examples.ase_reaction_energy import ASEReactionEnergy
# see the module for the required format of reactions definition
from ase.test.cmr.reactions import reactions
from ase.test.cmr.reactions import reference
# assure that all reactions define a reaction_id
for r in reactions:
assert r[-1][0] == 'reaction_id'
# project id: must uniquely identify the project!
project_id = 'EMT' + ' reaction energies'
# if True, then results are uploaded to the database
database = False
# create assisting class for project with project_id,
# that allows one to convert trajectory files into
# db-files and perform analysis
re = ASEReactionEnergy(project_id, reactions, prefix='', verbose=False)
# compounds names
compounds = re.get_compounds()
# put additional fields here:
cmr_params = {'calculator': 'EMT'}
# convert all traj files in this directory to db-files
re.create_db_files(cmr_params)
# calculate the reaction energies and write the results to db-files
# named 'reaction_id.index.db'
# Each db-file defines a group (group of all compounds belonging to
# the given reaction).
# reaction energies on initial, unoptimized geometries
cmr_params = {'geometries': 'initial'}
re.make_reaction_groups(database=False, index=0, cmr_params=cmr_params)
# print
re.print_result(database=False)
# reaction energies on final, optimized geometries
cmr_params = {'geometries': 'final'}
re.make_reaction_groups(database=False, index= -1, cmr_params=cmr_params)
# print
re.print_result(database=False)
reader = DirectoryReader('.')
# retrieve all reactions (groups) with project_id and optimized geometries from the current directory
all = reader.find(name_value_list=[('db_calculator', 'group'),
('geometries', 'final')
],
keyword_list=[project_id, 'reaction'])
print 'reaction_id, calc, ref, calc - ref'
# compare with the reference
for r in reactions:
reaction_id = r[-1][1]
res = all.get('reaction_id', reaction_id)
if res is None:
print "Could not find reaction_id %s in reference"%str(reaction_id)
else:
calc = res['reaction_energy']
ref = reference[reaction_id]
print reaction_id, calc, ref, calc - ref
assert abs(calc - ref) < 1e-5
# upload the created groups to the database
if database:
re.upload_to_database()
| gpl-2.0 |
abaditsegay/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/idlelib/configHandler.py | 52 | 28969 | """Provides access to stored IDLE configuration information.
Refer to the comments at the beginning of config-main.def for a description of
the available configuration files and the design implemented to update user
configuration information. In particular, user configuration choices which
duplicate the defaults will be removed from the user's configuration files,
and if a file becomes empty, it will be deleted.
The contents of the user files may be altered using the Options/Configure IDLE
menu to access the configuration GUI (configDialog.py), or manually.
Throughout this module there is an emphasis on returning useable defaults
when a problem occurs in returning a requested configuration value back to
idle. This is to allow IDLE to continue to function in spite of errors in
the retrieval of config information. When a default is returned instead of
a requested config value, a message is printed to stderr to aid in
configuration problem notification and resolution.
"""
import os
import sys
import string
import macosxSupport
from ConfigParser import ConfigParser, NoOptionError, NoSectionError
class InvalidConfigType(Exception): pass
class InvalidConfigSet(Exception): pass
class InvalidFgBg(Exception): pass
class InvalidTheme(Exception): pass
class IdleConfParser(ConfigParser):
"""
A ConfigParser specialised for idle configuration file handling
"""
def __init__(self, cfgFile, cfgDefaults=None):
"""
cfgFile - string, fully specified configuration file name
"""
self.file=cfgFile
ConfigParser.__init__(self,defaults=cfgDefaults)
def Get(self, section, option, type=None, default=None, raw=False):
"""
Get an option value for given section/option or return default.
If type is specified, return as type.
"""
if not self.has_option(section, option):
return default
if type=='bool':
return self.getboolean(section, option)
elif type=='int':
return self.getint(section, option)
else:
return self.get(section, option, raw=raw)
def GetOptionList(self,section):
"""
Get an option list for given section
"""
if self.has_section(section):
return self.options(section)
else: #return a default value
return []
def Load(self):
"""
Load the configuration file from disk
"""
self.read(self.file)
class IdleUserConfParser(IdleConfParser):
"""
IdleConfigParser specialised for user configuration handling.
"""
def AddSection(self,section):
"""
if section doesn't exist, add it
"""
if not self.has_section(section):
self.add_section(section)
def RemoveEmptySections(self):
"""
remove any sections that have no options
"""
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section)
def IsEmpty(self):
"""
Remove empty sections and then return 1 if parser has no sections
left, else return 0.
"""
self.RemoveEmptySections()
if self.sections():
return 0
else:
return 1
def RemoveOption(self,section,option):
"""
If section/option exists, remove it.
Returns 1 if option was removed, 0 otherwise.
"""
if self.has_section(section):
return self.remove_option(section,option)
def SetOption(self,section,option,value):
"""
Sets option to value, adding section if required.
Returns 1 if option was added or changed, otherwise 0.
"""
if self.has_option(section,option):
if self.get(section,option)==value:
return 0
else:
self.set(section,option,value)
return 1
else:
if not self.has_section(section):
self.add_section(section)
self.set(section,option,value)
return 1
def RemoveFile(self):
"""
Removes the user config file from disk if it exists.
"""
if os.path.exists(self.file):
os.remove(self.file)
def Save(self):
"""Update user configuration file.
Remove empty sections. If resulting config isn't empty, write the file
to disk. If config is empty, remove the file from disk if it exists.
"""
if not self.IsEmpty():
fname = self.file
try:
cfgFile = open(fname, 'w')
except IOError:
os.unlink(fname)
cfgFile = open(fname, 'w')
self.write(cfgFile)
else:
self.RemoveFile()
class IdleConf:
"""
holds config parsers for all idle config files:
default config files
(idle install dir)/config-main.def
(idle install dir)/config-extensions.def
(idle install dir)/config-highlight.def
(idle install dir)/config-keys.def
user config files
(user home dir)/.idlerc/config-main.cfg
(user home dir)/.idlerc/config-extensions.cfg
(user home dir)/.idlerc/config-highlight.cfg
(user home dir)/.idlerc/config-keys.cfg
"""
def __init__(self):
self.defaultCfg={}
self.userCfg={}
self.cfg={}
self.CreateConfigHandlers()
self.LoadCfgFiles()
#self.LoadCfg()
def CreateConfigHandlers(self):
"""
set up a dictionary of config parsers for default and user
configurations respectively
"""
#build idle install path
if __name__ != '__main__': # we were imported
idleDir=os.path.dirname(__file__)
else: # we were exec'ed (for testing only)
idleDir=os.path.abspath(sys.path[0])
userDir=self.GetUserCfgDir()
configTypes=('main','extensions','highlight','keys')
defCfgFiles={}
usrCfgFiles={}
for cfgType in configTypes: #build config file names
defCfgFiles[cfgType]=os.path.join(idleDir,'config-'+cfgType+'.def')
usrCfgFiles[cfgType]=os.path.join(userDir,'config-'+cfgType+'.cfg')
for cfgType in configTypes: #create config parsers
self.defaultCfg[cfgType]=IdleConfParser(defCfgFiles[cfgType])
self.userCfg[cfgType]=IdleUserConfParser(usrCfgFiles[cfgType])
def GetUserCfgDir(self):
"""
Creates (if required) and returns a filesystem directory for storing
user config files.
"""
cfgDir = '.idlerc'
userDir = os.path.expanduser('~')
if userDir != '~': # expanduser() found user home dir
if not os.path.exists(userDir):
warn = ('\n Warning: os.path.expanduser("~") points to\n '+
userDir+',\n but the path does not exist.\n')
try:
sys.stderr.write(warn)
except IOError:
pass
userDir = '~'
if userDir == "~": # still no path to home!
# traditionally IDLE has defaulted to os.getcwd(), is this adequate?
userDir = os.getcwd()
userDir = os.path.join(userDir, cfgDir)
if not os.path.exists(userDir):
try:
os.mkdir(userDir)
except (OSError, IOError):
warn = ('\n Warning: unable to create user config directory\n'+
userDir+'\n Check path and permissions.\n Exiting!\n\n')
sys.stderr.write(warn)
raise SystemExit
return userDir
def GetOption(self, configType, section, option, default=None, type=None,
warn_on_default=True, raw=False):
"""
Get an option value for given config type and given general
configuration section/option or return a default. If type is specified,
return as type. Firstly the user configuration is checked, with a
fallback to the default configuration, and a final 'catch all'
fallback to a useable passed-in default if the option isn't present in
either the user or the default configuration.
configType must be one of ('main','extensions','highlight','keys')
If a default is returned, and warn_on_default is True, a warning is
printed to stderr.
"""
if self.userCfg[configType].has_option(section,option):
return self.userCfg[configType].Get(section, option,
type=type, raw=raw)
elif self.defaultCfg[configType].has_option(section,option):
return self.defaultCfg[configType].Get(section, option,
type=type, raw=raw)
else: #returning default, print warning
if warn_on_default:
warning = ('\n Warning: configHandler.py - IdleConf.GetOption -\n'
' problem retrieving configuration option %r\n'
' from section %r.\n'
' returning default value: %r\n' %
(option, section, default))
try:
sys.stderr.write(warning)
except IOError:
pass
return default
def SetOption(self, configType, section, option, value):
"""In user's config file, set section's option to value.
"""
self.userCfg[configType].SetOption(section, option, value)
def GetSectionList(self, configSet, configType):
"""
Get a list of sections from either the user or default config for
the given config type.
configSet must be either 'user' or 'default'
configType must be one of ('main','extensions','highlight','keys')
"""
if not (configType in ('main','extensions','highlight','keys')):
raise InvalidConfigType, 'Invalid configType specified'
if configSet == 'user':
cfgParser=self.userCfg[configType]
elif configSet == 'default':
cfgParser=self.defaultCfg[configType]
else:
raise InvalidConfigSet, 'Invalid configSet specified'
return cfgParser.sections()
def GetHighlight(self, theme, element, fgBg=None):
"""
return individual highlighting theme elements.
fgBg - string ('fg'or'bg') or None, if None return a dictionary
containing fg and bg colours (appropriate for passing to Tkinter in,
e.g., a tag_config call), otherwise fg or bg colour only as specified.
"""
if self.defaultCfg['highlight'].has_section(theme):
themeDict=self.GetThemeDict('default',theme)
else:
themeDict=self.GetThemeDict('user',theme)
fore=themeDict[element+'-foreground']
if element=='cursor': #there is no config value for cursor bg
back=themeDict['normal-background']
else:
back=themeDict[element+'-background']
highlight={"foreground": fore,"background": back}
if not fgBg: #return dict of both colours
return highlight
else: #return specified colour only
if fgBg == 'fg':
return highlight["foreground"]
if fgBg == 'bg':
return highlight["background"]
else:
raise InvalidFgBg, 'Invalid fgBg specified'
def GetThemeDict(self,type,themeName):
"""
type - string, 'default' or 'user' theme type
themeName - string, theme name
Returns a dictionary which holds {option:value} for each element
in the specified theme. Values are loaded over a set of ultimate last
fallback defaults to guarantee that all theme elements are present in
a newly created theme.
"""
if type == 'user':
cfgParser=self.userCfg['highlight']
elif type == 'default':
cfgParser=self.defaultCfg['highlight']
else:
raise InvalidTheme, 'Invalid theme type specified'
#foreground and background values are provided for each theme element
#(apart from cursor) even though all these values are not yet used
#by idle, to allow for their use in the future. Default values are
#generally black and white.
theme={ 'normal-foreground':'#000000',
'normal-background':'#ffffff',
'keyword-foreground':'#000000',
'keyword-background':'#ffffff',
'builtin-foreground':'#000000',
'builtin-background':'#ffffff',
'comment-foreground':'#000000',
'comment-background':'#ffffff',
'string-foreground':'#000000',
'string-background':'#ffffff',
'definition-foreground':'#000000',
'definition-background':'#ffffff',
'hilite-foreground':'#000000',
'hilite-background':'gray',
'break-foreground':'#ffffff',
'break-background':'#000000',
'hit-foreground':'#ffffff',
'hit-background':'#000000',
'error-foreground':'#ffffff',
'error-background':'#000000',
#cursor (only foreground can be set)
'cursor-foreground':'#000000',
#shell window
'stdout-foreground':'#000000',
'stdout-background':'#ffffff',
'stderr-foreground':'#000000',
'stderr-background':'#ffffff',
'console-foreground':'#000000',
'console-background':'#ffffff' }
for element in theme.keys():
if not cfgParser.has_option(themeName,element):
#we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetThemeDict'
' -\n problem retrieving theme element %r'
'\n from theme %r.\n'
' returning default value: %r\n' %
(element, themeName, theme[element]))
try:
sys.stderr.write(warning)
except IOError:
pass
colour=cfgParser.Get(themeName,element,default=theme[element])
theme[element]=colour
return theme
def CurrentTheme(self):
"""
Returns the name of the currently active theme
"""
return self.GetOption('main','Theme','name',default='')
def CurrentKeys(self):
"""
Returns the name of the currently active key set
"""
return self.GetOption('main','Keys','name',default='')
def GetExtensions(self, active_only=True, editor_only=False, shell_only=False):
"""
Gets a list of all idle extensions declared in the config files.
active_only - boolean, if true only return active (enabled) extensions
"""
extns=self.RemoveKeyBindNames(
self.GetSectionList('default','extensions'))
userExtns=self.RemoveKeyBindNames(
self.GetSectionList('user','extensions'))
for extn in userExtns:
if extn not in extns: #user has added own extension
extns.append(extn)
if active_only:
activeExtns=[]
for extn in extns:
if self.GetOption('extensions', extn, 'enable', default=True,
type='bool'):
#the extension is enabled
if editor_only or shell_only:
if editor_only:
option = "enable_editor"
else:
option = "enable_shell"
if self.GetOption('extensions', extn,option,
default=True, type='bool',
warn_on_default=False):
activeExtns.append(extn)
else:
activeExtns.append(extn)
return activeExtns
else:
return extns
def RemoveKeyBindNames(self,extnNameList):
#get rid of keybinding section names
names=extnNameList
kbNameIndicies=[]
for name in names:
if name.endswith(('_bindings', '_cfgBindings')):
kbNameIndicies.append(names.index(name))
kbNameIndicies.sort()
kbNameIndicies.reverse()
for index in kbNameIndicies: #delete each keybinding section name
del(names[index])
return names
def GetExtnNameForEvent(self,virtualEvent):
"""
Returns the name of the extension that virtualEvent is bound in, or
None if not bound in any extension.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
extName=None
vEvent='<<'+virtualEvent+'>>'
for extn in self.GetExtensions(active_only=0):
for event in self.GetExtensionKeys(extn).keys():
if event == vEvent:
extName=extn
return extName
def GetExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension,as they exist in the dictionary returned by GetCurrentKeySet;
that is, where previously used bindings are disabled.
"""
keysName=extensionName+'_cfgBindings'
activeKeys=self.GetCurrentKeySet()
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
event='<<'+eventName+'>>'
binding=activeKeys[event]
extKeys[event]=binding
return extKeys
def __GetRawExtensionKeys(self,extensionName):
"""
returns a dictionary of the configurable keybindings for a particular
extension, as defined in the configuration files, or an empty dictionary
if no bindings are found
"""
keysName=extensionName+'_cfgBindings'
extKeys={}
if self.defaultCfg['extensions'].has_section(keysName):
eventNames=self.defaultCfg['extensions'].GetOptionList(keysName)
for eventName in eventNames:
binding=self.GetOption('extensions',keysName,
eventName,default='').split()
event='<<'+eventName+'>>'
extKeys[event]=binding
return extKeys
def GetExtensionBindings(self,extensionName):
"""
Returns a dictionary of all the event bindings for a particular
extension. The configurable keybindings are returned as they exist in
the dictionary returned by GetCurrentKeySet; that is, where re-used
keybindings are disabled.
"""
bindsName=extensionName+'_bindings'
extBinds=self.GetExtensionKeys(extensionName)
#add the non-configurable bindings
if self.defaultCfg['extensions'].has_section(bindsName):
eventNames=self.defaultCfg['extensions'].GetOptionList(bindsName)
for eventName in eventNames:
binding=self.GetOption('extensions',bindsName,
eventName,default='').split()
event='<<'+eventName+'>>'
extBinds[event]=binding
return extBinds
def GetKeyBinding(self, keySetName, eventStr):
"""
returns the keybinding for a specific event.
keySetName - string, name of key binding set
eventStr - string, the virtual event we want the binding for,
represented as a string, eg. '<<event>>'
"""
eventName=eventStr[2:-2] #trim off the angle brackets
binding=self.GetOption('keys',keySetName,eventName,default='').split()
return binding
def GetCurrentKeySet(self):
result = self.GetKeySet(self.CurrentKeys())
if macosxSupport.runningAsOSXApp():
# We're using AquaTk, replace all keybindings that use the
# Alt key by ones that use the Option key because the former
# don't work reliably.
for k, v in result.items():
v2 = [ x.replace('<Alt-', '<Option-') for x in v ]
if v != v2:
result[k] = v2
return result
def GetKeySet(self,keySetName):
"""
Returns a dictionary of: all requested core keybindings, plus the
keybindings for all currently active extensions. If a binding defined
in an extension is already in use, that binding is disabled.
"""
keySet=self.GetCoreKeys(keySetName)
activeExtns=self.GetExtensions(active_only=1)
for extn in activeExtns:
extKeys=self.__GetRawExtensionKeys(extn)
if extKeys: #the extension defines keybindings
for event in extKeys.keys():
if extKeys[event] in keySet.values():
#the binding is already in use
extKeys[event]='' #disable this binding
keySet[event]=extKeys[event] #add binding
return keySet
def IsCoreBinding(self,virtualEvent):
"""
returns true if the virtual event is bound in the core idle keybindings.
virtualEvent - string, name of the virtual event to test for, without
the enclosing '<< >>'
"""
return ('<<'+virtualEvent+'>>') in self.GetCoreKeys().keys()
def GetCoreKeys(self, keySetName=None):
"""
returns the requested set of core keybindings, with fallbacks if
required.
Keybindings loaded from the config file(s) are loaded _over_ these
defaults, so if there is a problem getting any core binding there will
be an 'ultimate last resort fallback' to the CUA-ish bindings
defined here.
"""
keyBindings={
'<<copy>>': ['<Control-c>', '<Control-C>'],
'<<cut>>': ['<Control-x>', '<Control-X>'],
'<<paste>>': ['<Control-v>', '<Control-V>'],
'<<beginning-of-line>>': ['<Control-a>', '<Home>'],
'<<center-insert>>': ['<Control-l>'],
'<<close-all-windows>>': ['<Control-q>'],
'<<close-window>>': ['<Alt-F4>'],
'<<do-nothing>>': ['<Control-x>'],
'<<end-of-file>>': ['<Control-d>'],
'<<python-docs>>': ['<F1>'],
'<<python-context-help>>': ['<Shift-F1>'],
'<<history-next>>': ['<Alt-n>'],
'<<history-previous>>': ['<Alt-p>'],
'<<interrupt-execution>>': ['<Control-c>'],
'<<view-restart>>': ['<F6>'],
'<<restart-shell>>': ['<Control-F6>'],
'<<open-class-browser>>': ['<Alt-c>'],
'<<open-module>>': ['<Alt-m>'],
'<<open-new-window>>': ['<Control-n>'],
'<<open-window-from-file>>': ['<Control-o>'],
'<<plain-newline-and-indent>>': ['<Control-j>'],
'<<print-window>>': ['<Control-p>'],
'<<redo>>': ['<Control-y>'],
'<<remove-selection>>': ['<Escape>'],
'<<save-copy-of-window-as-file>>': ['<Alt-Shift-S>'],
'<<save-window-as-file>>': ['<Alt-s>'],
'<<save-window>>': ['<Control-s>'],
'<<select-all>>': ['<Alt-a>'],
'<<toggle-auto-coloring>>': ['<Control-slash>'],
'<<undo>>': ['<Control-z>'],
'<<find-again>>': ['<Control-g>', '<F3>'],
'<<find-in-files>>': ['<Alt-F3>'],
'<<find-selection>>': ['<Control-F3>'],
'<<find>>': ['<Control-f>'],
'<<replace>>': ['<Control-h>'],
'<<goto-line>>': ['<Alt-g>'],
'<<smart-backspace>>': ['<Key-BackSpace>'],
'<<newline-and-indent>>': ['<Key-Return> <Key-KP_Enter>'],
'<<smart-indent>>': ['<Key-Tab>'],
'<<indent-region>>': ['<Control-Key-bracketright>'],
'<<dedent-region>>': ['<Control-Key-bracketleft>'],
'<<comment-region>>': ['<Alt-Key-3>'],
'<<uncomment-region>>': ['<Alt-Key-4>'],
'<<tabify-region>>': ['<Alt-Key-5>'],
'<<untabify-region>>': ['<Alt-Key-6>'],
'<<toggle-tabs>>': ['<Alt-Key-t>'],
'<<change-indentwidth>>': ['<Alt-Key-u>'],
'<<del-word-left>>': ['<Control-Key-BackSpace>'],
'<<del-word-right>>': ['<Control-Key-Delete>']
}
if keySetName:
for event in keyBindings.keys():
binding=self.GetKeyBinding(keySetName,event)
if binding:
keyBindings[event]=binding
else: #we are going to return a default, print warning
warning=('\n Warning: configHandler.py - IdleConf.GetCoreKeys'
' -\n problem retrieving key binding for event %r'
'\n from key set %r.\n'
' returning default value: %r\n' %
(event, keySetName, keyBindings[event]))
try:
sys.stderr.write(warning)
except IOError:
pass
return keyBindings
def GetExtraHelpSourceList(self,configSet):
"""Fetch list of extra help sources from a given configSet.
Valid configSets are 'user' or 'default'. Return a list of tuples of
the form (menu_item , path_to_help_file , option), or return the empty
list. 'option' is the sequence number of the help resource. 'option'
values determine the position of the menu items on the Help menu,
therefore the returned list must be sorted by 'option'.
"""
helpSources=[]
if configSet=='user':
cfgParser=self.userCfg['main']
elif configSet=='default':
cfgParser=self.defaultCfg['main']
else:
raise InvalidConfigSet, 'Invalid configSet specified'
options=cfgParser.GetOptionList('HelpFiles')
for option in options:
value=cfgParser.Get('HelpFiles',option,default=';')
if value.find(';')==-1: #malformed config entry with no ';'
menuItem='' #make these empty
helpPath='' #so value won't be added to list
else: #config entry contains ';' as expected
value=string.split(value,';')
menuItem=value[0].strip()
helpPath=value[1].strip()
if menuItem and helpPath: #neither are empty strings
helpSources.append( (menuItem,helpPath,option) )
helpSources.sort(self.__helpsort)
return helpSources
def __helpsort(self, h1, h2):
if int(h1[2]) < int(h2[2]):
return -1
elif int(h1[2]) > int(h2[2]):
return 1
else:
return 0
def GetAllExtraHelpSourcesList(self):
"""
Returns a list of tuples containing the details of all additional help
sources configured, or an empty list if there are none. Tuples are of
the format returned by GetExtraHelpSourceList.
"""
allHelpSources=( self.GetExtraHelpSourceList('default')+
self.GetExtraHelpSourceList('user') )
return allHelpSources
def LoadCfgFiles(self):
"""
load all configuration files.
"""
for key in self.defaultCfg.keys():
self.defaultCfg[key].Load()
self.userCfg[key].Load() #same keys
def SaveUserCfgFiles(self):
"""
write all loaded user configuration files back to disk
"""
for key in self.userCfg.keys():
self.userCfg[key].Save()
idleConf=IdleConf()
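# Illustrative use of the module-level instance (option names below are
# examples; actual values depend on the installed config-*.def files and any
# user overrides under ~/.idlerc):
#   theme = idleConf.CurrentTheme()
#   font = idleConf.GetOption('main', 'EditorWindow', 'font', default='courier')
#   keys = idleConf.GetCurrentKeySet()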
### module test
if __name__ == '__main__':
def dumpCfg(cfg):
print '\n',cfg,'\n'
for key in cfg.keys():
sections=cfg[key].sections()
print key
print sections
for section in sections:
options=cfg[key].options(section)
print section
print options
for option in options:
print option, '=', cfg[key].Get(section,option)
dumpCfg(idleConf.defaultCfg)
dumpCfg(idleConf.userCfg)
print idleConf.userCfg['main'].Get('Theme','name')
#print idleConf.userCfg['highlight'].GetDefHighlight('Foo','normal')
| apache-2.0 |
jonathanwcrane/boto | boto/roboto/param.py | 160 | 4529 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
class Converter(object):
@classmethod
def convert_string(cls, param, value):
# TODO: could do length validation, etc. here
if not isinstance(value, basestring):
raise ValueError
return value
@classmethod
def convert_integer(cls, param, value):
# TODO: could do range checking here
return int(value)
@classmethod
def convert_boolean(cls, param, value):
"""
For command line arguments, just the presence
of the option means True so just return True
"""
return True
@classmethod
def convert_file(cls, param, value):
if os.path.exists(value) and not os.path.isdir(value):
return value
raise ValueError
@classmethod
def convert_dir(cls, param, value):
if os.path.isdir(value):
return value
raise ValueError
@classmethod
def convert(cls, param, value):
try:
if hasattr(cls, 'convert_'+param.ptype):
mthd = getattr(cls, 'convert_'+param.ptype)
else:
mthd = cls.convert_string
return mthd(param, value)
except:
raise ValidationException(param, '')
class Param(Converter):
def __init__(self, name=None, ptype='string', optional=True,
short_name=None, long_name=None, doc='',
metavar=None, cardinality=1, default=None,
choices=None, encoder=None, request_param=True):
self.name = name
self.ptype = ptype
self.optional = optional
self.short_name = short_name
self.long_name = long_name
self.doc = doc
self.metavar = metavar
self.cardinality = cardinality
self.default = default
self.choices = choices
self.encoder = encoder
self.request_param = request_param
@property
def optparse_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def synopsis_long_name(self):
ln = None
if self.long_name:
ln = '--%s' % self.long_name
return ln
@property
def getopt_long_name(self):
ln = None
if self.long_name:
ln = '%s' % self.long_name
if self.ptype != 'boolean':
ln += '='
return ln
@property
def optparse_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def synopsis_short_name(self):
sn = None
if self.short_name:
sn = '-%s' % self.short_name
return sn
@property
def getopt_short_name(self):
sn = None
if self.short_name:
sn = '%s' % self.short_name
if self.ptype != 'boolean':
sn += ':'
return sn
def convert(self, value):
"""
Convert a string value as received in the command line
tools and convert to the appropriate type of value.
Raise a ValidationError if the value can't be converted.
:type value: str
:param value: The value to convert. This should always
be a string.
"""
return super(Param, self).convert(self,value)
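# Illustrative (hypothetical) usage of Param with the Converter machinery:
#   count = Param(name='count', ptype='integer', short_name='c', long_name='count')
#   count.convert('5')          # -> 5
#   count.optparse_long_name    # -> '--count'
#   count.getopt_long_name      # -> 'count='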
| mit |
catapult-project/catapult-csm | third_party/gsutil/third_party/boto/boto/ec2/cloudwatch/datapoint.py | 152 | 1668 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from datetime import datetime
class Datapoint(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name in ['Average', 'Maximum', 'Minimum', 'Sum', 'SampleCount']:
self[name] = float(value)
elif name == 'Timestamp':
self[name] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name != 'member':
self[name] = value
| bsd-3-clause |
LettError/MutatorMath | Lib/mutatorMath/test/ufo/data/sources/fixSpacing.py | 2 | 2071 |
for f in AllFonts():
if 'Wide' in f.info.styleName:
wk = 'wide'
else:
wk = 'cond'
if 'Bold' in f.info.styleName:
bk = 'bold'
else:
bk = 'light'
print 'current', wk, bk
groups = {
"L_straight": ['B', 'D', 'E', 'F', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'P', 'R', 'U', 'Z', "quotesinglbase", "quotedblbase", "period", "comma", "colon", "semicolon", "quotedblright", "quotedblleft"],
"R_straight": [ 'E', 'F', 'G', 'H', 'I', 'J', 'M', 'N', 'U', 'Z', "quotesinglbase", "quotedblbase", "period", "comma", "colon", "semicolon", "quotedblright", "quotedblleft"],
"L_angled": ['A', 'V', 'W', 'X', 'Y'],
"R_angled": ['A', 'K', 'V', 'W', 'X', 'Y'],
"L_short": ['T', ],
"R_short": ['L', 'T', 'P', 'R', 'B', ],
"L_round": ['C','G', 'O', 'Q', 'S'],
"R_round": ['C', 'D', 'O', 'Q', 'S'],
}
margins = {
'bold': {
'straight': dict(cond=30, wide=60),
'angled': dict(cond=10, wide=20),
'round': dict(cond=20, wide=40),
'short': dict(cond=20, wide=20),
},
'light': {
'straight': dict(cond=60, wide=120),
'angled': dict(cond=20, wide=40),
'round': dict(cond=50, wide=80),
'short': dict(cond=30, wide=40),
},
}
for n in f.keys():
#print n
for k, v in groups.items():
if n in v:
parts = k.split("_")
left = None
right = None
if parts[0] == "L":
left = margins[bk].get(parts[1])[wk]
#print "links", n, k, left
elif parts[0] == "R":
right = margins[bk].get(parts[1])[wk]
#print "rechts", n, k, right
if n in f:
if left:
f[n].leftMargin = left
if right:
f[n].rightMargin = right
| bsd-3-clause |
fangxingli/hue | apps/hbase/src/hbase/server/hbase_lib.py | 28 | 1441 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from thrift import Thrift
from thrift.transport.TSocket import TSocket
from thrift.transport.TTransport import TBufferedTransport
from thrift.protocol import TBinaryProtocol
from hbased import Hbase as thrift_hbase
from hbased import ttypes
LOG = logging.getLogger(__name__)
def get_client_type():
return thrift_hbase.Client
def get_thrift_type(name):
if not hasattr(ttypes,name):
return False
return getattr(ttypes,name)
def get_thrift_attributes(name):
thrift_type = get_thrift_type(name)
attrs = {}
for spec in thrift_type.thrift_spec:
if spec is not None:
attrs[spec[2]] = spec[1]
return attrs
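# Illustrative (hypothetical) usage, assuming the generated Hbase thrift
# bindings define a type with the given name:
#   scan_type = get_thrift_type('TScan')
#   get_thrift_attributes('TScan')  # -> {attribute name: thrift type code, ...}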
| apache-2.0 |
gratipay/gratipay.com | tests/py/test_teams.py | 1 | 15001 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import pytest
from aspen.testing.client import FileUpload
from gratipay.testing import Harness, IMAGE, D,T
from gratipay.testing.email import QueuedEmailHarness
from gratipay.models.team import Team, slugize, InvalidTeamName
class TestTeams(QueuedEmailHarness):
valid_data = {
'name': 'Gratiteam',
'product_or_service': 'We make widgets.',
'homepage': 'http://gratipay.com/',
'onboarding_url': 'http://inside.gratipay.com/',
'agree_public': 'true',
'agree_payroll': 'true',
'agree_terms': 'true',
'image': FileUpload(IMAGE, 'logo.png'),
}
def post_new(self, data, auth_as='alice', expected=200):
r = self.client.POST( '/teams/create.json'
, data=data
, auth_as=auth_as
, raise_immediately=False
)
assert r.code == expected
return r
def test_harness_can_make_a_team(self):
team = self.make_team()
assert team.name == 'The Enterprise'
assert team.owner == 'picard'
def test_can_construct_from_slug(self):
self.make_team()
team = Team.from_slug('TheEnterprise')
assert team.name == 'The Enterprise'
assert team.owner == 'picard'
def test_can_construct_from_id(self):
team = Team.from_id(self.make_team().id)
assert team.name == 'The Enterprise'
assert team.owner == 'picard'
def make_alice(self):
self.make_participant( 'alice'
, claimed_time='now'
, email_address='[email protected]'
, last_paypal_result=''
)
def test_can_create_new_team(self):
self.make_alice()
r = self.post_new(dict(self.valid_data))
team = self.db.one("SELECT * FROM teams")
assert team
assert team.owner == 'alice'
assert json.loads(r.body)['review_url'] == team.review_url
def test_all_fields_persist(self):
self.make_alice()
self.post_new(dict(self.valid_data))
team = T('gratiteam')
assert team.name == 'Gratiteam'
assert team.homepage == 'http://gratipay.com/'
assert team.product_or_service == 'We make widgets.'
assert team.review_url == 'some-github-issue'
def test_casing_of_urls_survives(self):
self.make_alice()
self.post_new(dict( self.valid_data
, homepage='Http://gratipay.com/'
))
team = T('gratiteam')
assert team.homepage == 'Http://gratipay.com/'
def test_casing_of_slug_survives(self):
self.make_alice()
data = dict(self.valid_data)
data['name'] = 'GratiTeam'
self.post_new(dict(data))
team = T('GratiTeam')
assert team is not None
assert team.slug_lower == 'gratiteam'
def test_application_email_sent_to_owner(self):
self.make_alice()
self.post_new(dict(self.valid_data))
last_email = self.get_last_email()
self.app.email_queue.flush()
assert last_email['to'] == 'alice <[email protected]>'
expected = "Thanks for applying to use Gratipay!"
assert expected in last_email['body_text']
def test_401_for_anon_creating_new_team(self):
self.post_new(self.valid_data, auth_as=None, expected=401)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
def test_error_message_for_no_valid_email(self):
self.make_participant('alice', claimed_time='now')
r = self.post_new(dict(self.valid_data), expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "You must have a verified email address to apply for a new team." in r.body
def test_error_message_for_no_payout_route(self):
self.make_participant('alice', claimed_time='now', email_address='[email protected]')
r = self.post_new(dict(self.valid_data), expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "You must attach a PayPal account to apply for a new team." in r.body
def test_error_message_for_public_review(self):
self.make_alice()
data = dict(self.valid_data)
del data['agree_public']
r = self.post_new(data, expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Sorry, you must agree to have your application publicly reviewed." in r.body
def test_error_message_for_terms(self):
self.make_alice()
data = dict(self.valid_data)
del data['agree_terms']
r = self.post_new(data, expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Sorry, you must agree to the terms of service." in r.body
def test_error_message_for_missing_fields(self):
self.make_alice()
data = dict(self.valid_data)
del data['name']
r = self.post_new(data, expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Please fill out the 'Team Name' field." in r.body
def test_error_message_for_bad_url(self):
self.make_alice()
r = self.post_new(dict(self.valid_data, homepage='foo'), expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Please enter an http[s]:// URL for the 'Homepage' field." in r.body
def test_error_message_for_invalid_team_name(self):
self.make_alice()
data = dict(self.valid_data)
data['name'] = '~Invalid:Name;'
r = self.post_new(data, expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Sorry, your team name is invalid." in r.body
def test_error_message_for_slug_collision(self):
self.make_alice()
self.post_new(dict(self.valid_data))
r = self.post_new(dict(self.valid_data), expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 1
assert "Sorry, there is already a team using 'Gratiteam'." in r.body
def test_stripping_required_inputs(self):
self.make_alice()
data = dict(self.valid_data)
data['name'] = " "
r = self.post_new(data, expected=400)
assert self.db.one("SELECT COUNT(*) FROM teams") == 0
assert "Please fill out the 'Team Name' field." in r.body
def test_receiving_page_basically_works(self):
team = self.make_team(is_approved=True)
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
alice.set_payment_instruction(team, '3.00')
body = self.client.GET('/TheEnterprise/receiving/', auth_as='picard').body
assert '100.0%' in body
# Dues, Upcoming Payment
# ======================
def test_get_dues(self):
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
bob = self.make_participant('bob', claimed_time='now', last_bill_result='Fail!')
team = self.make_team(is_approved=True)
alice.set_payment_instruction(team, '3.00') # Funded
bob.set_payment_instruction(team, '5.00') # Unfunded
# Simulate dues
self.db.run("UPDATE payment_instructions SET due = amount")
assert team.get_dues() == (3, 5)
def test_upcoming_payment(self):
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
bob = self.make_participant('bob', claimed_time='now', last_bill_result='')
carl = self.make_participant('carl', claimed_time='now', last_bill_result='Fail!')
team = self.make_team(is_approved=True)
alice.set_payment_instruction(team, '5.00') # Funded
bob.set_payment_instruction(team, '3.00') # Funded, but won't hit minimum charge
carl.set_payment_instruction(team, '10.00') # Unfunded
# Simulate dues
self.db.run("UPDATE payment_instructions SET due = amount")
assert team.get_upcoming_payment() == 10 # 2 * Alice's $5
# Cached Values
# =============
def test_receiving_only_includes_funded_payment_instructions(self):
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
bob = self.make_participant('bob', claimed_time='now', last_bill_result="Fail!")
team = self.make_team(is_approved=True)
alice.set_payment_instruction(team, '3.00') # The only funded payment instruction
bob.set_payment_instruction(team, '5.00')
assert team.receiving == D('3.00')
assert team.nreceiving_from == 1
funded_payment_instruction = self.db.one("SELECT * FROM payment_instructions "
"WHERE is_funded ORDER BY id")
assert funded_payment_instruction.participant_id == alice.id
def test_receiving_only_includes_latest_payment_instructions(self):
alice = self.make_participant('alice', claimed_time='now', last_bill_result='')
team = self.make_team(is_approved=True)
alice.set_payment_instruction(team, '5.00')
alice.set_payment_instruction(team, '3.00')
assert team.receiving == D('3.00')
assert team.nreceiving_from == 1
# Images
# ======
def test_save_image_saves_image(self):
team = self.make_team()
team.save_image(IMAGE, IMAGE, IMAGE, 'image/png')
media_type = self.db.one('SELECT image_type FROM teams WHERE id=%s', (team.id,))
assert media_type == 'image/png'
def test_save_image_records_the_event(self):
team = self.make_team()
oids = team.save_image(IMAGE, IMAGE, IMAGE, 'image/png')
event = self.db.all('SELECT * FROM events ORDER BY ts DESC')[0]
assert event.type == 'team'
assert event.payload == { 'action': 'upsert_image'
, 'original': oids['original']
, 'large': oids['large']
, 'small': oids['small']
, 'id': team.id
}
def test_load_image_loads_image(self):
team = self.make_team()
team.save_image(IMAGE, IMAGE, IMAGE, 'image/png')
image = team.load_image('large') # buffer
assert str(image) == IMAGE
def test_image_endpoint_serves_an_image(self):
team = self.make_team()
team.save_image(IMAGE, IMAGE, IMAGE, 'image/png')
image = self.client.GET('/TheEnterprise/image').body # buffer
assert str(image) == IMAGE
def test_get_image_url_gets_image_url(self):
team = self.make_team()
team.save_image(IMAGE, IMAGE, IMAGE, 'image/png')
assert team.get_image_url('small') == '/TheEnterprise/image?size=small'
# Update
# ======
def test_update_works(self):
team = self.make_team(slug='enterprise')
update_data = {
'name': 'Enterprise',
'product_or_service': 'We save galaxies.',
'homepage': 'http://starwars-enterprise.com/',
'onboarding_url': 'http://starwars-enterprise.com/onboarding',
}
team.update(**update_data)
team = T('enterprise')
for field in update_data:
assert getattr(team, field) == update_data[field]
def test_can_only_update_allowed_fields(self):
allowed_fields = set(['name', 'product_or_service', 'homepage',
'onboarding_url',])
team = self.make_team(slug='enterprise')
fields = vars(team).keys()
for field in fields:
if field not in allowed_fields:
with pytest.raises(AssertionError):
team.update(field='foo')
def test_homepage_not_allowed_for_package(self):
alice = self.make_participant('alice', claimed_time='now')
package = self.make_package(name='enterprise')
with self.db.get_cursor() as c:
team = package.get_or_create_linked_team(c, alice)
pytest.raises(AssertionError, team.update, homepage='foo')
def test_update_records_the_old_values_as_events(self):
team = self.make_team(slug='enterprise', product_or_service='Product')
team.update(name='Enterprise', product_or_service='We save galaxies.')
event = self.db.all('SELECT * FROM events ORDER BY ts DESC')[0]
assert event.payload == { 'action': 'update'
, 'id': team.id
, 'name': 'The Enterprise'
, 'product_or_service': 'Product'
}
def test_update_updates_object_attributes(self):
team = self.make_team(slug='enterprise')
team.update(name='Enterprise', product_or_service='We save galaxies.')
assert team.name == 'Enterprise'
assert team.product_or_service == 'We save galaxies.'
# slugize
def test_slugize_slugizes(self):
assert slugize('Foo') == 'Foo'
def test_slugize_requires_a_letter(self):
assert pytest.raises(InvalidTeamName, slugize, '123')
def test_slugize_accepts_letter_in_middle(self):
assert slugize('1a23') == '1a23'
def test_slugize_converts_comma_to_dash(self):
assert slugize('foo,bar') == 'foo-bar'
def test_slugize_converts_space_to_dash(self):
assert slugize('foo bar') == 'foo-bar'
def test_slugize_allows_underscore(self):
assert slugize('foo_bar') == 'foo_bar'
def test_slugize_allows_period(self):
assert slugize('foo.bar') == 'foo.bar'
def test_slugize_trims_whitespace(self):
assert slugize(' Foo Bar ') == 'Foo-Bar'
def test_slugize_trims_dashes(self):
assert slugize('--Foo Bar--') == 'Foo-Bar'
def test_slugize_trims_replacement_dashes(self):
assert slugize(',,Foo Bar,,') == 'Foo-Bar'
def test_slugize_folds_dashes_together(self):
assert slugize('1a----------------23') == '1a-23'
def test_slugize_disallows_slashes(self):
self.assertRaises(InvalidTeamName, slugize, 'abc/def')
def test_slugize_disallows_questions(self):
self.assertRaises(InvalidTeamName, slugize, 'abc?def')
def test_slugize_disallows_backslashes(self):
        self.assertRaises(InvalidTeamName, slugize, 'abc\\def')
class Cast(Harness):
def test_casts_team(self):
team = self.make_team()
state = self.client.GET('/TheEnterprise/', return_after='cast', want='state')
assert state['request'].path['team'] == team
def test_canonicalizes(self):
self.make_team()
response = self.client.GxT('/theenterprise/', return_after='cast')
assert response.code == 302
assert response.headers['Location'] == '/TheEnterprise/'
| mit |
tswast/google-cloud-python | oslogin/synth.py | 2 | 2076 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
# ----------------------------------------------------------------------------
# Generate oslogin GAPIC layer
# ----------------------------------------------------------------------------
library = gapic.py_library(
"oslogin",
"v1",
config_path="/google/cloud/oslogin/artman_oslogin_v1.yaml",
artman_output_name="os-login-v1",
include_protos=True,
)
# pb2's are incorrectly generated into deeper directories, so copy separately into proto/
s.move(
library,
excludes=[
"nox.py",
"setup.py",
"README.rst",
"docs/index.rst",
library / "google/cloud/oslogin_v1/proto/oslogin/**",
library / "google/cloud/oslogin_v1/proto/oslogin_v1/**",
],
)
s.move(library / "google/cloud/oslogin_v1/proto/**/*", "google/cloud/oslogin_v1/proto")
# Fix up imports
s.replace(
"google/**/proto/*.py",
"from google.cloud.oslogin.common import common_pb2",
"from google.cloud.oslogin_v1.proto import common_pb2",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=97, cov_level=100)
s.move(templated_files)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
| apache-2.0 |
Iljalala/AdvancedWebFuzzer | awf/core/comparer.py | 1 | 3335 | class Comparer:
def __init__(self):
self.results = []
def compare_properties(self, old_response, new_response, compared_properties):
""" Сравнивает свойства compared_properties между объектами old_response и new_response
:param old_response: объект ResponseObject, символизирующий "было"
:param new_response: объект ResponseObject, символизирующий "стало"
:param compared_properties: list названий свойств, сравнение которых нужно провести
:return: list, содержащий кортеж (new_param, old_param, sigh), где sign - элемент из ['=', '>', '<'], показывающий, в какую сторону произошли изменения
"""
self.results = []
for prop in compared_properties:
getattr(self, '_compare_'+prop)(old_response, new_response)
return self.results
def _compare_response_code(self, old_response, new_response):
old_code, new_code = old_response.response_code, new_response.response_code
if old_code == new_code:
self.results.append((new_code, old_code, '='))
elif new_code > old_code:
self.results.append((new_code, old_code, '>'))
else:
self.results.append((new_code, old_code, '<'))
def _compare_content_length(self, old_response, new_response):
old_content, new_content = old_response.content_length, new_response.content_length
if old_content == new_content:
self.results.append((new_content, 0, '='))
elif new_content > old_content:
self.results.append((new_content, new_content - old_content, '>'))
else:
self.results.append((new_content, new_content - old_content, '<'))
def _compare_row_count(self, old_response, new_response):
old_row_count, new_row_count = old_response.row_count, new_response.row_count
if old_row_count == new_row_count:
self.results.append((new_row_count, 0, '='))
elif new_row_count > old_row_count:
self.results.append((new_row_count, new_row_count - old_row_count, '>'))
else:
self.results.append((new_row_count, new_row_count - old_row_count, '<'))
def _compare_word_count(self, old_response, new_response):
old_word_count, new_word_count = old_response.word_count, new_response.word_count
if old_word_count == new_word_count:
self.results.append((new_word_count, 0, '='))
elif new_word_count > old_word_count:
self.results.append((new_word_count, new_word_count - old_word_count, '>'))
else:
self.results.append((new_word_count, new_word_count - old_word_count, '<'))
def _compare_request_time(self, old_response, new_response):
old_time, new_time = round(old_response.request_time, 3), round(new_response.request_time, 3)
if old_time == new_time:
self.results.append((new_time, 0, '='))
elif new_time > old_time:
self.results.append((new_time, round(new_time - old_time, 3), '>'))
else:
self.results.append((new_time, round(new_time - old_time, 3), '<'))
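# Illustrative usage sketch. Comparer only reads a handful of attributes from the
# response objects, so a minimal stand-in is enough here; the project's real
# ResponseObject class may differ.
#
#     from collections import namedtuple
#
#     FakeResponse = namedtuple(
#         'FakeResponse',
#         'response_code content_length row_count word_count request_time')
#
#     old = FakeResponse(200, 1024, 40, 200, 0.120)
#     new = FakeResponse(500, 2048, 80, 400, 0.350)
#     diff = Comparer().compare_properties(old, new, ['response_code', 'content_length'])
#     # diff == [(500, 200, '>'), (2048, 1024, '>')]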
| gpl-3.0 |
sahiljain/catapult | third_party/google-endpoints/apitools/base/py/credentials_lib_test.py | 7 | 4432 | #
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import mock
import six
from six.moves import http_client
import unittest2
from apitools.base.py import credentials_lib
from apitools.base.py import util
def CreateUriValidator(uri_regexp, content=''):
def CheckUri(uri, headers=None):
if 'X-Google-Metadata-Request' not in headers:
raise ValueError('Missing required header')
if uri_regexp.match(uri):
message = content
status = http_client.OK
else:
message = 'Expected uri matching pattern %s' % uri_regexp.pattern
status = http_client.BAD_REQUEST
return type('HttpResponse', (object,), {'status': status})(), message
return CheckUri
class CredentialsLibTest(unittest2.TestCase):
def _GetServiceCreds(self, service_account_name=None, scopes=None):
kwargs = {}
if service_account_name is not None:
kwargs['service_account_name'] = service_account_name
service_account_name = service_account_name or 'default'
def MockMetadataCalls(request_url):
default_scopes = scopes or ['scope1']
if request_url.endswith('scopes'):
return six.StringIO(''.join(default_scopes))
elif request_url.endswith('service-accounts'):
return six.StringIO(service_account_name)
elif request_url.endswith(
'/service-accounts/%s/token' % service_account_name):
return six.StringIO('{"access_token": "token"}')
self.fail('Unexpected HTTP request to %s' % request_url)
with mock.patch.object(credentials_lib, '_GceMetadataRequest',
side_effect=MockMetadataCalls,
autospec=True) as opener_mock:
with mock.patch.object(util, 'DetectGce',
autospec=True) as mock_detect:
mock_detect.return_value = True
validator = CreateUriValidator(
re.compile(r'.*/%s/.*' % service_account_name),
content='{"access_token": "token"}')
credentials = credentials_lib.GceAssertionCredentials(
scopes, **kwargs)
self.assertIsNone(credentials._refresh(validator))
self.assertEqual(3, opener_mock.call_count)
def testGceServiceAccounts(self):
scopes = ['scope1']
self._GetServiceCreds()
self._GetServiceCreds(scopes=scopes)
self._GetServiceCreds(service_account_name='my_service_account',
scopes=scopes)
class TestGetRunFlowFlags(unittest2.TestCase):
def setUp(self):
self._flags_actual = credentials_lib.FLAGS
def tearDown(self):
credentials_lib.FLAGS = self._flags_actual
def test_with_gflags(self):
HOST = 'myhostname'
PORT = '144169'
class MockFlags(object):
auth_host_name = HOST
auth_host_port = PORT
auth_local_webserver = False
credentials_lib.FLAGS = MockFlags
flags = credentials_lib._GetRunFlowFlags([
'--auth_host_name=%s' % HOST,
'--auth_host_port=%s' % PORT,
'--noauth_local_webserver',
])
self.assertEqual(flags.auth_host_name, HOST)
self.assertEqual(flags.auth_host_port, PORT)
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, True)
def test_without_gflags(self):
credentials_lib.FLAGS = None
flags = credentials_lib._GetRunFlowFlags([])
self.assertEqual(flags.auth_host_name, 'localhost')
self.assertEqual(flags.auth_host_port, [8080, 8090])
self.assertEqual(flags.logging_level, 'ERROR')
self.assertEqual(flags.noauth_local_webserver, False)
| bsd-3-clause |
LAndreas/zulip | puppet/zulip_internal/files/postgresql/pg_backup_and_purge.py | 114 | 1575 | #!/usr/bin/python
import subprocess
import sys
import logging
import dateutil.parser
import pytz
from datetime import datetime, timedelta
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s")
logger = logging.getLogger(__name__)
def run(args, dry_run=False):
if dry_run:
print "Would have run: " + " ".join(args)
return ""
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode:
        logger.error("Could not invoke %s\nstdout: %s\nstderr: %s"
% (args[0], stdout, stderr))
sys.exit(1)
return stdout
# Only run if we're the master
if run(['psql', '-t', '-c', 'select pg_is_in_recovery()']).strip() != 'f':
sys.exit(0)
run(['env-wal-e', 'backup-push', '/var/lib/postgresql/9.1/main'])
now = datetime.now(tz=pytz.utc)
with open('/var/lib/nagios_state/last_postgres_backup', 'w') as f:
f.write(now.isoformat())
f.write("\n")
backups = {}
lines = run(['env-wal-e', 'backup-list']).split("\n")
for line in lines[1:]:
if line:
backup_name, date, _, _ = line.split()
backups[dateutil.parser.parse(date)] = backup_name
one_month_ago = now - timedelta(days=30)
for date in sorted(backups.keys(), reverse=True):
if date < one_month_ago:
run(['env-wal-e', 'delete', '--confirm', 'before', backups[date]])
# Because we're going from most recent to least recent, we
# only have to do one delete operation
break
| apache-2.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons/l10n_in_hr_payroll/wizard/hr_salary_employee_bymonth.py | 374 | 2830 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class hr_salary_employee_bymonth(osv.osv_memory):
_name = 'hr.salary.employee.month'
_description = 'Hr Salary Employee By Month Report'
_columns = {
'start_date': fields.date('Start Date', required=True),
'end_date': fields.date('End Date', required=True),
'employee_ids': fields.many2many('hr.employee', 'payroll_year_rel', 'payroll_year_id', 'employee_id', 'Employees', required=True),
'category_id': fields.many2one('hr.salary.rule.category', 'Category', required=True),
}
def _get_default_category(self, cr, uid, context=None):
category_ids = self.pool.get('hr.salary.rule.category').search(cr, uid, [('code', '=', 'NET')], context=context)
return category_ids and category_ids[0] or False
_defaults = {
'start_date': lambda *a: time.strftime('%Y-01-01'),
'end_date': lambda *a: time.strftime('%Y-%m-%d'),
'category_id': _get_default_category
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form': res})
return self.pool['report'].get_action(cr, uid, ids,
'l10n_in_hr_payroll.report_hrsalarybymonth',
data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
UCSUR-Pitt/wprdc-etl | test/base.py | 1 | 2458 | import os
import json
import unittest
import sqlite3
from marshmallow import fields
HERE = os.path.abspath(os.path.dirname(__file__))
from pipeline.loaders import Loader
from pipeline.extractors import Extractor
from pipeline.connectors import Connector
from pipeline.schema import BaseSchema
class TestSchema(BaseSchema):
death_date = fields.DateTime(format='%m/%d/%Y')
death_time = fields.DateTime(format='%I:%M %p')
death_date_and_time = fields.DateTime(dump_only=True)
manner_of_death = fields.String()
age = fields.Integer()
sex = fields.String()
race = fields.String()
case_dispo = fields.String()
combined_od1 = fields.String(allow_none=True)
combined_od2 = fields.String(allow_none=True)
combined_od3 = fields.String(allow_none=True)
combined_od4 = fields.String(allow_none=True)
combined_od5 = fields.String(allow_none=True)
combined_od6 = fields.String(allow_none=True)
combined_od7 = fields.String(allow_none=True)
incident_zip = fields.Integer()
decedent_zip = fields.Integer()
case_year = fields.Integer()
class TestLoader(Loader):
def load(self, data):
pass
class TestConnector(Connector):
def connect(self, target):
return []
def checksum_contents(self, target):
return ''
def close(self):
return True
class TestExtractor(Extractor):
def process_connection(self):
return []
def handle_line(self, line):
return []
def set_headers(self):
pass
def extract(self):
return []
class TestBase(unittest.TestCase):
def setUp(self):
self.settings_file = os.path.join(HERE, 'mock/first_test_settings.json')
self.Connector = TestConnector
self.Loader = TestLoader
with open(self.settings_file) as f:
db = json.loads(f.read())['general']['statusdb']
self.conn = sqlite3.connect(db)
self.cur = self.conn.cursor()
self.cur.execute(
'''
CREATE TABLE IF NOT EXISTS
status (
name TEXT NOT NULL,
display_name TEXT,
last_ran INTEGER,
start_time INTEGER NOT NULL,
input_checksum TEXT,
status TEXT,
num_lines INTEGER,
PRIMARY KEY (display_name, start_time)
)
'''
)
def tearDown(self):
self.conn.close()
| mit |
jamesblunt/scrapy | scrapy/contrib/downloadermiddleware/httpcompression.py | 138 | 2278 | import zlib
from scrapy.utils.gz import gunzip, is_gzipped
from scrapy.http import Response, TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.exceptions import NotConfigured
class HttpCompressionMiddleware(object):
"""This middleware allows compressed (gzip, deflate) traffic to be
sent/received from web sites"""
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('COMPRESSION_ENABLED'):
raise NotConfigured
return cls()
def process_request(self, request, spider):
request.headers.setdefault('Accept-Encoding', 'gzip,deflate')
def process_response(self, request, response, spider):
if isinstance(response, Response):
content_encoding = response.headers.getlist('Content-Encoding')
if content_encoding and not is_gzipped(response):
encoding = content_encoding.pop()
decoded_body = self._decode(response.body, encoding.lower())
respcls = responsetypes.from_args(headers=response.headers, \
url=response.url)
kwargs = dict(cls=respcls, body=decoded_body)
if issubclass(respcls, TextResponse):
# force recalculating the encoding until we make sure the
# responsetypes guessing is reliable
kwargs['encoding'] = None
response = response.replace(**kwargs)
if not content_encoding:
del response.headers['Content-Encoding']
return response
def _decode(self, body, encoding):
if encoding == 'gzip' or encoding == 'x-gzip':
body = gunzip(body)
if encoding == 'deflate':
try:
body = zlib.decompress(body)
except zlib.error:
# ugly hack to work with raw deflate content that may
# be sent by microsoft servers. For more information, see:
# http://carsten.codimi.de/gzip.yaws/
# http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
# http://www.gzip.org/zlib/zlib_faq.html#faq38
body = zlib.decompress(body, -15)
return body
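# Usage note: the middleware is driven entirely by the COMPRESSION_ENABLED
# setting checked in from_crawler() above. A project that wants to opt out of
# gzip/deflate negotiation would typically set, in its settings.py:
#
#     COMPRESSION_ENABLED = False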
| bsd-3-clause |
xflin/spark | examples/src/main/python/sql/streaming/structured_network_wordcount.py | 76 | 2534 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Counts words in UTF8 encoded, '\n' delimited text received from the network.
Usage: structured_network_wordcount.py <hostname> <port>
<hostname> and <port> describe the TCP server that Structured Streaming
would connect to receive data.
To run this on your local machine, you need to first run a Netcat server
`$ nc -lk 9999`
and then run the example
`$ bin/spark-submit examples/src/main/python/sql/streaming/structured_network_wordcount.py
localhost 9999`
"""
from __future__ import print_function
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: structured_network_wordcount.py <hostname> <port>", file=sys.stderr)
exit(-1)
host = sys.argv[1]
port = int(sys.argv[2])
spark = SparkSession\
.builder\
.appName("StructuredNetworkWordCount")\
.getOrCreate()
# Create DataFrame representing the stream of input lines from connection to host:port
lines = spark\
.readStream\
.format('socket')\
.option('host', host)\
.option('port', port)\
.load()
# Split the lines into words
words = lines.select(
# explode turns each item in an array into a separate row
explode(
split(lines.value, ' ')
).alias('word')
)
# Generate running word count
wordCounts = words.groupBy('word').count()
# Start running the query that prints the running counts to the console
query = wordCounts\
.writeStream\
.outputMode('complete')\
.format('console')\
.start()
query.awaitTermination()
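# For reference, the console sink prints each result batch roughly like this
# (illustrative output for the input "hello spark hello"; exact formatting can
# vary between Spark versions):
#
#     -------------------------------------------
#     Batch: 1
#     -------------------------------------------
#     +-----+-----+
#     | word|count|
#     +-----+-----+
#     |hello|    2|
#     |spark|    1|
#     +-----+-----+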
| apache-2.0 |
googlefonts/roboto | scripts/lib/fontbuild/features.py | 7 | 7722 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from feaTools import parser
from feaTools.writers.fdkSyntaxWriter import FDKSyntaxFeatureWriter
class FilterFeatureWriter(FDKSyntaxFeatureWriter):
"""Feature writer to detect invalid references and duplicate definitions."""
def __init__(self, refs=set(), name=None, isFeature=False):
"""Initializes the set of known references, empty by default."""
self.refs = refs
self.featureNames = set()
self.lookupNames = set()
self.tableNames = set()
self.languageSystems = set()
super(FilterFeatureWriter, self).__init__(
name=name, isFeature=isFeature)
# error to print when undefined reference is found in glyph class
self.classErr = ('Undefined reference "%s" removed from glyph class '
'definition %s.')
# error to print when undefined reference is found in sub or pos rule
subErr = ['Substitution rule with undefined reference "%s" removed']
if self._name:
subErr.append(" from ")
subErr.append("feature" if self._isFeature else "lookup")
subErr.append(' "%s"' % self._name)
subErr.append(".")
self.subErr = "".join(subErr)
self.posErr = self.subErr.replace("Substitution", "Positioning")
def _subwriter(self, name, isFeature):
"""Use this class for nested expressions e.g. in feature definitions."""
return FilterFeatureWriter(self.refs, name, isFeature)
def _flattenRefs(self, refs, flatRefs):
"""Flatten a list of references."""
for ref in refs:
if type(ref) == list:
self._flattenRefs(ref, flatRefs)
elif ref != "'": # ignore contextual class markings
flatRefs.append(ref)
def _checkRefs(self, refs, errorMsg):
"""Check a list of references found in a sub or pos rule."""
flatRefs = []
self._flattenRefs(refs, flatRefs)
for ref in flatRefs:
# trailing apostrophes should be ignored
if ref[-1] == "'":
ref = ref[:-1]
if ref not in self.refs:
print errorMsg % ref
# insert an empty instruction so that we can't end up with an
# empty block, which is illegal syntax
super(FilterFeatureWriter, self).rawText(";")
return False
return True
def classDefinition(self, name, contents):
"""Check that contents are valid, then add name to known references."""
if name in self.refs:
return
newContents = []
for ref in contents:
if ref not in self.refs and ref != "-":
print self.classErr % (ref, name)
else:
newContents.append(ref)
self.refs.add(name)
super(FilterFeatureWriter, self).classDefinition(name, newContents)
def gsubType1(self, target, replacement):
"""Check a sub rule with one-to-one replacement."""
if self._checkRefs([target, replacement], self.subErr):
super(FilterFeatureWriter, self).gsubType1(target, replacement)
def gsubType4(self, target, replacement):
"""Check a sub rule with many-to-one replacement."""
if self._checkRefs([target, replacement], self.subErr):
super(FilterFeatureWriter, self).gsubType4(target, replacement)
def gsubType6(self, precedingContext, target, trailingContext, replacement):
"""Check a sub rule with contextual replacement."""
refs = [precedingContext, target, trailingContext, replacement]
if self._checkRefs(refs, self.subErr):
super(FilterFeatureWriter, self).gsubType6(
precedingContext, target, trailingContext, replacement)
def gposType1(self, target, value):
"""Check a single positioning rule."""
if self._checkRefs([target], self.posErr):
super(FilterFeatureWriter, self).gposType1(target, value)
def gposType2(self, target, value, needEnum=False):
"""Check a pair positioning rule."""
if self._checkRefs(target, self.posErr):
super(FilterFeatureWriter, self).gposType2(target, value, needEnum)
# these rules may contain references, but they aren't present in Roboto
def gsubType3(self, target, replacement):
raise NotImplementedError
def feature(self, name):
"""Adds a feature definition only once."""
if name not in self.featureNames:
self.featureNames.add(name)
return super(FilterFeatureWriter, self).feature(name)
# we must return a new writer even if we don't add it to this one
return FDKSyntaxFeatureWriter(name, True)
def lookup(self, name):
"""Adds a lookup block only once."""
if name not in self.lookupNames:
self.lookupNames.add(name)
return super(FilterFeatureWriter, self).lookup(name)
# we must return a new writer even if we don't add it to this one
return FDKSyntaxFeatureWriter(name, False)
def languageSystem(self, langTag, scriptTag):
"""Adds a language system instruction only once."""
system = (langTag, scriptTag)
if system not in self.languageSystems:
self.languageSystems.add(system)
super(FilterFeatureWriter, self).languageSystem(langTag, scriptTag)
def table(self, name, data):
"""Adds a table only once."""
if name in self.tableNames:
return
self.tableNames.add(name)
self._instructions.append("table %s {" % name)
self._instructions.extend([" %s %s;" % line for line in data])
self._instructions.append("} %s;" % name)
def compileFeatureRE(name):
"""Compiles a feature-matching regex."""
# this is the pattern used internally by feaTools:
# https://github.com/typesupply/feaTools/blob/master/Lib/feaTools/parser.py
featureRE = list(parser.featureContentRE)
featureRE.insert(2, name)
featureRE.insert(6, name)
return re.compile("".join(featureRE))
def updateFeature(font, name, value):
"""Add a feature definition, or replace existing one."""
featureRE = compileFeatureRE(name)
if featureRE.search(font.features.text):
font.features.text = featureRE.sub(value, font.features.text)
else:
font.features.text += "\n" + value
def readFeatureFile(font, text, prepend=True):
"""Incorporate valid definitions from feature text into font."""
writer = FilterFeatureWriter(set(font.keys()))
if prepend:
text += font.features.text
else:
text = font.features.text + text
parser.parseFeatures(writer, text)
font.features.text = writer.write()
def writeFeatureFile(font, path):
"""Write the font's features to an external file."""
fout = open(path, "w")
fout.write(font.features.text)
fout.close()
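# Illustrative usage sketch. These helpers expect a RoboFab-style font object
# whose glyph names come from font.keys() and whose feature text lives in
# font.features.text; the exact build flow in the Roboto scripts may differ.
#
#     fea_text = open("family.fea").read()
#     readFeatureFile(font, fea_text)        # drops rules that reference missing glyphs
#     updateFeature(font, "smcp", "feature smcp {\n    sub a by a.sc;\n} smcp;")
#     writeFeatureFile(font, "build/family.fea")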
| apache-2.0 |
switchboardOp/ansible | test/units/modules/network/vyos/test_vyos_config.py | 77 | 3575 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.vyos import vyos_config
from .vyos_module import TestVyosModule, load_fixture, set_module_args
class TestVyosConfigModule(TestVyosModule):
module = vyos_config
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.vyos.vyos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.vyos.vyos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.vyos.vyos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
config_file = 'vyos_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_vyos_config_unchanged(self):
src = load_fixture('vyos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_vyos_config_src(self):
src = load_fixture('vyos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['set system host-name foo', 'delete interfaces ethernet eth0 address']
self.execute_module(changed=True, commands=commands)
def test_vyos_config_src_brackets(self):
src = load_fixture('vyos_config_src_brackets.cfg')
set_module_args(dict(src=src))
commands = ['set interfaces ethernet eth0 address 10.10.10.10/24', 'set system host-name foo']
self.execute_module(changed=True, commands=commands)
def test_vyos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_vyos_config_lines(self):
commands = ['set system host-name foo']
set_module_args(dict(lines=commands))
self.execute_module(changed=True, commands=commands)
def test_vyos_config_config(self):
config = 'set system host-name localhost'
new_config = ['set system host-name router']
set_module_args(dict(lines=new_config, config=config))
self.execute_module(changed=True, commands=new_config)
def test_vyos_config_match_none(self):
lines = ['set system interfaces ethernet eth0 address 1.2.3.4/24',
'set system interfaces ethernet eth0 description test string']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, sort=False)
| gpl-3.0 |
vnsofthe/odoo | addons/hw_screen/__openerp__.py | 25 | 1524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2015 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Screen Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'summary': 'Provides support for customer facing displays',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Screen Driver
=============
This module allows the POS client to send rendered HTML to a remotely
installed screen. This module then displays this HTML using a web
browser.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'installable': False,
'auto_install': False,
}
| agpl-3.0 |
iam-TJ/node-gyp | gyp/test/mac/gyptest-app.py | 85 | 1409 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
# Binary
test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
chdir='app-bundle')
test.must_exist(info_plist)
test.must_contain(info_plist, 'com.google.Test App Gyp') # Variable expansion
# Resources
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/InfoPlist.strings',
chdir='app-bundle')
test.built_file_must_exist(
'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
chdir='app-bundle')
test.pass_test()
| mit |
rlucioni/rotations | rotations/migrations/0001_initial.py | 1 | 2069 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-03 04:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import sortedm2m.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text="The member's name.", max_length=255, unique=True)),
('email', models.EmailField(help_text="The member's email address, to which messages can be sent.", max_length=254, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-modified'],
},
),
migrations.CreateModel(
name='Rotation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text="The rotation's name.", max_length=255, unique=True)),
('description', models.TextField(help_text="A description of the rotation's purpose.")),
('message', models.TextField(help_text='A reminder message sent to members of the rotation.')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('members', sortedm2m.fields.SortedManyToManyField(help_text=None, related_name='rotations', to='rotations.Member')),
('on_call', models.ForeignKey(help_text='Member currently on call.', on_delete=django.db.models.deletion.CASCADE, to='rotations.Member')),
],
options={
'ordering': ['-modified'],
},
),
]
| mit |
prodromou87/gem5 | tests/configs/arm_generic.py | 10 | 4640 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from abc import ABCMeta, abstractmethod
import m5
from m5.objects import *
from m5.proxy import *
m5.util.addToPath('../configs/common')
import FSConfig
from Caches import *
from base_config import *
class LinuxArmSystemBuilder(object):
"""Mix-in that implements create_system.
This mix-in is intended as a convenient way of adding an
ARM-specific create_system method to a class deriving from one of
the generic base systems.
"""
def __init__(self, machine_type):
"""
Arguments:
machine_type -- String describing the platform to simulate
"""
self.machine_type = machine_type
def create_system(self):
system = FSConfig.makeArmSystem(self.mem_mode,
self.machine_type, None, False)
# We typically want the simulator to panic if the kernel
# panics or oopses. This prevents the simulator from running
# an obviously failed test case until the end of time.
system.panic_on_panic = True
system.panic_on_oops = True
self.init_system(system)
return system
class LinuxArmFSSystem(LinuxArmSystemBuilder,
BaseFSSystem):
"""Basic ARM full system builder."""
def __init__(self, machine_type='RealView_PBX', **kwargs):
"""Initialize an ARM system that supports full system simulation.
Note: Keyword arguments that are not listed below will be
passed to the BaseFSSystem.
Keyword Arguments:
machine_type -- String describing the platform to simulate
"""
BaseSystem.__init__(self, **kwargs)
LinuxArmSystemBuilder.__init__(self, machine_type)
class LinuxArmFSSystemUniprocessor(LinuxArmSystemBuilder,
BaseFSSystemUniprocessor):
"""Basic ARM full system builder for uniprocessor systems.
Note: This class is a specialization of the ArmFSSystem and is
only really needed to provide backwards compatibility for existing
test cases.
"""
def __init__(self, machine_type='RealView_PBX', **kwargs):
BaseFSSystemUniprocessor.__init__(self, **kwargs)
LinuxArmSystemBuilder.__init__(self, machine_type)
class LinuxArmFSSwitcheroo(LinuxArmSystemBuilder, BaseFSSwitcheroo):
"""Uniprocessor ARM system prepared for CPU switching"""
def __init__(self, machine_type='RealView_PBX', **kwargs):
BaseFSSwitcheroo.__init__(self, **kwargs)
LinuxArmSystemBuilder.__init__(self, machine_type)
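# Illustrative sketch. A regression config would normally instantiate one of
# the classes above and ask it for a simulated system; the BaseSystem keyword
# arguments (memory mode, CPU class, ...) come from tests/configs/base_config.py
# and may differ between gem5 versions.
#
#     builder = LinuxArmFSSystemUniprocessor(machine_type='RealView_PBX')
#     system = builder.create_system()   # ARM system with panic_on_panic/panic_on_oops set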
| bsd-3-clause |
pirati-cz/byro | setup.py | 1 | 1665 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import locale
from setuptools import (setup, find_packages)
from byro import (__version__, __author__, __email__, __license__, __doc__)
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
setup(
# Basic
name='Byro',
version=__version__,
packages=find_packages(),
    # Entry point
entry_points={
'console_scripts': [
'byro = byro.__main__:main',
]
},
# Requirements
install_requires=["wget", "dateutils", "markdown",
"ConfigArgParse", "sh",
"python-redmine", "python-docx",
"pytesseract"],
package_data={
'byro': ['resource/*.ini',
'resource/*.png',
'resource/styles/fonts/LiberationSans/*ttf',
'resource/styles/fonts/Nina/*ttf',
'resource/styles/fonts/PoliticsHead/*ttf',
'resource/styles/common/*.tex',
'resource/styles/letter/*.tex',
'resource/styles/letter/*.pdf']
},
# About
author=str(__author__),
author_email=__email__,
description='Bureaucracy assistant',
license=__license__,
long_description=__doc__,
keywords="bureaucracy administration pdf git ocr markdown",
url='https://github.com/pirati-cz/byro/',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Legal Industry',
'Intended Audience :: Users',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Natural Language :: Czech',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Office/Business',
'Topic :: Utilities'
]
)
| gpl-3.0 |
thenewguy/django-jsonfield | jsonfield/tests.py | 10 | 10231 | from decimal import Decimal
from django.core.serializers import deserialize, serialize
from django.core.serializers.base import DeserializationError
from django.db import models
from django.test import TestCase
try:
import json
except ImportError:
from django.utils import simplejson as json
from .fields import JSONField, JSONCharField
from django.forms.util import ValidationError
from collections import OrderedDict
class JsonModel(models.Model):
json = JSONField()
default_json = JSONField(default={"check":12})
complex_default_json = JSONField(default=[{"checkcheck": 1212}])
empty_default = JSONField(default={})
class JsonCharModel(models.Model):
json = JSONCharField(max_length=100)
default_json = JSONCharField(max_length=100, default={"check":34})
class ComplexEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, complex):
return {
'__complex__': True,
'real': obj.real,
'imag': obj.imag,
}
return json.JSONEncoder.default(self, obj)
def as_complex(dct):
if '__complex__' in dct:
return complex(dct['real'], dct['imag'])
return dct
class JSONModelCustomEncoders(models.Model):
# A JSON field that can store complex numbers
json = JSONField(
dump_kwargs={'cls': ComplexEncoder, "indent": 4},
load_kwargs={'object_hook': as_complex},
)
class JSONFieldTest(TestCase):
"""JSONField Wrapper Tests"""
json_model = JsonModel
def test_json_field_create(self):
"""Test saving a JSON object in our JSONField"""
json_obj = {
"item_1": "this is a json blah",
"blergh": "hey, hey, hey"}
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_string_in_json_field(self):
"""Test saving an ordinary Python string in our JSONField"""
json_obj = 'blah blah'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_float_in_json_field(self):
"""Test saving a Python float in our JSONField"""
json_obj = 1.23
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_int_in_json_field(self):
"""Test saving a Python integer in our JSONField"""
json_obj = 1234567
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_decimal_in_json_field(self):
"""Test saving a Python Decimal in our JSONField"""
json_obj = Decimal(12.34)
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
# here we must know to convert the returned string back to Decimal,
# since json does not support that format
self.assertEqual(Decimal(new_obj.json), json_obj)
def test_json_field_modify(self):
"""Test modifying a JSON object in our JSONField"""
json_obj_1 = {'a': 1, 'b': 2}
json_obj_2 = {'a': 3, 'b': 4}
obj = self.json_model.objects.create(json=json_obj_1)
self.assertEqual(obj.json, json_obj_1)
obj.json = json_obj_2
self.assertEqual(obj.json, json_obj_2)
obj.save()
self.assertEqual(obj.json, json_obj_2)
self.assertTrue(obj)
def test_json_field_load(self):
"""Test loading a JSON object from the DB"""
json_obj_1 = {'a': 1, 'b': 2}
obj = self.json_model.objects.create(json=json_obj_1)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj_1)
def test_json_list(self):
"""Test storing a JSON list"""
json_obj = ["my", "list", "of", 1, "objs", {"hello": "there"}]
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_empty_objects(self):
"""Test storing empty objects"""
for json_obj in [{}, [], 0, '', False]:
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(json_obj, obj.json)
self.assertEqual(json_obj, new_obj.json)
def test_custom_encoder(self):
"""Test encoder_cls and object_hook"""
value = 1 + 3j # A complex number
obj = JSONModelCustomEncoders.objects.create(json=value)
new_obj = JSONModelCustomEncoders.objects.get(pk=obj.pk)
self.assertEqual(value, new_obj.json)
def test_django_serializers(self):
"""Test serializing/deserializing jsonfield data"""
for json_obj in [{}, [], 0, '', False, {'key': 'value', 'num': 42,
'ary': list(range(5)),
'dict': {'k': 'v'}}]:
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assert_(new_obj)
queryset = self.json_model.objects.all()
ser = serialize('json', queryset)
for dobj in deserialize('json', ser):
obj = dobj.object
pulled = self.json_model.objects.get(id=obj.pk)
self.assertEqual(obj.json, pulled.json)
def test_default_parameters(self):
"""Test providing a default value to the model"""
model = JsonModel()
model.json = {"check": 12}
self.assertEqual(model.json, {"check": 12})
self.assertEqual(type(model.json), dict)
self.assertEqual(model.default_json, {"check": 12})
self.assertEqual(type(model.default_json), dict)
def test_invalid_json(self):
# invalid json data {] in the json and default_json fields
ser = '[{"pk": 1, "model": "jsonfield.jsoncharmodel", ' \
'"fields": {"json": "{]", "default_json": "{]"}}]'
with self.assertRaises(DeserializationError) as cm:
next(deserialize('json', ser))
inner = cm.exception.args[0]
self.assertTrue(isinstance(inner, ValidationError))
self.assertEqual('Enter valid JSON', inner.messages[0])
def test_integer_in_string_in_json_field(self):
"""Test saving the Python string '123' in our JSONField"""
json_obj = '123'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_boolean_in_string_in_json_field(self):
"""Test saving the Python string 'true' in our JSONField"""
json_obj = 'true'
obj = self.json_model.objects.create(json=json_obj)
new_obj = self.json_model.objects.get(id=obj.id)
self.assertEqual(new_obj.json, json_obj)
def test_pass_by_reference_pollution(self):
"""Make sure the default parameter is copied rather than passed by reference"""
model = JsonModel()
model.default_json["check"] = 144
model.complex_default_json[0]["checkcheck"] = 144
self.assertEqual(model.default_json["check"], 144)
self.assertEqual(model.complex_default_json[0]["checkcheck"], 144)
# Make sure when we create a new model, it resets to the default value
# and not to what we just set it to (it would be if it were passed by reference)
model = JsonModel()
self.assertEqual(model.default_json["check"], 12)
self.assertEqual(model.complex_default_json[0]["checkcheck"], 1212)
def test_normal_regex_filter(self):
"""Make sure JSON model can filter regex"""
JsonModel.objects.create(json={"boom": "town"})
JsonModel.objects.create(json={"move": "town"})
JsonModel.objects.create(json={"save": "town"})
self.assertEqual(JsonModel.objects.count(), 3)
self.assertEqual(JsonModel.objects.filter(json__regex=r"boom").count(), 1)
self.assertEqual(JsonModel.objects.filter(json__regex=r"town").count(), 3)
def test_save_blank_object(self):
"""Test that JSON model can save a blank object as none"""
model = JsonModel()
self.assertEqual(model.empty_default, {})
model.save()
self.assertEqual(model.empty_default, {})
model1 = JsonModel(empty_default={"hey": "now"})
self.assertEqual(model1.empty_default, {"hey": "now"})
model1.save()
self.assertEqual(model1.empty_default, {"hey": "now"})
class JSONCharFieldTest(JSONFieldTest):
json_model = JsonCharModel
class OrderedJsonModel(models.Model):
json = JSONField(load_kwargs={'object_pairs_hook': OrderedDict})
class OrderedDictSerializationTest(TestCase):
ordered_dict = OrderedDict([
('number', [1, 2, 3, 4]),
('notes', True),
])
expected_key_order = ['number', 'notes']
def test_ordered_dict_differs_from_normal_dict(self):
self.assertEqual(list(self.ordered_dict.keys()), self.expected_key_order)
self.assertNotEqual(dict(self.ordered_dict).keys(), self.expected_key_order)
def test_default_behaviour_loses_sort_order(self):
mod = JsonModel.objects.create(json=self.ordered_dict)
self.assertEqual(list(mod.json.keys()), self.expected_key_order)
mod_from_db = JsonModel.objects.get(id=mod.id)
# mod_from_db lost ordering information during json.loads()
self.assertNotEqual(mod_from_db.json.keys(), self.expected_key_order)
def test_load_kwargs_hook_does_not_lose_sort_order(self):
mod = OrderedJsonModel.objects.create(json=self.ordered_dict)
self.assertEqual(list(mod.json.keys()), self.expected_key_order)
mod_from_db = OrderedJsonModel.objects.get(id=mod.id)
self.assertEqual(list(mod_from_db.json.keys()), self.expected_key_order)
| mit |
codeforamerica/skillcamp | ENV/lib/python2.7/site-packages/gunicorn/app/pasterapp.py | 24 | 5631 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import pkg_resources
import sys
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from paste.deploy import loadapp, loadwsgi
SERVER = loadwsgi.SERVER
from gunicorn.app.base import Application
from gunicorn.config import Config, get_default_config_file
from gunicorn import util
def paste_config(gconfig, config_url, relative_to, global_conf=None):
# add entry to pkg_resources
sys.path.insert(0, relative_to)
pkg_resources.working_set.add_entry(relative_to)
cx = loadwsgi.loadcontext(SERVER, config_url, relative_to=relative_to,
global_conf=global_conf)
gc, lc = cx.global_conf.copy(), cx.local_conf.copy()
cfg = {}
host, port = lc.pop('host', ''), lc.pop('port', '')
if host and port:
cfg['bind'] = '%s:%s' % (host, port)
elif host:
cfg['bind'] = host.split(',')
cfg['workers'] = int(lc.get('workers', 1))
cfg['umask'] = int(lc.get('umask', 0))
cfg['default_proc_name'] = gc.get('__file__')
for k, v in gc.items():
if k not in gconfig.settings:
continue
cfg[k] = v
for k, v in lc.items():
if k not in gconfig.settings:
continue
cfg[k] = v
return cfg
def load_pasteapp(config_url, relative_to, global_conf=None):
return loadapp(config_url, relative_to=relative_to,
global_conf=global_conf)
class PasterBaseApplication(Application):
gcfg = None
def app_config(self):
return paste_config(self.cfg, self.cfgurl, self.relpath,
global_conf=self.gcfg)
def load_config(self):
super(PasterBaseApplication, self).load_config()
# reload logging conf
if hasattr(self, "cfgfname"):
parser = ConfigParser.ConfigParser()
parser.read([self.cfgfname])
if parser.has_section('loggers'):
from logging.config import fileConfig
config_file = os.path.abspath(self.cfgfname)
fileConfig(config_file, dict(__file__=config_file,
here=os.path.dirname(config_file)))
class PasterApplication(PasterBaseApplication):
def init(self, parser, opts, args):
if len(args) != 1:
parser.error("No application name specified.")
cwd = util.getcwd()
cfgfname = os.path.normpath(os.path.join(cwd, args[0]))
cfgfname = os.path.abspath(cfgfname)
if not os.path.exists(cfgfname):
parser.error("Config file not found: %s" % cfgfname)
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
sys.path.insert(0, self.relpath)
pkg_resources.working_set.add_entry(self.relpath)
return self.app_config()
def load(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
return load_pasteapp(self.cfgurl, self.relpath, global_conf=self.gcfg)
class PasterServerApplication(PasterBaseApplication):
def __init__(self, app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
self.cfg = Config()
self.gcfg = gcfg # need to hold this for app_config
self.app = app
self.callable = None
gcfg = gcfg or {}
cfgfname = gcfg.get("__file__")
if cfgfname is not None:
self.cfgurl = 'config:%s' % cfgfname
self.relpath = os.path.dirname(cfgfname)
self.cfgfname = cfgfname
cfg = kwargs.copy()
if port and not host.startswith("unix:"):
bind = "%s:%s" % (host, port)
else:
bind = host
cfg["bind"] = bind.split(',')
if gcfg:
for k, v in gcfg.items():
cfg[k] = v
cfg["default_proc_name"] = cfg['__file__']
try:
for k, v in cfg.items():
if k.lower() in self.cfg.settings and v is not None:
self.cfg.set(k.lower(), v)
except Exception as e:
sys.stderr.write("\nConfig error: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
if cfg.get("config"):
self.load_config_from_file(cfg["config"])
else:
default_config = get_default_config_file()
if default_config is not None:
self.load_config_from_file(default_config)
def load(self):
# chdir to the configured path before loading,
# default is the current dir
os.chdir(self.cfg.chdir)
return self.app
def run():
"""\
    The ``gunicorn_paster`` command for launching Paster-compatible
    applications like Pylons or Turbogears2
"""
util.warn("""This command is deprecated.
You should now use the `--paste` option. Ex.:
gunicorn --paste development.ini
""")
from gunicorn.app.pasterapp import PasterApplication
PasterApplication("%(prog)s [OPTIONS] pasteconfig.ini").run()
def paste_server(app, gcfg=None, host="127.0.0.1", port=None, *args, **kwargs):
"""\
A paster server.
    The entry point in your paster ini file should look like this:
[server:main]
use = egg:gunicorn#main
host = 127.0.0.1
port = 5000
"""
from gunicorn.app.pasterapp import PasterServerApplication
PasterServerApplication(app, gcfg=gcfg, host=host, port=port, *args, **kwargs).run()
| mit |
adelton/django | tests/postgres_tests/fields.py | 302 | 1087 | """
Indirection layer for PostgreSQL-specific fields, so the tests don't fail when
run with a backend other than PostgreSQL.
"""
from django.db import models
try:
from django.contrib.postgres.fields import (
ArrayField, BigIntegerRangeField, DateRangeField, DateTimeRangeField,
FloatRangeField, HStoreField, IntegerRangeField, JSONField,
)
except ImportError:
class DummyArrayField(models.Field):
def __init__(self, base_field, size=None, **kwargs):
super(DummyArrayField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(DummyArrayField, self).deconstruct()
kwargs.update({
'base_field': '',
'size': 1,
})
return name, path, args, kwargs
ArrayField = DummyArrayField
BigIntegerRangeField = models.Field
DateRangeField = models.Field
DateTimeRangeField = models.Field
FloatRangeField = models.Field
HStoreField = models.Field
IntegerRangeField = models.Field
JSONField = models.Field
| bsd-3-clause |
jborean93/ansible | lib/ansible/modules/validate_argument_spec.py | 18 | 1421 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: validate_argument_spec
short_description: Validate role argument specs.
description:
- This module validates role arguments with a defined argument specification.
version_added: "2.11"
options:
argument_spec:
description:
- A dictionary like AnsibleModule argument_spec
required: true
provided_arguments:
description:
- A dictionary of the arguments that will be validated according to argument_spec
author:
- Ansible Core Team
'''
EXAMPLES = r'''
'''
RETURN = r'''
argument_errors:
description: A list of arg validation errors.
returned: failure
type: list
elements: str
sample:
- "error message 1"
- "error message 2"
argument_spec_data:
description: A dict of the data from the 'argument_spec' arg.
returned: failure
type: dict
sample:
some_arg:
type: "str"
some_other_arg:
type: "int"
required: true
validate_args_context:
description: A dict of info about where validate_args_spec was used
type: dict
returned: always
sample:
name: my_role
type: role
path: /home/user/roles/my_role/
argument_spec_name: main
'''
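# Hedged usage sketch (the EXAMPLES section above is empty in this file; the task
# below is illustrative and its argument names are hypothetical):
#
#   - name: validate provided arguments against an inline spec
#     validate_argument_spec:
#       argument_spec:
#         some_arg:
#           type: int
#           required: true
#       provided_arguments:
#         some_arg: 42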
| gpl-3.0 |
rotofly/odoo | addons/account_payment/__openerp__.py | 261 | 2925 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Suppliers Payment Management',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Module to manage the payment of your supplier invoices.
=======================================================
This module allows you to create and manage your payment orders, with purposes to
---------------------------------------------------------------------------------
* serve as base for an easy plug-in of various automated payment mechanisms.
* provide a more efficient way to manage invoice payment.
Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries, it just
records the fact that you gave your payment order to your bank. The booking of
your order must be encoded as usual through a bank statement. Indeed, it's only
when you get the confirmation from your bank that your order has been accepted
that you can book it in your accounting. To help you with that operation, you
have a new option to import payment orders as bank statement lines.
""",
'depends': ['account','account_voucher'],
'data': [
'security/account_payment_security.xml',
'security/ir.model.access.csv',
'wizard/account_payment_pay_view.xml',
'wizard/account_payment_populate_statement_view.xml',
'wizard/account_payment_create_order_view.xml',
'account_payment_view.xml',
'account_payment_workflow.xml',
'account_payment_sequence.xml',
'account_payment_report.xml',
'views/report_paymentorder.xml',
],
'demo': ['account_payment_demo.xml'],
'test': [
'test/account_payment_demo.yml',
'test/cancel_payment_order.yml',
'test/payment_order_process.yml',
'test/account_payment_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pforret/python-for-android | python3-alpha/python-libs/gdata/apps_property.py | 150 | 1121 | #!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides a base class to represent property elements in feeds.
This module is used for version 2 of the Google Data APIs. The primary class
in this module is AppsProperty.
"""
__author__ = 'Vic Fryzel <[email protected]>'
import atom.core
import gdata.apps
class AppsProperty(atom.core.XmlElement):
"""Represents an <apps:property> element in a feed."""
_qname = gdata.apps.APPS_TEMPLATE % 'property'
name = 'name'
value = 'value'
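# Illustrative sketch (an assumption based on atom.core.XmlElement conventions,
# not shown in this file): declared members can be set via keyword arguments,
# and the name/value pair below is purely hypothetical.
#
#   prop = AppsProperty(name='quotaInMb', value='2048')
#   # serialises to something like <apps:property name="quotaInMb" value="2048"/>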
| apache-2.0 |
garyjyao1/ansible | lib/ansible/modules/core/web_infrastructure/htpasswd.py | 99 | 8780 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are linux passwd hashes. If you
do so the password file will not be compatible with Apache or Nginx
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requires: [ passlib>=1.6 ]
author: "Lorin Hochstein (@lorin)"
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
# Remove a user from a password file
- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt
"""
import os
import tempfile
from distutils.version import StrictVersion
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if not os.path.exists(dest):
raise ValueError("%s does not exists" % dest)
if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library")
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
# If the file gets edited, it returns true, so only edit the file if it has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
[ f.write(line) for line in lines if line.strip() ]
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception, e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
monkeysecurity/npyscreen | npyscreen/compatibility_code/npysNPSTree.py | 21 | 7343 | #!/usr/bin/env python
import weakref
import collections
import operator
class NPSTreeData(object):
CHILDCLASS = None
def __init__(self, content=None, parent=None, selected=False, selectable=True,
highlight=False, expanded=True, ignoreRoot=True, sort_function=None):
self.setParent(parent)
self.setContent(content)
self.selectable = selectable
self.selected = selected
self.highlight = highlight
self.expanded = expanded
self._children = []
self.ignoreRoot = ignoreRoot
self.sort = False
self.sort_function = sort_function
self.sort_function_wrapper = True
def getContent(self):
return self.content
def getContentForDisplay(self):
return str(self.content)
def setContent(self, content):
self.content = content
def isSelected(self):
return self.selected
def isHighlighted(self):
return self.highlight
def setParent(self, parent):
if parent == None:
self._parent = None
else:
self._parent = weakref.proxy(parent)
def getParent(self):
return self._parent
def findDepth(self, d=0):
depth = d
parent = self.getParent()
while parent:
d += 1
parent = parent.getParent()
return d
# Recursive
#if self._parent == None:
# return d
#else:
# return(self._parent.findDepth(d+1))
def isLastSibling(self):
if self.getParent():
if list(self.getParent().getChildren())[-1] == self:
return True
else:
return False
else:
return None
def hasChildren(self):
if len(self._children) > 0:
return True
else:
return False
def getChildren(self):
for c in self._children:
try:
yield weakref.proxy(c)
except:
yield c
def getChildrenObjects(self):
return self._children[:]
def _getChildrenList(self):
return self._children
def newChild(self, *args, **keywords):
if self.CHILDCLASS:
cld = self.CHILDCLASS
else:
cld = type(self)
c = cld(parent=self, *args, **keywords)
self._children.append(c)
return weakref.proxy(c)
def removeChild(self, child):
new_children = []
for ch in self._children:
# do it this way because of weakref equality bug.
if not ch.getContent() == child.getContent():
new_children.append(ch)
else:
ch.setParent(None)
self._children = new_children
def create_wrapped_sort_function(self, this_function):
def new_function(the_item):
if the_item:
the_real_item = the_item.getContent()
return this_function(the_real_item)
else:
return the_item
return new_function
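    # Illustrative sketch (comments only, not part of the original class): the
    # wrapper lets a sort key written against plain content objects be applied
    # to tree nodes, e.g.
    #
    #   tree = NPSTreeData(content='root')
    #   tree.newChild(content='zebra')
    #   tree.newChild(content='ant')
    #   key = tree.create_wrapped_sort_function(lambda content: content)
    #   sorted(tree.getChildren(), key=key)   # nodes ordered 'ant', 'zebra'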
def walkParents(self):
p = self.getParent()
while p:
yield p
p = p.getParent()
def walkTree(self, onlyExpanded=True, ignoreRoot=True, sort=None, sort_function=None):
#Iterate over Tree
if sort is None:
sort = self.sort
if sort_function is None:
sort_function = self.sort_function
# example sort function # sort = True
# example sort function # def sort_function(the_item):
# example sort function # import email.utils
# example sort function # if the_item:
# example sort function # if the_item.getContent():
# example sort function # frm = the_item.getContent().get('from')
# example sort function # try:
# example sort function # frm = email.utils.parseaddr(frm)[0]
# example sort function # except:
# example sort function # pass
# example sort function # return frm
# example sort function # else:
# example sort function # return the_item
#key = operator.methodcaller('getContent',)
if self.sort_function_wrapper and sort_function:
# def wrapped_sort_function(the_item):
# if the_item:
# the_real_item = the_item.getContent()
# return sort_function(the_real_item)
# else:
# return the_item
# _this_sort_function = wrapped_sort_function
_this_sort_function = self.create_wrapped_sort_function(sort_function)
else:
_this_sort_function = sort_function
key = _this_sort_function
if not ignoreRoot:
yield self
nodes_to_yield = collections.deque() # better memory management than a list for pop(0)
if self.expanded or not onlyExpanded:
if sort:
# This and the similar block below could be combined into a nested function
if key:
nodes_to_yield.extend(sorted(self.getChildren(), key=key,))
else:
nodes_to_yield.extend(sorted(self.getChildren()))
else:
nodes_to_yield.extend(self.getChildren())
while nodes_to_yield:
child = nodes_to_yield.popleft()
if child.expanded or not onlyExpanded:
# This and the similar block above could be combined into a nested function
if sort:
if key:
# must be reverse because about to use extendleft() below.
nodes_to_yield.extendleft(sorted(child.getChildren(), key=key, reverse=True))
else:
nodes_to_yield.extendleft(sorted(child.getChildren(), reverse=True))
else:
#for node in child.getChildren():
# if node not in nodes_to_yield:
# nodes_to_yield.appendleft(node)
yield_these = list(child.getChildren())
yield_these.reverse()
nodes_to_yield.extendleft(yield_these)
del yield_these
yield child
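    # Usage sketch (illustrative, in the spirit of the commented example above):
    #
    #   for node in tree.walkTree(onlyExpanded=False, ignoreRoot=True):
    #       print('  ' * node.findDepth() + node.getContentForDisplay())
    #
    # Note that walkTree() is iterative (a deque of nodes still to yield) rather
    # than recursive, which avoids deep call stacks on large trees; the older
    # recursive form is kept below as _walkTreeRecursive.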
def _walkTreeRecursive(self,onlyExpanded=True, ignoreRoot=True,):
#This is an old, recursive version
if (not onlyExpanded) or (self.expanded):
for child in self.getChildren():
for node in child.walkTree(onlyExpanded=onlyExpanded, ignoreRoot=False):
yield node
def getTreeAsList(self, onlyExpanded=True, sort=None, key=None):
_a = []
for node in self.walkTree(onlyExpanded=onlyExpanded, ignoreRoot=self.ignoreRoot, sort=sort):
try:
_a.append(weakref.proxy(node))
except:
_a.append(node)
return _a
| bsd-2-clause |
a-parhom/edx-platform | lms/djangoapps/courseware/tests/test_masquerade.py | 4 | 21981 | # -*- coding: utf-8 -*-
"""
Unit tests for masquerade.
"""
import json
import pickle
from datetime import datetime
import ddt
from django.conf import settings
from django.urls import reverse
from django.test import TestCase
from mock import patch
from pytz import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import CourseMasquerade, MasqueradingKeyValueStore, get_masquerading_user_group
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, masquerade_as_group_member
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.lib.tests import attr
from openedx.features.course_experience import UNIFIED_COURSE_TAB_FLAG
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Base class for masquerade tests that sets up a test course and enrolls a user in the course.
"""
@classmethod
def setUpClass(cls):
super(MasqueradeTestCase, cls).setUpClass()
cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC)})
cls.info_page = ItemFactory.create(
category="course_info", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name="Test Section",
)
cls.sequential_display_name = "Test Masquerade Subsection"
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category="sequential",
display_name=cls.sequential_display_name,
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category="vertical",
display_name="Test Unit",
)
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
cls.problem_display_name = "TestMasqueradeProblem"
cls.problem = ItemFactory.create(
parent_location=cls.vertical.location,
category='problem',
data=problem_xml,
display_name=cls.problem_display_name
)
def setUp(self):
super(MasqueradeTestCase, self).setUp()
self.test_user = self.create_user()
self.login(self.test_user.email, 'test')
self.enroll(self.course, True)
def get_courseware_page(self):
"""
Returns the server response for the courseware page.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.block_id,
'section': self.sequential.location.block_id,
}
)
return self.client.get(url)
def get_course_info_page(self):
"""
Returns the server response for course info page.
"""
url = reverse(
'info',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def get_progress_page(self):
"""
Returns the server response for progress page.
"""
url = reverse(
'progress',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def verify_staff_debug_present(self, staff_debug_expected):
"""
Verifies that the staff debug control visibility is as expected (for staff only).
"""
content = self.get_courseware_page().content
self.assertIn(self.sequential_display_name, content, "Subsection should be visible")
self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
def get_problem(self):
"""
Returns the JSON content for the problem in the course.
"""
problem_url = reverse(
'xblock_handler',
kwargs={
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
'handler': 'xmodule_handler',
'suffix': 'problem_get'
}
)
return self.client.get(problem_url)
def verify_show_answer_present(self, show_answer_expected):
"""
Verifies that "Show Answer" is only present when expected (for staff only).
"""
problem_html = json.loads(self.get_problem().content)['html']
self.assertIn(self.problem_display_name, problem_html)
self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
def ensure_masquerade_as_group_member(self, partition_id, group_id):
"""
Installs a masquerade for the test_user and test course, to enable the
user to masquerade as belonging to the specific partition/group combination.
Also verifies that the call to install the masquerade was successful.
Arguments:
partition_id (int): the integer partition id, referring to partitions already
configured in the course.
            group_id (int): the integer group id, within the specified partition.
"""
self.assertEqual(200, masquerade_as_group_member(self.test_user, self.course, partition_id, group_id))
@attr(shard=1)
class NormalStudentVisibilityTest(MasqueradeTestCase):
"""
Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
"""
def create_user(self):
"""
Creates a normal student user.
"""
return UserFactory()
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_not_visible(self):
"""
Tests that staff debug control is not present for a student.
"""
self.verify_staff_debug_present(False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_not_visible(self):
"""
Tests that "Show Answer" is not visible for a student.
"""
self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
"""
Base class for tests of the masquerade behavior for a staff member.
"""
def create_user(self):
"""
Creates a staff user.
"""
return StaffFactory(course_key=self.course.id)
def update_masquerade(self, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': unicode(self.course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
"application/json"
)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as student.
"""
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_with_masquerade(self):
"""
Tests that staff debug control is not visible when masquerading as a student.
"""
# Verify staff initially can see staff debug
self.verify_staff_debug_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_staff_debug_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_staff_debug_present(True)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_for_staff(self):
"""
Tests that "Show Answer" is not visible when masquerading as a student.
"""
# Verify that staff initially can see "Show Answer".
self.verify_show_answer_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_show_answer_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_show_answer_present(True)
@ddt.ddt
@attr(shard=1)
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
"""
Check for staff being able to masquerade as a specific student.
"""
def setUp(self):
super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
self.student_user = self.create_user()
self.login_student()
self.enroll(self.course, True)
def login_staff(self):
""" Login as a staff user """
self.logout()
self.login(self.test_user.email, 'test')
def login_student(self):
""" Login as a student """
self.logout()
self.login(self.student_user.email, 'test')
def submit_answer(self, response1, response2):
"""
Submit an answer to the single problem in our test course.
"""
return self.submit_question_answer(
self.problem_display_name,
{'2_1': response1, '2_2': response2}
)
def get_progress_detail(self):
"""
Return the reported progress detail for the problem in our test course.
The return value is a string like u'1/2'.
"""
json_data = json.loads(self.look_at_question(self.problem_display_name).content)
progress = '%s/%s' % (str(json_data['current_score']), str(json_data['total_possible']))
return progress
def assertExpectedLanguageInPreference(self, user, expected_language_code):
"""
        This method is a custom assertion that verifies a given user has the
        expected language code in their preferences and in their cookies.
Arguments:
user: User model instance
expected_language_code: string indicating a language code
"""
self.assertEqual(
get_user_preference(user, LANGUAGE_KEY), expected_language_code
)
self.assertEqual(
self.client.cookies[settings.LANGUAGE_COOKIE].value, expected_language_code
)
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_user_on_self_paced(self):
"""
        Test masquerading as a specific user on the course info page when the self-paced
        configuration flag "enable_course_home_improvements" is set.
        Log in as a staff user and visit the course info page, then set masquerade to view
        the same page as a specific student and revisit the course info page.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student,enable the self paced configuration, and check we can see the info page.
SelfPacedConfiguration(enable_course_home_improvements=True).save()
self.update_masquerade(role='student', user_name=self.student_user.username)
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
@ddt.data(
'john', # Non-unicode username
u'fôô@bar', # Unicode username with @, which is what the ENABLE_UNICODE_USERNAME feature allows
)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student(self, username):
"""
Test masquerading as a specific user.
        We answer the problem in our test course as the student and as the staff user, and
        we use the progress as a proxy to determine whose state we currently see.
"""
student = UserFactory.create(username=username)
CourseEnrollment.enroll(student, self.course.id)
self.logout()
self.login(student.email, 'test')
# Answer correctly as the student, and check progress.
self.submit_answer('Correct', 'Correct')
self.assertEqual(self.get_progress_detail(), u'2/2')
# Log in as staff, and check the problem is unanswered.
self.login_staff()
self.assertEqual(self.get_progress_detail(), u'0/2')
# Masquerade as the student, and check we can see the student state.
self.update_masquerade(role='student', user_name=student.username)
self.assertEqual(self.get_progress_detail(), u'2/2')
# Temporarily override the student state.
self.submit_answer('Correct', 'Incorrect')
self.assertEqual(self.get_progress_detail(), u'1/2')
# Reload the page and check we see the student state again.
self.get_courseware_page()
self.assertEqual(self.get_progress_detail(), u'2/2')
# Become the staff user again, and check the problem is still unanswered.
self.update_masquerade(role='staff')
self.assertEqual(self.get_progress_detail(), u'0/2')
# Verify the student state did not change.
self.logout()
self.login(student.email, 'test')
self.assertEqual(self.get_progress_detail(), u'2/2')
def test_masquerading_with_language_preference(self):
"""
        Tests that masquerading as a specific user for the course does not update the
        staff user's own language preference.
        Log in as a staff user, set the user's language preference to English, and visit
        the courseware page. Then set masquerade to view the same page as a specific
        student with a different language preference and revisit the courseware page.
"""
english_language_code = 'en'
set_user_preference(self.test_user, preference_key=LANGUAGE_KEY, preference_value=english_language_code)
self.login_staff()
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
# Set student language preference and set masquerade to view same page the student.
set_user_preference(self.student_user, preference_key=LANGUAGE_KEY, preference_value='es-419')
self.update_masquerade(role='student', user_name=self.student_user.username)
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student_course_info(self):
"""
Test masquerading as a specific user for course info page.
        We log in with login_staff and verify the course info page content, then set
        masquerade to view the same page as a specific student and verify it again.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student, and check we can see the info page.
self.update_masquerade(role='student', user_name=self.student_user.username)
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
def test_masquerade_as_specific_student_progress(self):
"""
Test masquerading as a specific user for progress page.
"""
# Give the student some correct answers, check their progress page
self.login_student()
self.submit_answer('Correct', 'Correct')
student_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", student_progress)
self.assertIn("2 of 2 possible points", student_progress)
# Staff answers are slightly different
self.login_staff()
self.submit_answer('Incorrect', 'Correct')
staff_progress = self.get_progress_page().content
self.assertNotIn("2 of 2 possible points", staff_progress)
self.assertIn("1 of 2 possible points", staff_progress)
# Should now see the student's scores
self.update_masquerade(role='student', user_name=self.student_user.username)
masquerade_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", masquerade_progress)
self.assertIn("2 of 2 possible points", masquerade_progress)
@attr(shard=1)
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestGetMasqueradingGroupId, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.test_user.id)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_get_masquerade_group(self):
"""
Tests that a staff member can masquerade as being in a group in a user partition
"""
# Verify there is no masquerading group initially
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertIsNone(group)
# Install a masquerading group
self.ensure_masquerade_as_group_member(0, 1)
# Verify that the masquerading group is returned
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertEqual(group.id, 1)
class ReadOnlyKeyValueStore(DictKeyValueStore):
"""
A KeyValueStore that raises an exception on attempts to modify it.
Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
"""
def set(self, key, value):
assert False, "ReadOnlyKeyValueStore may not be modified."
def delete(self, key):
assert False, "ReadOnlyKeyValueStore may not be modified."
def set_many(self, update_dict): # pylint: disable=unused-argument
assert False, "ReadOnlyKeyValueStore may not be modified."
class FakeSession(dict):
""" Mock for Django session object. """
modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
"""
Unit tests for the MasqueradingKeyValueStore class.
"""
def setUp(self):
super(MasqueradingKeyValueStoreTest, self).setUp()
self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
self.session = FakeSession()
self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
def test_all(self):
self.assertEqual(self.kvs.get('a'), 42)
self.assertEqual(self.kvs.get('b'), None)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
with self.assertRaises(KeyError):
self.kvs.get('d')
self.assertTrue(self.kvs.has('a'))
self.assertTrue(self.kvs.has('b'))
self.assertTrue(self.kvs.has('c'))
self.assertFalse(self.kvs.has('d'))
self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
self.kvs.set('b', 7)
self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
self.assertEqual(self.kvs.get('b'), 7)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
self.assertEqual(self.kvs.get('d'), 'Giraffe')
for key in 'abd':
self.assertTrue(self.kvs.has(key))
self.kvs.delete(key)
with self.assertRaises(KeyError):
self.kvs.get(key)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
class CourseMasqueradeTest(TestCase):
"""
Unit tests for the CourseMasquerade class.
"""
def test_unpickling_sets_all_attributes(self):
"""
Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
the session.
"""
cmasq = CourseMasquerade(7)
del cmasq.user_name
pickled_cmasq = pickle.dumps(cmasq)
unpickled_cmasq = pickle.loads(pickled_cmasq)
self.assertEqual(unpickled_cmasq.user_name, None)
| agpl-3.0 |
michaelmcandrew/readthedocs.org | readthedocs/rtd_tests/tests/test_redirects.py | 20 | 10441 | from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from django_dynamic_fixture import fixture
from readthedocs.builds.constants import LATEST
from readthedocs.projects.models import Project
from readthedocs.redirects.models import Redirect
import logging
class RedirectTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
pip = Project.objects.get(slug='pip')
pip.versions.create_latest()
def test_proper_url_no_slash(self):
r = self.client.get('/docs/pip')
# This is triggered by Django, so its a 301, basically just
# APPEND_SLASH
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/docs/pip/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url(self):
r = self.client.get('/docs/pip/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_lang_slug_only(self):
r = self.client.get('/docs/pip/en/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_full(self):
r = self.client.get('/docs/pip/en/latest/')
self.assertEqual(r.status_code, 200)
def test_proper_url_full_with_filename(self):
r = self.client.get('/docs/pip/en/latest/test.html')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_main_site(self):
r = self.client.get('/docs/pip/page/test.html')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://testserver/docs/pip/en/latest/test.html')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
def test_proper_url_with_version_slug_only(self):
r = self.client.get('/docs/pip/latest/')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://testserver/docs/pip/en/latest/')
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 200)
# If slug is neither valid lang nor valid version, it should 404.
# TODO: This should 404 directly, not redirect first
def test_improper_url_with_nonexistent_slug(self):
r = self.client.get('/docs/pip/nonexistent/')
self.assertEqual(r.status_code, 302)
r = self.client.get(r['Location'])
self.assertEqual(r.status_code, 404)
def test_improper_url_filename_only(self):
r = self.client.get('/docs/pip/test.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_dir_subdir_file(self):
r = self.client.get('/docs/pip/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_file(self):
r = self.client.get('/docs/pip/en/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_lang_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
    def test_improper_url_lang_subdir_subdir_file(self):
r = self.client.get('/docs/pip/en/nonexistent_dir/subdir/bogus.html')
self.assertEqual(r.status_code, 404)
def test_improper_url_version_dir_file(self):
r = self.client.get('/docs/pip/latest/nonexistent_dir/bogus.html')
self.assertEqual(r.status_code, 404)
# Subdomains
def test_proper_subdomain(self):
r = self.client.get('/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_with_lang_slug_only(self):
r = self.client.get('/en/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/')
def test_proper_subdomain_and_url(self):
r = self.client.get('/en/latest/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
def test_proper_subdomain_and_url_with_filename(self):
r = self.client.get(
'/en/latest/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 200)
# Specific Page Redirects
def test_proper_page_on_subdomain(self):
r = self.client.get('/page/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/latest/test.html')
# When there's only a version slug, the redirect prepends the lang slug
def test_proper_subdomain_with_version_slug_only(self):
r = self.client.get('/1.4.1/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(r['Location'],
'http://pip.readthedocs.org/en/1.4.1/')
def test_improper_subdomain_filename_only(self):
r = self.client.get('/test.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 404)
class RedirectUnderscoreTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
logging.disable(logging.DEBUG)
self.client.login(username='eric', password='test')
whatup = Project.objects.create(
slug='what_up', name='What Up Underscore')
# Test _ -> - slug lookup
def test_underscore_redirect(self):
r = self.client.get('/',
HTTP_HOST='what-up.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://what-up.readthedocs.org/en/latest/')
class RedirectAppTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.client.post(
'/dashboard/import/',
{'repo_type': 'git', 'name': 'Pip',
'tags': 'big, fucking, monkey', 'default_branch': '',
'project_url': 'http://pip.rtfd.org',
'repo': 'https://github.com/fail/sauce',
'csrfmiddlewaretoken': '34af7c8a5ba84b84564403a280d9a9be',
'default_version': LATEST,
'privacy_level': 'public',
'version_privacy_level': 'public',
'description': 'wat',
'documentation_type': 'sphinx'})
self.pip = Project.objects.get(slug='pip')
self.pip.versions.create_latest()
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_root(self):
Redirect.objects.create(
project=self.pip, redirect_type='prefix', from_url='/woot/')
r = self.client.get('/woot/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True)
def test_redirect_page(self):
Redirect.objects.create(
project=self.pip, redirect_type='page', from_url='/install.html', to_url='/tutorial/install.html')
r = self.client.get('/install.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/tutorial/install.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_html(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_html')
r = self.client.get('/en/latest/faq/', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq.html')
@override_settings(USE_SUBDOMAIN=True, PYTHON_MEDIA=True)
def test_redirect_htmldir(self):
Redirect.objects.create(
project=self.pip, redirect_type='sphinx_htmldir')
r = self.client.get('/en/latest/faq.html', HTTP_HOST='pip.readthedocs.org')
self.assertEqual(r.status_code, 302)
self.assertEqual(
r['Location'], 'http://pip.readthedocs.org/en/latest/faq/')
class RedirectBuildTests(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.project = get(Project,
slug='project-1',
documentation_type='sphinx',
conf_py_file='test_conf.py',
versions=[fixture()])
self.version = self.project.versions.all()[0]
def test_redirect_list(self):
r = self.client.get('/builds/project-1/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/')
def test_redirect_detail(self):
r = self.client.get('/builds/project-1/1337/')
self.assertEqual(r.status_code, 301)
self.assertEqual(r['Location'], 'http://testserver/projects/project-1/builds/1337/')
| mit |
hsluo/youtube-dl | devscripts/generate_aes_testdata.py | 129 | 1136 | from __future__ import unicode_literals
import codecs
import subprocess
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.utils import intlist_to_bytes
from youtube_dl.aes import aes_encrypt, key_expansion
secret_msg = b'Secret message goes here'
def hex_str(int_list):
return codecs.encode(intlist_to_bytes(int_list), 'hex')
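# Worked example (for illustration): hex_str([0x20, 0x15] + 14 * [0]) encodes the
# 16-byte key/iv used below as b'2015' followed by 28 zero characters.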
def openssl_encode(algo, key, iv):
cmd = ['openssl', 'enc', '-e', '-' + algo, '-K', hex_str(key), '-iv', hex_str(iv)]
prog = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = prog.communicate(secret_msg)
return out
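# For illustration: with the key/iv defined below, openssl_encode('aes-128-cbc',
# key, iv) shells out to roughly
#   openssl enc -e -aes-128-cbc -K 20150000000000000000000000000000 -iv 20150000000000000000000000000000
# feeding `secret_msg` on stdin and returning the ciphertext read from stdout.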
iv = key = [0x20, 0x15] + 14 * [0]
r = openssl_encode('aes-128-cbc', key, iv)
print('aes_cbc_decrypt')
print(repr(r))
password = key
new_key = aes_encrypt(password, key_expansion(password))
r = openssl_encode('aes-128-ctr', new_key, iv)
print('aes_decrypt_text 16')
print(repr(r))
password = key + 16 * [0]
new_key = aes_encrypt(password, key_expansion(password)) * (32 // 16)
r = openssl_encode('aes-256-ctr', new_key, iv)
print('aes_decrypt_text 32')
print(repr(r))
| unlicense |
asser/django | django/core/files/uploadedfile.py | 471 | 4334 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
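    # Behaviour note (added for illustration): assigning a path keeps only the
    # basename, so ``f.name = '../../etc/passwd'`` stores 'passwd', and names
    # longer than 255 characters are truncated while preserving the extension.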
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
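# Usage sketch (illustrative; the file name and content are hypothetical):
#
#   f = SimpleUploadedFile.from_dict({
#       'filename': 'hello.txt',
#       'content': b'hello world',
#       'content-type': 'text/plain',
#   })
#   # f.name == 'hello.txt', f.size == 11, f.content_type == 'text/plain'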
| bsd-3-clause |
s-ludwig/FrameworkBenchmarks | frameworks/Python/cherrypy/satool.py | 79 | 1643 | import cherrypy
__all__ = ['SATool']
class SATool(cherrypy.Tool):
def __init__(self):
"""
The SA tool is responsible for associating a SA session
to the SA engine and attaching it to the current request.
Since we are running in a multithreaded application,
we use the scoped_session that will create a session
on a per thread basis so that you don't worry about
concurrency on the session object itself.
This tools binds a session to the engine each time
a requests starts and commits/rollbacks whenever
the request terminates.
"""
cherrypy.Tool.__init__(self, 'on_start_resource',
self.bind_session,
priority=20)
def _setup(self):
cherrypy.Tool._setup(self)
cherrypy.request.hooks.attach('on_end_resource',
self.commit_transaction,
priority=80)
def bind_session(self):
"""
Attaches a session to the request's scope by requesting
the SA plugin to bind a session to the SA engine.
"""
session = cherrypy.engine.publish('bind-session').pop()
cherrypy.request.db = session
def commit_transaction(self):
"""
Commits the current transaction or rolls back
if an error occurs. Removes the session handle
from the request's scope.
"""
if not hasattr(cherrypy.request, 'db'):
return
cherrypy.request.db = None
cherrypy.engine.publish('commit-session')
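# Typical wiring (illustrative sketch; the names below follow common CherryPy
# conventions and are not mandated by this file): register the tool and enable
# it per application, alongside a plugin that answers the 'bind-session' and
# 'commit-session' channels.
#
#   cherrypy.tools.db = SATool()
#   cherrypy.tree.mount(Root(), '/', {'/': {'tools.db.on': True}})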
| bsd-3-clause |
disqus/python-phabricator | phabricator/tests/test_phabricator.py | 1 | 5145 | try:
import unittest2 as unittest
except ImportError:
import unittest
import requests
import responses
from pkg_resources import resource_string
import json
import phabricator
phabricator.ARCRC = {} # overwrite any arcrc that might be read
RESPONSES = json.loads(
resource_string(
'phabricator.tests.resources',
'responses.json'
).decode('utf8')
)
CERTIFICATE = resource_string(
'phabricator.tests.resources',
'certificate.txt'
).decode('utf8').strip()
# Protect against local user's .arcrc interference.
phabricator.ARCRC = {}
class PhabricatorTest(unittest.TestCase):
def setUp(self):
self.api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
self.api.certificate = CERTIFICATE
def test_generate_hash(self):
token = '12345678'
hashed = self.api.generate_hash(token)
self.assertEqual(hashed, 'f8d3bea4e58a2b2967d93d5b307bfa7c693b2e7f')
@responses.activate
def test_connect(self):
responses.add('POST', 'http://localhost/api/conduit.connect',
body=RESPONSES['conduit.connect'], status=200)
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
api.connect()
keys = api._conduit.keys()
self.assertIn('sessionKey', keys)
self.assertIn('connectionID', keys)
assert len(responses.calls) == 1
@responses.activate
def test_user_whoami(self):
responses.add('POST', 'http://localhost/api/user.whoami',
body=RESPONSES['user.whoami'], status=200)
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
api._conduit = True
self.assertEqual(api.user.whoami()['userName'], 'testaccount')
def test_classic_resources(self):
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
self.assertEqual(api.user.whoami.method, 'user')
self.assertEqual(api.user.whoami.endpoint, 'whoami')
def test_nested_resources(self):
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
self.assertEqual(api.diffusion.repository.edit.method, 'diffusion')
self.assertEqual(
api.diffusion.repository.edit.endpoint, 'repository.edit')
@responses.activate
def test_bad_status(self):
responses.add(
'POST', 'http://localhost/api/conduit.connect', status=400)
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
with self.assertRaises(requests.exceptions.HTTPError):
api.user.whoami()
assert len(responses.calls) == 1
@responses.activate
def test_maniphest_find(self):
responses.add('POST', 'http://localhost/api/maniphest.find',
body=RESPONSES['maniphest.find'], status=200)
api = phabricator.Phabricator(
username='test',
certificate='test',
host='http://localhost/api/'
)
api._conduit = True
result = api.maniphest.find(
ownerphids=['PHID-USER-5022a9389121884ab9db']
)
self.assertEqual(len(result), 1)
# Test iteration
self.assertIsInstance([x for x in result], list)
# Test getattr
self.assertEqual(
result['PHID-TASK-4cgpskv6zzys6rp5rvrc']['status'],
'3'
)
def test_validation(self):
self.api._conduit = True
self.assertRaises(ValueError, self.api.differential.find)
with self.assertRaises(ValueError):
self.api.differential.find(query=1)
with self.assertRaises(ValueError):
self.api.differential.find(query='1')
with self.assertRaises(ValueError):
self.api.differential.find(query='1', guids='1')
def test_map_param_type(self):
uint = 'uint'
self.assertEqual(phabricator.map_param_type(uint), int)
list_bool = 'list<bool>'
self.assertEqual(phabricator.map_param_type(list_bool), [bool])
list_pair = 'list<pair<callsign, path>>'
self.assertEqual(phabricator.map_param_type(list_pair), [tuple])
complex_list_pair = 'list<pair<string-constant<"gtcm">, string>>'
self.assertEqual(phabricator.map_param_type(
complex_list_pair), [tuple])
def test_endpoint_shadowing(self):
shadowed_endpoints = [e for e in self.api._interface.keys() if e in self.api.__dict__]
self.assertEqual(
shadowed_endpoints,
[],
"The following endpoints are shadowed: {}".format(shadowed_endpoints)
)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
fernand/scipy | scipy/signal/signaltools.py | 13 | 87587 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
from . import sigtools
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import linalg
from scipy.fftpack import (fft, ifft, ifftshift, fft2, ifft2, fftn,
ifftn, fftfreq)
from numpy.fft import rfftn, irfftn
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
prod, product, r_, ravel, real_if_close, reshape,
roots, sort, sum, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'detrend', 'lfilter_zi', 'sosfilt_zi',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward
swapped_inputs = (mode == 'full') and (in2.size > in1.size)
if swapped_inputs:
in1, in2 = in2, in1
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
# Reverse and conjugate to undo the effect of swapping inputs
if swapped_inputs:
slice_obj = [slice(None, None, -1)] * len(z.shape)
z = z[slice_obj].conj()
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
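# Illustrative sketch (added note, not part of the original scipy source):
# _centered is the trimming step fftconvolve uses for its 'same' and 'valid'
# modes -- it pulls the central `newsize` portion out of the full result.
#
#     >>> _centered(np.arange(5), 3).tolist()
#     [1, 2, 3]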
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target-1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2**((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2**(len(bin(quotient - 1)) - 2)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
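# Illustrative sketch (added, not part of the original scipy source):
# _next_regular rounds an FFT length up to the next 5-smooth ("regular")
# number, which is the padding fftconvolve applies for speed.
#
#     >>> _next_regular(97)     # 97 is prime; 98 and 99 are not 5-smooth
#     100
#     >>> _next_regular(1021)   # here the next regular number is a power of 2
#     1024
#     >>> _next_regular(768)    # 768 = 2**8 * 3 is already regular
#     768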
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse. (This is at least 100 times
as fast as `convolve`.)
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [_next_regular(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
ret = irfftn(rfftn(in1, fshape) *
rfftn(in2, fshape), fshape)[fslice].copy()
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
ret = ifftn(fftn(in1, fshape) * fftn(in2, fshape))[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
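# Usage sketch (added, not part of the original scipy source): medfilt slides
# an odd-sized window over the data and keeps the median; samples beyond the
# edges are treated as zeros, as in order_filter above.
#
#     >>> medfilt([2., 80., 6., 3., 1.], kernel_size=3).tolist()
#     [2.0, 6.0, 6.0, 3.0, 1.0]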
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
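# Usage sketch (added, not part of the original scipy source): the filter
# pulls each pixel toward its local mean in proportion to how small the local
# variance is relative to the noise power, so a noisy constant-valued image
# should come out noticeably smoother.
#
#     >>> rng = np.random.RandomState(0)
#     >>> noisy = 5.0 + 0.1 * rng.randn(64, 64)
#     >>> smoothed = wiener(noisy, mysize=5)
#     >>> bool(smoothed.std() < noisy.std())
#     True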
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(face, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(1, 3)
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
                     b[0] + b[1] z**(-1) + ... + b[nb] z**(-nb)
             Y(z) = -------------------------------------------- X(z)
                     a[0] + a[1] z**(-1) + ... + a[na] z**(-na)
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape, strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
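# Consistency sketch (added, not part of the original scipy source): with a
# trivial denominator ``a = [1.0]`` the filter is purely FIR, so its output is
# just the causal part of a convolution with the numerator taps.
#
#     >>> x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
#     >>> b = np.array([0.5, 0.5])               # two-tap moving average
#     >>> y = lfilter(b, [1.0], x)
#     >>> bool(np.allclose(y, np.convolve(b, x)[:len(x)]))
#     True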
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
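# Usage sketch (added, not part of the original scipy source): lfiltic builds
# the lfilter state from past outputs/inputs, so a signal filtered in two
# chunks (with the state reconstructed from the first chunk) should match the
# result of filtering it in one pass.
#
#     >>> b = np.array([0.5, 0.5])
#     >>> a = np.array([1.0, -0.3])
#     >>> x = np.arange(10.0)
#     >>> y = lfilter(b, a, x)
#     >>> zi = lfiltic(b, a, y=y[4::-1], x=x[4::-1])
#     >>> y_tail, _ = lfilter(b, a, x[5:], zi=zi)
#     >>> bool(np.allclose(y_tail, y[5:]))
#     True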
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal``.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
    --------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp of which the frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the instantaneous
phase in respect to time. The instantaneous phase corresponds to the phase
angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = np.diff(instantaneous_phase) / (2.0*np.pi) * fs
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal Processing,
Third Edition, 2009. Chapter 12. ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
for p in range(2):
h = eval("h%d" % (p + 1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
exec("h%d = h" % (p + 1), globals(), locals())
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
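# Illustrative sketch (added, not part of the original scipy source): complex
# inputs are ordered by magnitude, and `indx` maps back into the original
# sequence (here 1j has the smallest magnitude, so index 2 comes first).
#
#     >>> p_sorted, indx = cmplx_sort([3.0, -2 + 4j, 1j])
#     >>> indx.tolist()
#     [2, 0, 1]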
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min, 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
             b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
     H(s) = ------ = ----------------------------------------------
             a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
          = r[0]/(s-p[0]) + r[1]/(s-p[1]) + ... + r[-1]/(s-p[-1]) + k(s)
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
        r[i]/(s-p[i]) + r[i+1]/(s-p[i])**2 + ... + r[i+n-1]/(s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min, 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
             b(s)     b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
     H(s) = ------ = ----------------------------------------------
             a(s)     a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
          = r[0]/(s-p[0]) + r[1]/(s-p[1]) + ... + r[-1]/(s-p[-1]) + k(s)
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
        r[i]/(s-p[i]) + r[i+1]/(s-p[i])**2 + ... + r[i+n-1]/(s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n])
/ factorial(sig - m))
indx += sig
return r / rscale, p, k
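# Round-trip sketch (added, not part of the original scipy source): residue
# and invres undo each other, so expanding H(s) = (s + 4) / (s**2 + 3s + 2)
# into residues and rebuilding it should recover the original coefficients.
#
#     >>> b, a = [1.0, 4.0], [1.0, 3.0, 2.0]
#     >>> r, p, k = residue(b, a)          # poles at -2 and -1
#     >>> b2, a2 = invres(r, p, k)
#     >>> bool(np.allclose(b2, b)) and bool(np.allclose(a2, a))
#     True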
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
             b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
     H(z) = ------ = ----------------------------------------------
             a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
          = r[0]/(1-p[0]z**(-1)) + ... + r[-1]/(1-p[-1]z**(-1))
            + k[0] + k[1]z**(-1) + ...
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
        r[i]/(1-p[i]z**(-1)) + r[i+1]/(1-p[i]z**(-1))**2 + ... + r[i+n-1]/(1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If ``M = len(b)`` and ``N = len(a)``::
             b(z)     b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
     H(z) = ------ = ----------------------------------------------
             a(z)     a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
          = r[0]/(1-p[0]z**(-1)) + ... + r[-1]/(1-p[-1]z**(-1))
            + k[0] + k[1]z**(-1) + ...
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
        r[i]/(1-p[i]z**(-1)) + r[i+1]/(1-p[i]z**(-1))**2 + ... + r[i+n-1]/(1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots, invres
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
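# Round-trip sketch (added, not part of the original scipy source): the same
# inversion holds in the z-domain.  The rebuilt numerator may carry a trailing
# zero coefficient, but it describes the same filter, as the impulse responses
# confirm.
#
#     >>> b, a = [1.0, -0.4], [1.0, -0.8, 0.15]
#     >>> r, p, k = residuez(b, a)
#     >>> bool(np.allclose(r, [0.5, 0.5])) and bool(np.allclose(p, [0.3, 0.5]))
#     True
#     >>> b2, a2 = invresz(r, p, k)
#     >>> imp = np.zeros(20); imp[0] = 1.0
#     >>> bool(np.allclose(lfilter(b, a, imp), lfilter(b2, a2, imp)))
#     True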
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
doi: 10.1063/1.3670512
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. doi: 10.1007/s00422-013-0561-7.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
doi: 10.1007/s00422-013-0560-8
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
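# Usage sketch (added, not part of the original scipy source): events that all
# fall at the same phase of the period give a vector strength of 1, while
# events spread uniformly over the period give (numerically) 0.
#
#     >>> strength, phase = vectorstrength([0.0, 1.0, 2.0, 3.0], 1.0)
#     >>> bool(np.allclose([strength, phase], [1.0, 0.0]))
#     True
#     >>> strength, _ = vectorstrength([0.0, 0.25, 0.5, 0.75], 1.0)
#     >>> bool(np.allclose(strength, 0.0))
#     True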
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
    >>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Compute an initial state `zi` for the sosfilt function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
    .. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
    if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
    if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
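# Hedged illustration (an addition, not part of the original source): the
# private helper above returns the filtered signal together with the optimal
# initial conditions, and the public filtfilt wrapper below with method="gust"
# returns the same signal.  The helper name, filter design and test signal are
# assumptions made only for this sketch.
def _demo_filtfilt_gust_initial_conditions():
    import numpy as np
    from scipy.signal import butter
    b, a = butter(4, 0.2)
    x = np.cos(np.linspace(0, 10, 300))
    y, x0, x1 = _filtfilt_gust(b, a, x)
    order = max(len(a), len(b)) - 1
    assert x0.shape == (order,) and x1.shape == (order,)
    assert np.allclose(y, filtfilt(b, a, x, method='gust'))
    return y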
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward and once
backwards. The combined filter has linear phase.
The function provides options for handling the edges of the signal.
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Notes
-----
    The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
    .. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# `method` is "pad"...
ntaps = max(len(a), len(b))
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
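# Illustrative sketch (added; not part of the original source): the combined
# forward-backward pass has zero phase, so a passband sinusoid comes out time
# aligned with the input, while a single lfilter pass is delayed.  The helper
# name, filter design, tone frequency and tolerances are assumptions made only
# for this example.
def _demo_filtfilt_zero_phase():
    import numpy as np
    from scipy.signal import butter
    b, a = butter(4, 0.25)                  # cutoff well above the tone
    n = np.arange(2000)
    x = np.sin(2 * np.pi * 0.01 * n)        # slow sinusoid in the passband
    y_zero_phase = filtfilt(b, a, x)
    y_single_pass = lfilter(b, a, x)
    mid = slice(500, 1500)                  # ignore edge transients
    assert np.max(np.abs(y_zero_phase[mid] - x[mid])) < 1e-2
    assert np.max(np.abs(y_single_pass[mid] - x[mid])) > 1e-2
    return y_zero_phase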
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = np.zeros(700)
>>> x[0] = 1.
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos = atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
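# Small consistency sketch (added for illustration; not in the original file):
# for a low-order design, where numerical conditioning is not an issue, running
# the cascaded second-order sections gives the same output as a single
# transfer-function pass with lfilter.  The helper name and design are
# assumptions for this demo only.
def _demo_sosfilt_matches_lfilter_for_low_order():
    import numpy as np
    from scipy.signal import butter
    b, a = butter(4, 0.2)
    sos = butter(4, 0.2, output='sos')
    x = np.random.RandomState(42).randn(1024)
    assert np.allclose(sosfilt(sos, x), lfilter(b, a, x))
    return sos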
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
return y[sl]
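# Usage sketch (an illustrative addition, not part of the original module):
# decimate low-pass filters first and then keeps every q-th sample, so the
# output length is ceil(len(x) / q).  The helper name and parameters are
# assumptions made only for this example.
def _demo_decimate_length():
    import numpy as np
    x = np.sin(2 * np.pi * 0.01 * np.arange(1000))
    y = decimate(x, 4)
    assert y.shape == (250,)
    return y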
| bsd-3-clause |
pdarragh/Viper | viper/parser/grammar_parsing/production_part.py | 1 | 1856 | class ProductionPart:
def __str__(self):
return repr(self)
class LiteralPart(ProductionPart):
def __init__(self, text: str):
self.text = text
def __repr__(self):
return "'" + self.text + "'"
class SpecialPart(ProductionPart):
def __init__(self, special_token: str):
self.token = special_token
def __repr__(self):
return "'" + self.token + "'"
class RulePart(ProductionPart):
def __init__(self, rule_name: str):
self.name = rule_name
def __repr__(self):
return "<" + self.name + ">"
class RepeatPart(ProductionPart):
def __init__(self, repeated_part: ProductionPart):
self.part = repeated_part
def __repr__(self):
return repr(self.part) + "*"
class MinimumRepeatPart(ProductionPart):
def __init__(self, repeated_part: ProductionPart):
self.part = repeated_part
def __repr__(self):
return repr(self.part) + "+"
class SeparatedRepeatPart(ProductionPart):
def __init__(self, separator: ProductionPart, rule_part: ProductionPart):
self.separator = separator
self.rule = rule_part
def __repr__(self):
return repr(self.rule) + "{" + repr(self.separator) + "}&*"
class MinimumSeparatedRepeatPart(ProductionPart):
def __init__(self, separator: ProductionPart, rule_part: ProductionPart):
self.separator = separator
self.rule = rule_part
class OptionPart(ProductionPart):
def __init__(self, optional_part: ProductionPart):
self.part = optional_part
def __repr__(self):
return repr(self.part) + "?"
class ParameterPart(ProductionPart):
def __init__(self, name: str, matched_part: ProductionPart):
self.name = name
self.part = matched_part
def __repr__(self):
return self.name + ": " + repr(self.part)
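# Hypothetical usage sketch (added for illustration; not part of the original
# module): building the part tree for a rule fragment such as
#     args: <expr> (',' <expr>)*
# i.e. a comma-separated list of expressions bound to the parameter "args",
# and showing the repr conventions defined above.
def _demo_production_parts():
    args = SeparatedRepeatPart(LiteralPart(","), RulePart("expr"))
    named = ParameterPart("args", args)
    # repr(named) == "args: <expr>{','}&*"
    return repr(named)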
| apache-2.0 |
kaiix/depot_tools | third_party/boto/gs/cors.py | 88 | 7731 | # Copyright 2012 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import types
from boto.gs.user import User
from boto.exception import InvalidCorsError
from xml.sax import handler
# Relevant tags for the CORS XML document.
CORS_CONFIG = 'CorsConfig'
CORS = 'Cors'
ORIGINS = 'Origins'
ORIGIN = 'Origin'
METHODS = 'Methods'
METHOD = 'Method'
HEADERS = 'ResponseHeaders'
HEADER = 'ResponseHeader'
MAXAGESEC = 'MaxAgeSec'
class Cors(handler.ContentHandler):
"""Encapsulates the CORS configuration XML document"""
def __init__(self):
# List of CORS elements found within a CorsConfig element.
self.cors = []
# List of collections (e.g. Methods, ResponseHeaders, Origins)
# found within a CORS element. We use a list of lists here
# instead of a dictionary because the collections need to be
# preserved in the order in which they appear in the input XML
# document (and Python dictionary keys are inherently unordered).
# The elements on this list are two element tuples of the form
# (collection name, [list of collection contents]).
self.collections = []
# Lists of elements within a collection. Again a list is needed to
# preserve ordering but also because the same element may appear
# multiple times within a collection.
self.elements = []
# Dictionary mapping supported collection names to element types
# which may be contained within each.
self.legal_collections = {
ORIGINS : [ORIGIN],
METHODS : [METHOD],
HEADERS : [HEADER],
MAXAGESEC: []
}
# List of supported element types within any collection, used for
        # checking validity of a parsed element name.
self.legal_elements = [ORIGIN, METHOD, HEADER]
self.parse_level = 0
self.collection = None
self.element = None
def validateParseLevel(self, tag, level):
"""Verify parse level for a given tag."""
if self.parse_level != level:
raise InvalidCorsError('Invalid tag %s at parse level %d: ' %
(tag, self.parse_level))
def startElement(self, name, attrs, connection):
"""SAX XML logic for parsing new element found."""
if name == CORS_CONFIG:
self.validateParseLevel(name, 0)
self.parse_level += 1;
elif name == CORS:
self.validateParseLevel(name, 1)
self.parse_level += 1;
elif name in self.legal_collections:
self.validateParseLevel(name, 2)
self.parse_level += 1;
self.collection = name
elif name in self.legal_elements:
self.validateParseLevel(name, 3)
# Make sure this tag is found inside a collection tag.
if self.collection is None:
raise InvalidCorsError('Tag %s found outside collection' % name)
# Make sure this tag is allowed for the current collection tag.
if name not in self.legal_collections[self.collection]:
raise InvalidCorsError('Tag %s not allowed in %s collection' %
(name, self.collection))
self.element = name
else:
raise InvalidCorsError('Unsupported tag ' + name)
def endElement(self, name, value, connection):
"""SAX XML logic for parsing new element found."""
if name == CORS_CONFIG:
self.validateParseLevel(name, 1)
self.parse_level -= 1;
elif name == CORS:
self.validateParseLevel(name, 2)
self.parse_level -= 1;
# Terminating a CORS element, save any collections we found
# and re-initialize collections list.
self.cors.append(self.collections)
self.collections = []
elif name in self.legal_collections:
self.validateParseLevel(name, 3)
if name != self.collection:
raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
(self.collection, name))
self.parse_level -= 1;
if not self.legal_collections[name]:
# If this collection doesn't contain any sub-elements, store
# a tuple of name and this tag's element value.
self.collections.append((name, value.strip()))
else:
# Otherwise, we're terminating a collection of sub-elements,
# so store a tuple of name and list of contained elements.
self.collections.append((name, self.elements))
self.elements = []
self.collection = None
elif name in self.legal_elements:
self.validateParseLevel(name, 3)
# Make sure this tag is found inside a collection tag.
if self.collection is None:
raise InvalidCorsError('Tag %s found outside collection' % name)
# Make sure this end tag is allowed for the current collection tag.
if name not in self.legal_collections[self.collection]:
raise InvalidCorsError('Tag %s not allowed in %s collection' %
(name, self.collection))
if name != self.element:
raise InvalidCorsError('Mismatched start and end tags (%s/%s)' %
(self.element, name))
# Terminating an element tag, add it to the list of elements
# for the current collection.
self.elements.append((name, value.strip()))
self.element = None
else:
raise InvalidCorsError('Unsupported end tag ' + name)
def to_xml(self):
"""Convert CORS object into XML string representation."""
s = '<' + CORS_CONFIG + '>'
for collections in self.cors:
s += '<' + CORS + '>'
for (collection, elements_or_value) in collections:
assert collection is not None
s += '<' + collection + '>'
# If collection elements has type string, append atomic value,
# otherwise, append sequence of values in named tags.
if isinstance(elements_or_value, types.StringTypes):
s += elements_or_value
else:
for (name, value) in elements_or_value:
assert name is not None
assert value is not None
s += '<' + name + '>' + value + '</' + name + '>'
s += '</' + collection + '>'
s += '</' + CORS + '>'
s += '</' + CORS_CONFIG + '>'
return s
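# Illustrative sketch (not part of the original file, written in the same
# Python 2 style as the module): a parsed configuration is a list of CORS
# entries, each a list of (collection, contents) tuples, which to_xml() turns
# back into the XML structure handled above.  The values below are made up.
def _demo_cors_to_xml():
    cors = Cors()
    cors.cors = [[
        (ORIGINS, [(ORIGIN, 'http://example.com')]),
        (METHODS, [(METHOD, 'GET'), (METHOD, 'HEAD')]),
        (MAXAGESEC, '1800'),
    ]]
    # Produces '<CorsConfig><Cors><Origins><Origin>http://example.com</Origin>'
    # followed by the Methods and MaxAgeSec collections, in document order.
    return cors.to_xml()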
| bsd-3-clause |
cosmo-ethz/hope | hope/_optimizer.py | 1 | 16844 | # Copyright (c) 2014 ETH Zurich, Institute of Astronomy, Lukas Gamper <[email protected]>
# TODO: remove comment!
# replace pow(a, number(int)) by multiplications
# find subexpressions and factor them out => make this blockwise (for that merge 1d expressions into one block)
# 1: merge subexpressions
# 2: create blacklist out of find altered values
# 3: factor out subexpressions
from __future__ import print_function, division, absolute_import, unicode_literals
from hope._ast import *
from hope._const import *
from hope._dump import Dumper
import sympy as sp
# http://docs.sympy.org/latest/modules/functions/elementary.html
SYM_UNARY_OPERATORS = {}
SYM_UNARY_OPERATORS["+"] = lambda a: +a
SYM_UNARY_OPERATORS["-"] = lambda a: -a
SYM_BINARY_OPERATORS = {}
SYM_BINARY_OPERATORS["+"] = lambda a, b: a + b
SYM_BINARY_OPERATORS["-"] = lambda a, b: a - b
SYM_BINARY_OPERATORS["*"] = lambda a, b: a * b
SYM_BINARY_OPERATORS["/"] = lambda a, b: a / b
SYM_BINARY_OPERATORS["**"] = lambda a, b: a ** b
# SYM_BINARY_OPERATORS["%"] = lambda a, b: a % b
# SYM_BINARY_OPERATORS["<<"] = lambda a, b: a << b
# SYM_BINARY_OPERATORS[">>"] = lambda a, b: a >> b
# SYM_BINARY_OPERATORS["|"] = lambda a, b: a | b
# SYM_BINARY_OPERATORS["^"] = lambda a, b: a ^ b
# SYM_BINARY_OPERATORS["&"] = lambda a, b: a & b
SYM_COMPARE_OPERATORS = {}
SYM_COMPARE_OPERATORS["=="] = lambda a, b: a == b
SYM_COMPARE_OPERATORS["!="] = lambda a, b: a != b
SYM_COMPARE_OPERATORS["<"] = lambda a, b: a < b
SYM_COMPARE_OPERATORS["<="] = lambda a, b: a <= b
SYM_COMPARE_OPERATORS[">"] = lambda a, b: a > b
SYM_COMPARE_OPERATORS[">="] = lambda a, b: a >= b
SYM_NUMBER = {}
SYM_NUMBER[bool] = lambda a: sp.BooleanTrue if a else sp.BooleanFalse
SYM_NUMBER[int] = lambda a: sp.Integer(a)
SYM_NUMBER[float] = lambda a: sp.Float(a, -np.log10(np.finfo(np.float_).resolution))
SYM_UNARY_FUNCTIONS = {}
SYM_UNARY_FUNCTIONS["sin"] = sp.sin
SYM_UNARY_FUNCTIONS["cos"] = sp.cos
SYM_UNARY_FUNCTIONS["tan"] = sp.tan
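# Tiny illustration (added for this edit; not part of the original module) of
# how the lookup tables above are meant to be used: an operator string taken
# from the AST selects a lambda that builds the corresponding sympy node.  The
# symbol names below are made up.
def _demo_sym_tables():
    import sympy as sp
    a, b = sp.symbols('a b')
    expr = SYM_BINARY_OPERATORS["*"](SYM_BINARY_OPERATORS["+"](a, b), a)
    # expr is (a + b)*a; expanding gives a**2 + a*b
    return sp.expand(expr)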
class CreateExprVisitor(NodeVisitor):
def __init__(self, dumper):
self.dumper = dumper
def visit_Number(self, node):
return SYM_NUMBER[node.dtype](node.value)
def visit_Variable(self, node):
if not self.dumper.visit(node) in self.symbols:
raise Exception("Unknown expression: {0!s}".format(self.dumper.visit(node)))
return sp.Symbol(self.dumper.visit(node))
def visit_Object(self, node):
raise Exception("Unable to create expression from : {0!s}".format(self.dumper.visit(node)))
def visit_ObjectAttr(self, node):
if not self.dumper.visit(node) in self.symbols:
raise Exception("Unknown expression: {0!s}".format(self.dumper.visit(node)))
return sp.Symbol(self.dumper.visit(node))
def visit_View(self, node):
if not self.dumper.visit(node.variable) in self.symbols:
raise Exception("Unknown expression: {0!s}".format(self.dumper.visit(node)))
self.symbols[self.dumper.visit(node)] = node
return sp.Symbol(self.dumper.visit(node))
def visit_UnaryOp(self, node):
return SYM_UNARY_OPERATORS[node.op](self.visit(node.operand))
def visit_BinOp(self, node):
return SYM_BINARY_OPERATORS[node.op](self.visit(node.left), self.visit(node.right))
def visit_Compare(self, node):
return SYM_COMPARE_OPERATORS[node.op](self.visit(node.left), self.visit(node.comparator))
def visit_Call(self, node):
return self.visit(node.name)(*[self.visit(arg) for arg in node.args])
# TODO: implement this!
# def visit_GlobalFunction(self, node): ???
# TODO: implement this!
# def visit_HopeAttr(self, node): ???
def visit_NumpyAttr(self, node):
if node.name in SYM_UNARY_FUNCTIONS:
return SYM_UNARY_FUNCTIONS[node.name]
else:
return sp.Function('np.' + node.name)
# TODO: implement this!
# def visit_NumpyContraction(self, node): ???
class CheckOptimizeVisitor(NodeVisitor):
def visit_Number(self, node): return True
def visit_Variable(self, node): return True
def visit_Object(self, node): return True
def visit_ObjectAttr(self, node): return True
# TODO: can wie optimize the index in the extent?
def visit_View(self, node): return True
def visit_Expr(self, node): return False
def visit_Assign(self, node): return False
def visit_AugAssign(self, node): return False
def visit_UnaryOp(self, node): return self.visit(node.operand)
def visit_BinOp(self, node):
return self.visit(node.left) and self.visit(node.right) if node.op in SYM_BINARY_OPERATORS else False
def visit_BoolOp(self, node): return False
def visit_Compare(self, node): return self.visit(node.left) and self.visit(node.comparator)
def visit_If(self, node): return False
def visit_For(self, node): return False
def visit_While(self, node): return False
def visit_Call(self, node):
return self.visit(node.name) and np.all([self.visit(value) for value in list(node.keywords.values())]
+ [self.visit(arg) for arg in node.args])
def visit_GlobalFunction(self, node): return True
def visit_HopeAttr(self, node): return False
def visit_NumpyAttr(self, node):
return node.name in SYM_UNARY_FUNCTIONS
def visit_NumpyContraction(self, node): return False
def visit_Allocate(self, node): return False
def visit_Return(self, node): return False
def visit_Block(self, node): return False
def visit_Body(self, node): return False
class SympyPowVisitor(object):
def visit(self, expr):
if expr.is_Pow and expr.exp.is_Integer:
return [expr]
else:
return [item for arg in expr.args for item in self.visit(arg)]
class SympyToAstVisitor(object):
def __init__(self):
for name in list(SYM_UNARY_FUNCTIONS.keys()):
if not name in NPY_UNARY_FUNCTIONS and not name in NPY_CAST_FUNCTIONS:
raise Exception("Unknown Function {0}".format(name))
setattr(self, "visit_{0}".format(name), self.npUnaryFunction_visit)
def visit(self, expr):
if hasattr(self, 'visit_{0}'.format(type(expr).__name__)):
return getattr(self, 'visit_{0}'.format(type(expr).__name__))(expr)
else:
return self.generic_visit(expr)
def generic_visit(self, expr):
raise Exception("Not Implemented Expression: {0}: {1!s}".format(type(expr).__name__, expr))
def visit_Add(self, expr):
ret = self.visit(expr.args[0])
for term in expr.args[1:]:
node = self.visit(term)
if isinstance(node, UnaryOp) and node.op == UNARY_OPERATORS["USub"]:
ret = BinOp("Sub", ret, node.operand)
else:
ret = BinOp("Add", ret, node)
return ret
def visit_Mul(self, expr):
sign, numerators, denominator = 1, [], []
for term in expr.as_ordered_factors():
if term.is_Pow and term.exp.is_Rational and term.exp.is_negative:
denominator.append(sp.Pow(term.base, -term.exp, evaluate=term.exp==-1))
elif term.is_Rational:
if term.p == -1:
sign *= -1
elif term.p != 1:
numerators.append(sp.Rational(term.p))
if term.q == -1:
sign *= -1
elif term.q != 1:
denominator.append(sp.Rational(term.q))
else:
numerators.append(term)
if len(numerators) == 0:
ret = Number(1)
else:
ret = self.visit(numerators[0])
for arg in numerators[1:]:
ret = BinOp("Mult", ret, self.visit(arg))
if len(denominator) > 0:
ret = BinOp("Div", ret, self.binOp_visit("Mult", denominator))
if sign < 0:
ret = UnaryOp("USub", ret)
return ret
def visit_Pow(self, expr):
from sympy.core.singleton import S
if expr.exp.is_real and float(int(expr.exp)) == expr.exp:
            print("Integer exponent as float: {0!s}".format(expr))
if expr.exp is S.Half or (expr.exp.is_real and expr.exp == 0.5):
return Call(NumpyAttr("sqrt"), True, [self.visit(expr.base)])
elif -expr.exp is S.Half or (expr.exp.is_real and -expr.exp == 0.5):
return BinOp("Div", Number(1.), Call(NumpyAttr("sqrt"), True, [self.visit(expr.base)], {}))
elif expr.exp == -1:
return BinOp("Div", Number(1.), self.visit(expr.base))
else:
return self.binOp_visit("Pow", expr.args)
def visit_Integer(self, expr):
return Number(expr.p)
def visit_Rational(self, expr):
return BinOp("Div", Number(float(expr.p)), Number(float(expr.q)))
def visit_Float(self, expr):
return Number(float(expr))
def visit_Symbol(self, expr):
if not expr.name in self.symbols:
raise Exception("Unknown symbol: {0!s}".format(expr.name))
return self.symbols[expr.name]
def visit_NegativeOne(self, expr):
return Number(-1)
def visit_Zero(self, expr):
return Number(0)
def visit_One(self, expr):
return Number(1)
def npUnaryFunction_visit(self, expr):
return Call(NumpyAttr(type(expr).__name__), True, [self.visit(arg) for arg in expr.args], {})
def binOp_visit(self, name, args):
ret = self.visit(args[0])
for arg in args[1:]:
ret = BinOp(name, ret, self.visit(arg))
return ret
class Optimizer(NodeVisitor):
def __init__(self):
self.dumper = Dumper()
self.checkVisitor, self.createExpr, self.sympyToAst, self.sympyPow, self.next = CheckOptimizeVisitor(), CreateExprVisitor(self.dumper), SympyToAstVisitor(), SympyPowVisitor(), 0
def visit_Number(self, node): pass
def visit_NewVariable(self, node): pass
def visit_Variable(self, node): pass
def visit_Object(self, node): pass
def visit_ObjectAttr(self, node): pass
def visit_Dimension(self, node): pass
def visit_View(self, node): pass
def visit_Expr(self, node): pass
def visit_Assign(self, node): pass
def visit_AugAssign(self, node): pass
def visit_UnaryOp(self, node): pass
def visit_BinOp(self, node): pass
def visit_Compare(self, node): pass
def visit_If(self, node):
# TODO: optimize condition
# if condition is compile time -> remove!
self.visit(node.body)
if not node.orelse is None:
self.visit(node.orelse)
def visit_For(self, node):
self.symbols[self.dumper.visit(node.iter)] = node.iter
self.visit(node.body)
del self.symbols[self.dumper.visit(node.iter)]
def visit_While(self, node):
self.visit(node.body)
def visit_Call(self, node): pass
def visit_GlobalFunction(self, node): pass
def visit_HopeAttr(self, node): pass
def visit_NumpyAttr(self, node): pass
def visit_NumpyContraction(self, node): pass
def visit_Allocate(self, node):
self.symbols[self.dumper.visit(node.variable)] = node.variable
def visit_Return(self, node): pass
def visit_Block(self, node):
body, knownexprs, powexprs = [], {}, {}
for astexpr in node.body:
self.visit(astexpr)
if isinstance(astexpr, Assign):
if isinstance(astexpr.target, View):
self.symbols[self.dumper.visit(astexpr.target.variable)] = astexpr.target
elif isinstance(astexpr.target, Variable):
self.symbols[self.dumper.visit(astexpr.target)] = astexpr.target
else:
                    raise Exception("Unknown token {0}".format(self.dumper.visit(astexpr.target)))
# TODO: implement for expr
# TODO: replace subexpressions over several lines
if isinstance(astexpr, (Assign, AugAssign)) and self.checkVisitor.visit(astexpr.value):
symexpr = sp.simplify(self.createExpr.visit(astexpr.value))
subexprs, newexprs = sp.cse(symexpr, optimizations='basic')
if len(newexprs) != 1:
                    raise Exception("Error running Common Subexpression Detection for {0!s}".format(symexpr))
newexpr = newexprs[0]
for symbol, subexpr in subexprs:
for subsymbol, newsymbol in list(knownexprs.items()):
subexpr = subexpr.subs(subsymbol, newsymbol)
for powexpr in self.sympyPow.visit(subexpr):
subexpr, _ = self.replace_pow(body, subexpr, powexprs, powexpr, np.abs(powexpr.exp.p))
value = self.sympyToAst.visit(sp.simplify(subexpr))
name, self.next = "__sp{0}".format(self.next), self.next + 1
self.symbols[name] = Variable(name, copy.deepcopy(value.shape), value.dtype)
body.append(Assign(self.symbols[name], value))
knownexprs[symbol] = sp.Symbol(name)
newexpr = newexpr.subs(symbol, knownexprs[symbol])
for powexpr in sorted(self.sympyPow.visit(newexpr), key=lambda x: -np.abs(x.exp.p)):
newexpr, _ = self.replace_pow(body, newexpr, powexprs, powexpr, np.abs(powexpr.exp.p))
newvalue = self.sympyToAst.visit(sp.simplify(newexpr))
if astexpr.value.dtype != newvalue.dtype:
if isinstance(newvalue, Number):
newvalue = Number(astexpr.value.dtype(newvalue.value))
else:
raise Exception("dtype does not match {0} != {1}".format(self.dumper.visit(astexpr.value), self.dumper.visit(newvalue)))
if not(len(astexpr.target.shape) > 0 and len(newvalue.shape) == 0):
if len(astexpr.value.shape) != len(newvalue.shape):
raise Exception("length of shape does not match {0} != {1}".format(self.dumper.visit(astexpr.value), self.dumper.visit(newvalue)))
for extent1, extent2 in zip(astexpr.value.shape, newvalue.shape):
(lower1, upper1), (lower2, upper2) = extent1, extent2
if not ((lower1 is None and lower2 is None) or lower1 == lower2) or upper1 != upper2:
raise Exception("shape does not match {0} != {1}".format(self.dumper.visit(astexpr.value), self.dumper.visit(newvalue)))
astexpr.value = newvalue
body.append(astexpr)
else:
body.append(astexpr)
node.body = body
def visit_Body(self, node):
for block in node.blocks:
self.visit(block)
def visit_FunctionDef(self, node):
if not node.optimized:
self.symbols = {}
for var in node.signature:
self.add_symbol(var)
self.createExpr.symbols, self.sympyToAst.symbols = self.symbols, self.symbols
node.optimized = True
self.visit(node.body)
def visit_Module(self, node):
for fktcls in list(node.functions.values()):
for fkt in fktcls:
self.visit(fkt)
def add_symbol(self, symbol):
if isinstance(symbol, Object):
for attr in list(symbol.attrs.values()):
self.add_symbol(attr)
else:
self.symbols[self.dumper.visit(symbol)] = symbol
def replace_pow(self, body, symexpr, powexprs, expr, exp):
if exp == 1:
return (symexpr, None)
elif not (expr.base, exp) in powexprs:
if exp == 2:
operand = sp.simplify(expr.base)
value = BinOp("Mult", self.sympyToAst.visit(operand), self.sympyToAst.visit(operand))
elif exp % 2 == 1:
_, operand = self.replace_pow(body, symexpr, powexprs, expr, exp - 1)
value = BinOp("Mult", self.symbols[operand], self.sympyToAst.visit(sp.simplify(expr.base)))
else:
_, operand = self.replace_pow(body, symexpr, powexprs, expr, exp / 2)
value = BinOp("Mult", self.symbols[operand], self.symbols[operand])
name, self.next = "__sp{0}".format(self.next), self.next + 1
self.symbols[name] = Variable(name, copy.deepcopy(value.shape), value.dtype)
body.append(Assign(self.symbols[name], value))
powexprs[(expr.base, exp)] = name
if np.abs(expr.exp.p) == exp:
symbol = sp.Symbol(powexprs[(expr.base, exp)])
symexpr = symexpr.subs(expr, self.symbols[powexprs[(expr.base, exp)]].dtype(1) / symbol if expr.exp.is_negative else symbol)
return (symexpr, powexprs[(expr.base, exp)])
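# Sketch of the idea behind replace_pow above (an illustrative addition, not
# part of the original module): an integer power is decomposed into a chain of
# multiplications by repeated squaring, so e.g. x**5 costs three multiplies
# instead of a pow() call.  The helper below is a plain-Python analogue of that
# decomposition, not the AST-producing version used by the optimizer.
def _demo_pow_by_squaring(base, exp):
    assert exp >= 1
    if exp == 1:
        return base
    half = _demo_pow_by_squaring(base, exp // 2)
    result = half * half
    if exp % 2 == 1:
        result = result * base
    return result
# Example: _demo_pow_by_squaring(3, 5) == 243, computed as ((3*3)*(3*3))*3.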
| gpl-3.0 |
srm912/servo | tests/wpt/update/update.py | 224 | 1348 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from wptrunner.update.base import Step, StepRunner
from wptrunner.update.update import LoadConfig, SyncFromUpstream, UpdateMetadata
from wptrunner.update.tree import NoVCSTree
from .tree import GitTree, HgTree, GeckoCommit
from .upstream import SyncToUpstream
class LoadTrees(Step):
"""Load gecko tree and sync tree containing web-platform-tests"""
provides = ["local_tree", "sync_tree"]
def create(self, state):
if os.path.exists(state.sync["path"]):
sync_tree = GitTree(root=state.sync["path"])
else:
sync_tree = None
if GitTree.is_type():
local_tree = GitTree(commit_cls=GeckoCommit)
elif HgTree.is_type():
local_tree = HgTree(commit_cls=GeckoCommit)
else:
local_tree = NoVCSTree()
state.update({"local_tree": local_tree,
"sync_tree": sync_tree})
class UpdateRunner(StepRunner):
"""Overall runner for updating web-platform-tests in Gecko."""
steps = [LoadConfig,
LoadTrees,
SyncToUpstream,
SyncFromUpstream,
UpdateMetadata]
| mpl-2.0 |
andyh616/mne-python | mne/preprocessing/tests/test_eeglab_infomax.py | 8 | 7738 | import numpy as np
from scipy.linalg import svd
from mne.io import Raw
from mne import pick_types
import scipy.io as sio
from scipy.linalg import pinv
from mne.preprocessing.infomax_ import infomax
from numpy.testing import assert_almost_equal
from mne.utils import random_permutation
from mne.datasets import testing
import os.path as op
base_dir = op.join(op.dirname(__file__), 'data')
def generate_data_for_comparing_against_eeglab_infomax(ch_type, random_state):
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
raw_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
raw = Raw(raw_fname, preload=True)
if ch_type == 'eeg':
picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
else:
picks = pick_types(raw.info, meg=ch_type,
eeg=False, exclude='bads')
# select a small number of channels for the test
number_of_channels_to_use = 5
idx_perm = random_permutation(picks.shape[0], random_state)
picks = picks[idx_perm[:number_of_channels_to_use]]
raw.filter(1, 45, n_jobs=2)
X = raw[picks, :][0][:, ::20]
    # Subtract the mean
mean_X = X.mean(axis=1)
X -= mean_X[:, None]
# pre_whitening: z-score
X /= np.std(X)
T = X.shape[1]
cov_X = np.dot(X, X.T) / T
# Let's whiten the data
U, D, _ = svd(cov_X)
W = np.dot(U, U.T / np.sqrt(D)[:, None])
Y = np.dot(W, X)
return Y
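# Hedged side note (added for illustration; not part of the original test): the
# point of the SVD step above is that the whitened data Y has identity
# covariance, which is the starting point infomax expects.  The helper name and
# the toy data are assumptions made only for this sketch.
def _demo_whitening_gives_identity_covariance():
    import numpy as np
    from scipy.linalg import svd
    rng = np.random.RandomState(0)
    X = rng.randn(5, 2000) * np.array([[1.0], [2.0], [0.5], [3.0], [1.5]])
    X -= X.mean(axis=1, keepdims=True)
    cov_X = np.dot(X, X.T) / X.shape[1]
    U, D, _ = svd(cov_X)
    W = np.dot(U, U.T / np.sqrt(D)[:, None])
    Y = np.dot(W, X)
    cov_Y = np.dot(Y, Y.T) / Y.shape[1]
    assert np.allclose(cov_Y, np.eye(5), atol=1e-8)
    return cov_Y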
@testing.requires_testing_data
def test_mne_python_vs_eeglab():
""" Test eeglab vs mne_python infomax code.
"""
random_state = 42
methods = ['infomax', 'infomax', 'extended_infomax', 'extended_infomax']
list_ch_types = ['eeg', 'mag', 'eeg', 'mag']
for method, ch_type in zip(methods, list_ch_types):
if method == 'infomax':
if ch_type == 'eeg':
eeglab_results_file = 'eeglab_infomax_results_eeg_data.mat'
elif ch_type == 'mag':
eeglab_results_file = 'eeglab_infomax_results_meg_data.mat'
elif method == 'extended_infomax':
if ch_type == 'eeg':
eeglab_results_file = ('eeglab_extended_infomax_results_eeg_'
'data.mat')
elif ch_type == 'mag':
eeglab_results_file = ('eeglab_extended_infomax_results_meg_'
'data.mat')
Y = generate_data_for_comparing_against_eeglab_infomax(ch_type,
random_state)
N = Y.shape[0]
T = Y.shape[1]
        # For comparison against eeglab, make sure the following
# parameters have the same value in mne_python and eeglab:
#
# - starting point
# - random state
# - learning rate
# - block size
# - blowup parameter
# - blowup_fac parameter
# - tolerance for stopping the algorithm
# - number of iterations
# - anneal_step parameter
#
# Notes:
        # * By default, eeglab whitens the data using the "sphering transform"
        #   instead of pca. The mne_python infomax code does not
        #   whiten the data. To make sure both mne_python and eeglab start
        #   from the same point (i.e., the same matrix), we need to make sure
        #   to whiten the data outside, and pass this whitened data to
        #   mne_python and eeglab. Finally, we need to tell eeglab that
        #   the input data is already whitened; this can be done by calling
        #   eeglab with the following syntax:
#
# % Run infomax
# [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
# runica( Y, 'sphering', 'none');
#
# % Run extended infomax
# [unmixing,sphere,meanvar,bias,signs,lrates,sources,y] = ...
# runica( Y, 'sphering', 'none', 'extended', 1);
#
# By calling eeglab using the former code, we are using its default
# parameters, which are specified below in the section
# "EEGLAB default parameters".
#
# * eeglab does not expose a parameter for fixing the random state.
# Therefore, to accomplish this, we need to edit the runica.m
# file located at /path_to_eeglab/functions/sigprocfunc/runica.m
#
# i) Comment the line related with the random number generator
# (line 812).
# ii) Then, add the following line just below line 812:
# rng(42); %use 42 as random seed.
#
# * eeglab does not have the parameter "n_small_angle",
# so we need to disable it for making a fair comparison.
#
# * Finally, we need to take the unmixing matrix estimated by the
# mne_python infomax implementation and order the components
# in the same way that eeglab does. This is done below in the section
# "Order the components in the same way that eeglab does".
###############################################################
# EEGLAB default parameters
###############################################################
l_rate_eeglab = 0.00065 / np.log(N)
block_eeglab = int(np.ceil(np.min([5 * np.log(T), 0.3 * T])))
blowup_eeglab = 1e9
blowup_fac_eeglab = 0.8
max_iter_eeglab = 512
if method == 'infomax':
anneal_step_eeglab = 0.9
use_extended = False
elif method == 'extended_infomax':
anneal_step_eeglab = 0.98
use_extended = True
if N > 32:
w_change_eeglab = 1e-7
else:
w_change_eeglab = 1e-6
###############################################################
# Call mne_python infomax version using the following sintax
# to obtain the same result than eeglab version
unmixing = infomax(Y.T, extended=use_extended,
random_state=random_state,
max_iter=max_iter_eeglab,
l_rate=l_rate_eeglab,
block=block_eeglab,
w_change=w_change_eeglab,
blowup=blowup_eeglab,
blowup_fac=blowup_fac_eeglab,
n_small_angle=None,
anneal_step=anneal_step_eeglab
)
#######################################################################
# Order the components in the same way that eeglab does
#######################################################################
sources = np.dot(unmixing, Y)
mixing = pinv(unmixing)
mvar = np.sum(mixing ** 2, axis=0) * \
np.sum(sources ** 2, axis=1) / (N * T - 1)
windex = np.argsort(mvar)[::-1]
unmixing_ordered = unmixing[windex, :]
#######################################################################
#######################################################################
# Load the eeglab results, then compare the unmixing matrices estimated
# by mne_python and eeglab. To make the comparison use the
# \ell_inf norm:
# ||unmixing_mne_python - unmixing_eeglab||_inf
#######################################################################
eeglab_data = sio.loadmat(op.join(base_dir, eeglab_results_file))
unmixing_eeglab = eeglab_data['unmixing_eeglab']
maximum_difference = np.max(np.abs(unmixing_ordered - unmixing_eeglab))
assert_almost_equal(maximum_difference, 1e-12, decimal=10)
| bsd-3-clause |
ncdesouza/bookworm | env/lib/python2.7/site-packages/whoosh/util/text.py | 96 | 4372 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import codecs, re
from whoosh.compat import string_type, u, byte
# Note: these functions return a tuple of (text, length), so when you call
# them, you have to add [0] on the end, e.g. str = utf8encode(unicode)[0]
utf8encode = codecs.getencoder("utf-8")
utf8decode = codecs.getdecoder("utf-8")
# Prefix encoding functions
def first_diff(a, b):
"""
Returns the position of the first differing character in the sequences a
and b. For example, first_diff('render', 'rending') == 4. This function
limits the return value to 255 so the difference can be encoded in a single
byte.
"""
i = 0
    while i < 255 and i < len(a) and i < len(b) and a[i] == b[i]:
i += 1
return i
def prefix_encode(a, b):
"""
Compresses bytestring b as a byte representing the prefix it shares with a,
followed by the suffix bytes.
"""
i = first_diff(a, b)
return byte(i) + b[i:]
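# Minimal illustration (added; not part of the original module): the shared
# prefix length is stored in a single leading byte, followed by the differing
# suffix.  Byte strings are used because these helpers operate on encoded
# terms; the helper name is an assumption for this sketch.
def _demo_prefix_encode():
    a, b = b"render", b"rending"
    assert first_diff(a, b) == 4          # "rend" is the common prefix
    encoded = prefix_encode(a, b)
    assert encoded == byte(4) + b"ing"    # one length byte + the suffix
    return encoded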
def prefix_encode_all(ls):
"""Compresses the given list of (unicode) strings by storing each string
(except the first one) as an integer (encoded in a byte) representing
the prefix it shares with its predecessor, followed by the suffix encoded
as UTF-8.
"""
last = u('')
for w in ls:
i = first_diff(last, w)
yield chr(i) + w[i:].encode("utf-8")
last = w
def prefix_decode_all(ls):
"""Decompresses a list of strings compressed by prefix_encode().
"""
last = u('')
for w in ls:
i = ord(w[0])
decoded = last[:i] + w[1:].decode("utf-8")
yield decoded
last = decoded
# Natural key sorting function
_nkre = re.compile(r"\D+|\d+", re.UNICODE)
def _nkconv(i):
try:
return int(i)
except ValueError:
return i.lower()
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
``name5`` will come before ``name10`` and ``1`` will come before ``A``).
This function is designed to be used as the ``key`` argument to sorting
functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
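# Quick usage sketch (added for illustration; not part of the original module):
# the key tuples make digit runs compare numerically and letters compare
# case-insensitively, so "name2" sorts before "name10".
def _demo_natural_key_sort():
    names = ["name10", "name2", "Name1"]
    assert sorted(names, key=natural_key) == ["Name1", "name2", "name10"]
    return sorted(names, key=natural_key)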
# Regular expression functions
def rcompile(pattern, flags=0, verbose=False):
"""A wrapper for re.compile that checks whether "pattern" is a regex object
or a string to be compiled, and automatically adds the re.UNICODE flag.
"""
if not isinstance(pattern, string_type):
# If it's not a string, assume it's already a compiled pattern
return pattern
if verbose:
flags |= re.VERBOSE
return re.compile(pattern, re.UNICODE | flags)
| gpl-3.0 |
tkerola/chainer | chainer/functions/normalization/batch_renormalization.py | 2 | 8182 | import warnings
import numpy
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function
from chainer.functions.normalization import batch_normalization
from chainer.utils import type_check
def _xhat(x, mean, std, expander):
x_mu = x - mean[expander]
x_mu /= std[expander]
return x_mu
class BatchRenormalizationFunction(function.Function):
def __init__(self, eps=2e-5, mean=None, var=None, decay=0.9,
rmax=1, dmax=0, update_statistics=True):
self._running_mean = mean
self._running_var = var
self.rmax = rmax
self.dmax = dmax
self.r = None
self.update_statistics = update_statistics
self.eps = eps
self.decay = decay
def _warn_accessing_property(self):
warnings.warn(
'The attributes of BatchRenormalizationFunction '
'are deprecated. '
'Consider setting update_statistics=True to '
'batch_renormalization to update running statistics.',
DeprecationWarning)
@property
def running_mean(self):
self._warn_accessing_property()
return self._running_mean
@property
def running_var(self):
self._warn_accessing_property()
return self._running_var
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, gamma_type, beta_type = in_types
M = type_check.eval(gamma_type.ndim)
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim >= gamma_type.ndim + 1,
x_type.shape[1:1 + M] == gamma_type.shape,
# TODO(tkerola): Check shape
gamma_type.dtype.kind == 'f',
gamma_type.dtype == beta_type.dtype,
gamma_type.shape == beta_type.shape,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
x, gamma, beta = inputs
# Note: we must be in train mode.
assert configuration.config.train
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
axis = (0,) + tuple(range(head_ndim, x.ndim))
mean = x.mean(axis=axis, dtype=gamma.dtype)
var = x.var(axis=axis, dtype=gamma.dtype) + self.eps
self.std = xp.sqrt(var, dtype=var.dtype)
running_sigma = xp.sqrt(self._running_var + self.eps,
dtype=self._running_mean.dtype)
self.r = xp.clip(self.std / running_sigma,
1.0 / self.rmax, self.rmax)
d = xp.clip(
(mean - self._running_mean) / running_sigma,
-self.dmax, self.dmax)
gamma = gamma[expander]
beta = beta[expander]
if xp is numpy:
self.x_hat = _xhat(x, mean, self.std, expander)
self.x_hat_renorm = self.x_hat * self.r[expander] + d[expander]
y = gamma * self.x_hat_renorm
y += beta
y = y.astype(dtype=x.dtype)
else:
self.x_hat, self.x_hat_renorm, y = cuda.elementwise(
'T x, U mean, U std, U gamma, U beta, U r, U d',
'U x_hat, U x_hat_renorm, T y',
'''
x_hat = (x - mean) / std;
x_hat_renorm = x_hat * r + d;
y = gamma * x_hat_renorm + beta;
''',
'brn_fwd')(
x, mean[expander], self.std[expander], gamma, beta,
self.r[expander], d[expander])
if self.update_statistics:
m = x.size // gamma[expander].size
self._running_mean *= self.decay
adjust = m / max(m - 1., 1.) # unbiased estimation
temp_ar = xp.array(mean)
temp_ar *= (1 - self.decay)
self._running_mean += temp_ar
del temp_ar
self._running_var *= self.decay
temp_ar = xp.array(var)
temp_ar *= (1 - self.decay) * adjust
self._running_var += temp_ar
del temp_ar
return y,
def backward(self, inputs, grad_outputs):
x, gamma, _ = inputs
gy = grad_outputs[0]
head_ndim = gamma.ndim + 1
expander = (None, Ellipsis) + (None,) * (x.ndim - head_ndim)
m = gamma.dtype.type(x.size // gamma.size)
axis = (0,) + tuple(range(head_ndim, x.ndim))
xp = backend.get_array_module(x)
# Note: we must be in train mode.
assert configuration.config.train
# NOTE(tommi): cuDNN is not used since it does not support
# batch renormalization
gbeta = gy.sum(axis=axis, dtype=gamma.dtype)
ggamma = (gy * self.x_hat_renorm).sum(axis=axis)
gsigma_batch = (gy * self.x_hat).sum(axis=axis)
if xp is numpy:
scale = (self.r * gamma / self.std)[expander]
gx = scale * (gy - (self.x_hat * gsigma_batch[expander] +
gbeta[expander]) / m)
gx = gx.astype(dtype=x.dtype)
else:
inv_m = numpy.float32(1) / m
gx = cuda.elementwise(
'T gy, U x_hat, U gamma, U std, U gsigma_batch, U gbeta, \
U inv_m, U r',
'T gx',
'gx = (r * gamma / std) * (gy - (x_hat * gsigma_batch + gbeta) * \
inv_m)',
'brn_bwd')(
gy, self.x_hat, gamma[expander],
self.std[expander], gsigma_batch[expander],
gbeta[expander], inv_m, self.r[expander])
return gx, ggamma, gbeta
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- ``F.batch_renormalization`` maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintains the moving
average of standard deviations :math:`\\sigma`.
- ``F.batch_renormalization`` applies Bessel's correction to update the
moving average of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
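# Illustrative NumPy sketch (an addition, not part of the original module) of
# the correction that BatchRenormalizationFunction.forward applies: the
# batch-normalized value x_hat is rescaled by r and shifted by d, where r and d
# compare batch statistics with the running statistics and are clipped to
# [1/rmax, rmax] and [-dmax, dmax].  All names and numbers here are made up.
def _demo_renorm_correction():
    import numpy
    x = numpy.random.RandomState(0).randn(64, 8).astype(numpy.float32)
    running_mean = numpy.zeros(8, dtype=numpy.float32)
    running_var = numpy.ones(8, dtype=numpy.float32)
    rmax, dmax, eps = 3.0, 5.0, 2e-5
    mean = x.mean(axis=0)
    std = numpy.sqrt(x.var(axis=0) + eps)
    running_sigma = numpy.sqrt(running_var + eps)
    r = numpy.clip(std / running_sigma, 1.0 / rmax, rmax)
    d = numpy.clip((mean - running_mean) / running_sigma, -dmax, dmax)
    x_hat = (x - mean) / std
    x_hat_renorm = x_hat * r + d
    return x_hat_renorm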
def fixed_batch_renormalization(x, gamma, beta, mean, var, eps=2e-5):
warnings.warn(
'fixed_batch_renormalization is deprecated. '
'Use fixed_batch_normalization instead.',
DeprecationWarning)
with configuration.using_config('train', False):
return batch_normalization.fixed_batch_normalization(
x, gamma, beta, mean, var, eps
)
| mit |
asiersarasua/QGIS | src/plugins/grass/scripts/db.connect-login.pg.py | 17 | 3779 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
***************************************************************************
db.connect-login.pg.py - Connect to PostgreSQL
---------------------
Date : July 2009
Copyright : (C) 2009 by Radim Blazek
Email : radim dot blazek at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Radim Blazek'
__date__ = 'July 2009'
__copyright__ = '(C) 2009, Radim Blazek'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
#%Module
#% description: Make connection to PostgreSQL database and login.
#% keywords: database
#%End
#%option
#% key: host
#% type: string
#% label: Host
#% description: Host name of the machine on which the server is running.
#% required : no
#%end
#%option
#% key: port
#% type: integer
#% label: Port
#% description: TCP port on which the server is listening, usually 5432.
#% required : no
#%end
#%option
#% key: database
#% type: string
#% key_desc : name
#% gisprompt: old_dbname,dbname,dbname
#% label: Database
#% description: Database name
#% required : yes
#%end
#%option
#% key: schema
#% type: string
#% label: Schema
#% description: Database schema.
#% required : no
#%end
#%option
#% key: user
#% type: string
#% label: User
#% description: Connect to the database as the user username instead of the default.
#% required : no
#%end
#%option
#% key: password
#% type: string
#% label: Password
#% description: Password will be stored in file!
#% required : no
#%end
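# Illustrative invocation (an assumption for documentation purposes, not part of
# the original script): GRASS modules are normally run with key=value options
# from a GRASS session, e.g.
#   db.connect-login.pg.py database=gisdb host=localhost port=5432 \
#       schema=public user=gis password=secret
# grass.parser() at the bottom of this file turns those pairs into 'options'.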
import sys
try:
from grass.script import core as grass
except ImportError:
import grass
except:
raise Exception("Cannot find 'grass' Python module. Python is supported by GRASS from version >= 6.4")
def main():
host = options['host']
port = options['port']
database = options['database']
schema = options['schema']
user = options['user']
password = options['password']
# Test connection
conn = "dbname=" + database
if host:
conn += ",host=" + host
if port:
conn += ",port=" + port
# Unfortunately we cannot test until user/password is set
if user or password:
print("Setting login (db.login) ... ")
sys.stdout.flush()
if grass.run_command('db.login', driver="pg", database=conn, user=user, password=password) != 0:
grass.fatal("Cannot login")
# Try to connect
print("Testing connection ...")
sys.stdout.flush()
if grass.run_command('db.select', quiet=True, flags='c', driver="pg", database=conn, sql="select version()") != 0:
if user or password:
print("Deleting login (db.login) ...")
sys.stdout.flush()
if grass.run_command('db.login', quiet=True, driver="pg", database=conn, user="", password="") != 0:
print("Cannot delete login.")
sys.stdout.flush()
grass.fatal("Cannot connect to database.")
if grass.run_command('db.connect', driver="pg", database=conn, schema=schema) != 0:
grass.fatal("Cannot connect to database.")
if __name__ == "__main__":
options, flags = grass.parser()
main()
| gpl-2.0 |
ianatpn/nupictest | tests/external/py2/testfixture_test.py | 17 | 1769 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Unit tests for our dependencies in the pytest package; at the time of this
writing, we were using an unreleased version of pytest that added support for
the unittest setUpModule fixture and friends. Some of our tests rely on
setUpModule. Once, there was a conflict with pytest installation in our build
system, and an older version of pytest was installed that didn't support
setUpModule, which resulted in subtle side-effects in some of these tests.
"""
import unittest2 as unittest
g_setUpModuleCalled = False
def setUpModule():
global g_setUpModuleCalled
g_setUpModuleCalled = True
class TestPytest(unittest.TestCase):
def testSetUpModuleCalled(self):
self.assertTrue(g_setUpModuleCalled)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
fujunwei/chromium-crosswalk | tools/json_schema_compiler/idl_schema.py | 32 | 20817 | #! /usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import json
import os.path
import pprint
import re
import sys
from json_parse import OrderedDict
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'ppapi', 'generators')
if _idl_generators_path in sys.path:
import idl_parser
else:
  sys.path.insert(0, _idl_generators_path)
  try:
    import idl_parser
  finally:
    sys.path.pop(0)
def ProcessComment(comment):
'''
Convert a comment into a parent comment and a list of parameter comments.
Function comments are of the form:
Function documentation. May contain HTML and multiple lines.
|arg1_name|: Description of arg1. Use <var>argument</var> to refer
to other arguments.
|arg2_name|: Description of arg2...
Newlines are removed, and leading and trailing whitespace is stripped.
Args:
comment: The string from a Comment node.
Returns: A tuple that looks like:
(
"The processed comment, minus all |parameter| mentions.",
{
'parameter_name_1': "The comment that followed |parameter_name_1|:",
...
}
)
'''
def add_paragraphs(content):
paragraphs = content.split('\n\n')
if len(paragraphs) < 2:
return content
return '<p>' + '</p><p>'.join(p.strip() for p in paragraphs) + '</p>'
# Find all the parameter comments of the form '|name|: comment'.
parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))
  # Get the parent comment (everything before the first parameter comment).
first_parameter_location = (parameter_starts[0].start()
if parameter_starts else len(comment))
parent_comment = (add_paragraphs(comment[:first_parameter_location].strip())
.replace('\n', ''))
params = OrderedDict()
for (cur_param, next_param) in itertools.izip_longest(parameter_starts,
parameter_starts[1:]):
param_name = cur_param.group(1)
# A parameter's comment goes from the end of its introduction to the
# beginning of the next parameter's introduction.
param_comment_start = cur_param.end()
param_comment_end = next_param.start() if next_param else len(comment)
params[param_name] = (
add_paragraphs(comment[param_comment_start:param_comment_end].strip())
.replace('\n', ''))
return (parent_comment, params)
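def _ProcessCommentExample():
  # Illustrative self-check, not part of the original file: the parameter names
  # below are invented purely to show the (parent_comment, params) shape that
  # ProcessComment returns. Nothing in the compiler calls this helper.
  parent, params = ProcessComment(
      'Does a thing.\n|tabId|: The target tab.\n|callback|: Called when done.')
  assert parent == 'Does a thing.'
  assert params['tabId'] == 'The target tab.'
  assert params['callback'] == 'Called when done.'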
class Callspec(object):
'''
Given a Callspec node representing an IDL function declaration, converts into
a tuple:
(name, list of function parameters, return type)
'''
def __init__(self, callspec_node, comment):
self.node = callspec_node
self.comment = comment
def process(self, callbacks):
parameters = []
return_type = None
if self.node.GetProperty('TYPEREF') not in ('void', None):
return_type = Typeref(self.node.GetProperty('TYPEREF'),
self.node.parent,
{'name': self.node.GetName()}).process(callbacks)
# The IDL parser doesn't allow specifying return types as optional.
# Instead we infer any object return values to be optional.
# TODO(asargent): fix the IDL parser to support optional return types.
if return_type.get('type') == 'object' or '$ref' in return_type:
return_type['optional'] = True
for node in self.node.GetChildren():
parameter = Param(node).process(callbacks)
if parameter['name'] in self.comment:
parameter['description'] = self.comment[parameter['name']]
parameters.append(parameter)
return (self.node.GetName(), parameters, return_type)
class Param(object):
'''
Given a Param node representing a function parameter, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self, param_node):
self.node = param_node
def process(self, callbacks):
return Typeref(self.node.GetProperty('TYPEREF'),
self.node,
{'name': self.node.GetName()}).process(callbacks)
class Dictionary(object):
'''
Given an IDL Dictionary node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, dictionary_node):
self.node = dictionary_node
def process(self, callbacks):
properties = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Member':
k, v = Member(node).process(callbacks)
properties[k] = v
result = {'id': self.node.GetName(),
'properties': properties,
'type': 'object'}
if self.node.GetProperty('nodoc'):
result['nodoc'] = True
elif self.node.GetProperty('inline_doc'):
result['inline_doc'] = True
elif self.node.GetProperty('noinline_doc'):
result['noinline_doc'] = True
return result
class Member(object):
'''
Given an IDL dictionary or interface member, converts into a name/value pair
where the value is a Python dictionary that the JSON schema compiler expects
to see.
'''
def __init__(self, member_node):
self.node = member_node
def process(self, callbacks, functions_are_properties=False):
properties = OrderedDict()
name = self.node.GetName()
if self.node.GetProperty('deprecated'):
properties['deprecated'] = self.node.GetProperty('deprecated')
if self.node.GetProperty('allowAmbiguousOptionalArguments'):
properties['allowAmbiguousOptionalArguments'] = True
for property_name in ('OPTIONAL', 'nodoc', 'nocompile', 'nodart'):
if self.node.GetProperty(property_name):
properties[property_name.lower()] = True
for option_name, sanitizer in [
('maxListeners', int),
('supportsFilters', lambda s: s == 'true'),
('supportsListeners', lambda s: s == 'true'),
('supportsRules', lambda s: s == 'true')]:
if self.node.GetProperty(option_name):
if 'options' not in properties:
properties['options'] = {}
properties['options'][option_name] = sanitizer(self.node.GetProperty(
option_name))
type_override = None
parameter_comments = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Comment':
(parent_comment, parameter_comments) = ProcessComment(node.GetName())
properties['description'] = parent_comment
elif node.cls == 'Callspec':
name, parameters, return_type = (Callspec(node, parameter_comments)
.process(callbacks))
if functions_are_properties:
# If functions are treated as properties (which will happen if the
# interface is named Properties) then this isn't a function, it's a
# property which is encoded as a function with no arguments. The
# property type is the return type. This is an egregious hack in lieu
# of the IDL parser supporting 'const'.
          assert parameters == [], (
            'Property "%s" must be a no-argument function '
            'with a non-void return type' % name)
          assert return_type is not None, (
            'Property "%s" must be a no-argument function '
            'with a non-void return type' % name)
assert 'type' in return_type, (
'Property return type "%s" from "%s" must specify a '
'fundamental IDL type.' % (pprint.pformat(return_type), name))
type_override = return_type['type']
else:
type_override = 'function'
properties['parameters'] = parameters
if return_type is not None:
properties['returns'] = return_type
properties['name'] = name
if type_override is not None:
properties['type'] = type_override
else:
properties = Typeref(self.node.GetProperty('TYPEREF'),
self.node, properties).process(callbacks)
value = self.node.GetProperty('value')
if value is not None:
# IDL always returns values as strings, so cast to their real type.
properties['value'] = self.cast_from_json_type(properties['type'], value)
enum_values = self.node.GetProperty('legalValues')
if enum_values:
# IDL always returns enum values as strings, so cast to their real type.
properties['enum'] = [self.cast_from_json_type(properties['type'], enum)
for enum in enum_values]
return name, properties
def cast_from_json_type(self, json_type, string_value):
'''Casts from string |string_value| to a real Python type based on a JSON
Schema type |json_type|. For example, a string value of '42' and a JSON
Schema type 'integer' will cast to int('42') ==> 42.
'''
if json_type == 'integer':
return int(string_value)
if json_type == 'number':
return float(string_value)
# Add more as necessary.
assert json_type == 'string', (
'No rule exists to cast JSON Schema type "%s" to its equivalent '
'Python type for value "%s". You must add a new rule here.' %
(json_type, string_value))
return string_value
class Typeref(object):
'''
Given a TYPEREF property representing the type of dictionary member or
function parameter, converts into a Python dictionary that the JSON schema
compiler expects to see.
'''
def __init__(self, typeref, parent, additional_properties):
self.typeref = typeref
self.parent = parent
self.additional_properties = additional_properties
def process(self, callbacks):
properties = self.additional_properties
result = properties
if self.parent.GetPropertyLocal('OPTIONAL'):
properties['optional'] = True
# The IDL parser denotes array types by adding a child 'Array' node onto
# the Param node in the Callspec.
for sibling in self.parent.GetChildren():
if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
properties['type'] = 'array'
properties['items'] = OrderedDict()
properties = properties['items']
break
if self.typeref == 'DOMString':
properties['type'] = 'string'
elif self.typeref == 'boolean':
properties['type'] = 'boolean'
elif self.typeref == 'double':
properties['type'] = 'number'
elif self.typeref == 'long':
properties['type'] = 'integer'
elif self.typeref == 'any':
properties['type'] = 'any'
elif self.typeref == 'object':
properties['type'] = 'object'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
instance_of = self.parent.GetProperty('instanceOf')
if instance_of:
properties['isInstanceOf'] = instance_of
elif self.typeref == 'ArrayBuffer':
properties['type'] = 'binary'
properties['isInstanceOf'] = 'ArrayBuffer'
elif self.typeref == 'FileEntry':
properties['type'] = 'object'
properties['isInstanceOf'] = 'FileEntry'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
elif self.parent.GetPropertyLocal('Union'):
properties['choices'] = [Typeref(node.GetProperty('TYPEREF'),
node,
OrderedDict()).process(callbacks)
for node in self.parent.GetChildren()
if node.cls == 'Option']
elif self.typeref is None:
properties['type'] = 'function'
else:
if self.typeref in callbacks:
# Do not override name and description if they are already specified.
name = properties.get('name', None)
description = properties.get('description', None)
properties.update(callbacks[self.typeref])
if description is not None:
properties['description'] = description
if name is not None:
properties['name'] = name
else:
properties['$ref'] = self.typeref
return result
class Enum(object):
'''
Given an IDL Enum node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, enum_node):
self.node = enum_node
self.description = ''
def process(self):
enum = []
for node in self.node.GetChildren():
if node.cls == 'EnumItem':
enum_value = {'name': node.GetName()}
for child in node.GetChildren():
if child.cls == 'Comment':
enum_value['description'] = ProcessComment(child.GetName())[0]
else:
raise ValueError('Did not process %s %s' % (child.cls, child))
enum.append(enum_value)
elif node.cls == 'Comment':
self.description = ProcessComment(node.GetName())[0]
else:
sys.exit('Did not process %s %s' % (node.cls, node))
result = {'id' : self.node.GetName(),
'description': self.description,
'type': 'string',
'enum': enum}
for property_name in (
'inline_doc', 'noinline_doc', 'nodoc', 'cpp_enum_prefix_override',):
if self.node.GetProperty(property_name):
result[property_name] = self.node.GetProperty(property_name)
if self.node.GetProperty('deprecated'):
result['deprecated'] = self.node.GetProperty('deprecated')
return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
namespace_node,
description,
nodoc=False,
internal=False,
platforms=None,
compiler_options=None,
deprecated=None,
documentation_options=None):
self.namespace = namespace_node
self.nodoc = nodoc
self.internal = internal
self.platforms = platforms
self.compiler_options = compiler_options
self.events = []
self.functions = []
self.properties = OrderedDict()
self.types = []
self.callbacks = OrderedDict()
self.description = description
self.deprecated = deprecated
self.documentation_options = documentation_options
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Properties':
properties_as_list = self.process_interface(
node, functions_are_properties=True)
for prop in properties_as_list:
# Properties are given as key-value pairs, but IDL will parse
# it as a list. Convert back to key-value pairs.
prop_name = prop.pop('name')
assert not self.properties.has_key(prop_name), (
'Property "%s" cannot be specified more than once.' %
prop_name)
self.properties[prop_name] = prop
elif node.cls == 'Enum':
self.types.append(Enum(node).process())
else:
sys.exit('Did not process %s %s' % (node.cls, node))
compiler_options = self.compiler_options or {}
documentation_options = self.documentation_options or {}
return {'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'properties': self.properties,
'internal': self.internal,
'events': self.events,
'platforms': self.platforms,
'compiler_options': compiler_options,
'deprecated': self.deprecated,
'documentation_options': documentation_options}
def process_interface(self, node, functions_are_properties=False):
members = []
for member in node.GetChildren():
if member.cls == 'Member':
_, properties = Member(member).process(
self.callbacks,
functions_are_properties=functions_are_properties)
members.append(properties)
return members
class IDLSchema(object):
'''
Given a list of IDLNodes and IDLAttributes, converts into a Python list
of api_defs that the JSON schema compiler expects to see.
'''
def __init__(self, idl):
self.idl = idl
def process(self):
namespaces = []
nodoc = False
internal = False
description = None
platforms = None
compiler_options = {}
deprecated = None
documentation_options = {}
for node in self.idl:
if node.cls == 'Namespace':
if not description:
# TODO(kalman): Go back to throwing an error here.
print('%s must have a namespace-level comment. This will '
'appear on the API summary page.' % node.GetName())
description = ''
namespace = Namespace(node, description, nodoc, internal,
platforms=platforms,
compiler_options=compiler_options or None,
deprecated=deprecated,
documentation_options=documentation_options)
namespaces.append(namespace.process())
nodoc = False
internal = False
platforms = None
compiler_options = None
elif node.cls == 'Copyright':
continue
elif node.cls == 'Comment':
description = node.GetName()
elif node.cls == 'ExtAttribute':
if node.name == 'nodoc':
nodoc = bool(node.value)
elif node.name == 'internal':
internal = bool(node.value)
elif node.name == 'platforms':
platforms = list(node.value)
elif node.name == 'implemented_in':
compiler_options['implemented_in'] = node.value
elif node.name == 'camel_case_enum_to_string':
compiler_options['camel_case_enum_to_string'] = node.value
elif node.name == 'deprecated':
deprecated = str(node.value)
elif node.name == 'documentation_title':
documentation_options['title'] = node.value
elif node.name == 'documentation_namespace':
documentation_options['namespace'] = node.value
elif node.name == 'documented_in':
documentation_options['documented_in'] = node.value
else:
continue
else:
sys.exit('Did not process %s %s' % (node.cls, node))
return namespaces
def Load(filename):
'''
Given the filename of an IDL file, parses it and returns an equivalent
Python dictionary in a format that the JSON schema compiler expects to see.
'''
f = open(filename, 'r')
contents = f.read()
f.close()
return Process(contents, filename)
def Process(contents, filename):
'''
Processes the contents of a file and returns an equivalent Python dictionary
in a format that the JSON schema compiler expects to see. (Separate from
Load primarily for testing purposes.)
'''
idl = idl_parser.IDLParser().ParseData(contents, filename)
idl_schema = IDLSchema(idl)
return idl_schema.process()
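# Usage sketch (illustrative; the file name is made up): a caller could parse a
# single IDL file and dump the resulting schema the same way Main() does below.
#
#   api_defs = Load('some_api.idl')
#   print json.dumps(api_defs, indent=2)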
def Main():
'''
Dump a json serialization of parse result for the IDL files whose names
were passed in on the command line.
'''
if len(sys.argv) > 1:
for filename in sys.argv[1:]:
schema = Load(filename)
print json.dumps(schema, indent=2)
else:
contents = sys.stdin.read()
idl = idl_parser.IDLParser().ParseData(contents, '<stdin>')
schema = IDLSchema(idl).process()
print json.dumps(schema, indent=2)
if __name__ == '__main__':
Main()
| bsd-3-clause |
chjw8016/GreenOdoo7-haibao | openerp/addons/product/product.py | 8 | 48196 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
from _common import rounding
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
def ean_checksum(eancode):
"""returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length"""
if len(eancode) <> 13:
return -1
oddsum=0
evensum=0
total=0
eanvalue=eancode
reversevalue = eanvalue[::-1]
finalean=reversevalue[1:]
for i in range(len(finalean)):
if i % 2 == 0:
oddsum += int(finalean[i])
else:
evensum += int(finalean[i])
total=(oddsum * 3) + evensum
check = int(10 - math.ceil(total % 10.0)) %10
return check
def check_ean(eancode):
"""returns True if eancode is a valid ean13 string, or null"""
if not eancode:
return True
if len(eancode) <> 13:
return False
try:
int(eancode)
except:
return False
return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
"""Creates and returns a valid ean13 from an invalid one"""
if not ean13:
return "0000000000000"
ean13 = re.sub("[A-Za-z]","0",ean13);
ean13 = re.sub("[^0-9]","",ean13);
ean13 = ean13[:13]
if len(ean13) < 13:
ean13 = ean13 + '0' * (13-len(ean13))
return ean13[:-1] + str(ean_checksum(ean13))
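def _ean_helpers_example():
    # Illustrative self-check, not part of the original module: '4006381333931'
    # is a well-known valid EAN-13 chosen only as an example, and nothing in the
    # addon calls this helper.
    assert check_ean('4006381333931')
    assert sanitize_ean13('400638133393') == '4006381333931'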
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
_name = 'product.uom.categ'
_description = 'Product uom categ'
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
}
product_uom_categ()
class product_uom(osv.osv):
_name = 'product.uom'
_description = 'Product Unit of Measure'
def _compute_factor_inv(self, factor):
return factor and (1.0 / factor) or 0.0
def _factor_inv(self, cursor, user, ids, name, arg, context=None):
res = {}
for uom in self.browse(cursor, user, ids, context=context):
res[uom.id] = self._compute_factor_inv(uom.factor)
return res
def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)
def name_create(self, cr, uid, name, context=None):
""" The UoM category and factor are required, so we'll have to add temporary values
for imported UoMs """
uom_categ = self.pool.get('product.uom.categ')
        # look for the category based on the English name, i.e. no context on purpose!
# TODO: should find a way to have it translated but not created until actually used
categ_misc = 'Unsorted/Imported Units'
categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
if categ_id:
categ_id = categ_id[0]
else:
categ_id, _ = uom_categ.name_create(cr, uid, categ_misc)
uom_id = self.create(cr, uid, {self._rec_name: name,
'category_id': categ_id,
'factor': 1})
return self.name_get(cr, uid, [uom_id], context=context)[0]
def create(self, cr, uid, data, context=None):
if 'factor_inv' in data:
if data['factor_inv'] <> 1:
data['factor'] = self._compute_factor_inv(data['factor_inv'])
del(data['factor_inv'])
return super(product_uom, self).create(cr, uid, data, context)
_order = "name"
_columns = {
'name': fields.char('Unit of Measure', size=64, required=True, translate=True),
'category_id': fields.many2one('product.uom.categ', 'Category', required=True, ondelete='cascade',
help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
'factor': fields.float('Ratio', required=True,digits=(12, 12),
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'factor_inv': fields.function(_factor_inv, digits=(12,12),
fnct_inv=_factor_inv_write,
string='Ratio',
help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
'1 * (this unit) = ratio * (reference unit)', required=True),
'rounding': fields.float('Rounding Precision', digits_compute=dp.get_precision('Product Unit of Measure'), required=True,
help="The computed quantity will be a multiple of this value. "\
"Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
('reference','Reference Unit of Measure for this category'),
('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
}
_defaults = {
'active': 1,
'rounding': 0.01,
'uom_type': 'reference',
}
_sql_constraints = [
('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
]
def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False):
if not from_uom_id or not qty or not to_uom_id:
return qty
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit)
def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, context=None):
if context is None:
context = {}
if from_unit.category_id.id <> to_unit.category_id.id:
if context.get('raise-exception', True):
                raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they belong to different categories.') % (from_unit.name,to_unit.name,))
else:
return qty
amount = qty / from_unit.factor
if to_unit:
amount = rounding(amount * to_unit.factor, to_unit.rounding)
return amount
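    # Worked example (illustrative; the unit names and factors are assumptions):
    # with a reference unit "Unit(s)" (factor=1.0) and a "Dozen(s)" unit
    # (factor=1.0/12, i.e. bigger than the reference), converting qty=2 dozens
    # to units gives amount = 2 / (1.0/12) = 24.0, which is then rounded to the
    # target unit's 'rounding' precision by the rounding() helper used above.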
def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
if not from_uom_id or not price or not to_uom_id:
return price
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
if from_unit.category_id.id <> to_unit.category_id.id:
return price
amount = price * from_unit.factor
if to_uom_id:
amount = amount / to_unit.factor
return amount
def onchange_type(self, cursor, user, ids, value):
if value == 'reference':
return {'value': {'factor': 1, 'factor_inv': 1}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if 'category_id' in vals:
for uom in self.browse(cr, uid, ids, context=context):
if uom.category_id.id != vals['category_id']:
raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
return super(product_uom, self).write(cr, uid, ids, vals, context=context)
product_uom()
class product_ul(osv.osv):
_name = "product.ul"
_description = "Shipping Unit"
_columns = {
'name' : fields.char('Name', size=64,select=True, required=True, translate=True),
'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
}
product_ul()
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "product.category"
_description = "Product Category"
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True, select=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
}
_defaults = {
'type' : lambda *a : 'normal',
}
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
def _check_recursion(self, cr, uid, ids, context=None):
level = 100
while len(ids):
cr.execute('select distinct parent_id from product_category where id IN %s',(tuple(ids),))
ids = filter(None, map(lambda x:x[0], cr.fetchall()))
if not level:
return False
level -= 1
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
def child_get(self, cr, uid, ids):
return [ids]
product_category()
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_description = "Product Template"
_columns = {
'name': fields.char('Name', size=128, required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True),
'description_purchase': fields.text('Purchase Description',translate=True),
'description_sale': fields.text('Sale Description',translate=True),
'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Consumable are product where you don't manage stock, a service is a non-material product provided by a company or an individual."),
'produce_delay': fields.float('Manufacturing Lead Time', help="Average delay in days to produce this product. In the case of multi-level BOM, the manufacturing lead times of the components will be added."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'standard_price': fields.float('Cost', digits_compute=dp.get_precision('Product Price'), help="Cost price of the product used for standard stock valuation in accounting and used as a base price on purchase orders.", groups="base.group_user"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'cost_method': fields.selection([('standard','Standard Price'), ('average','Average Price')], 'Costing Method', required=True,
help="Standard Price: The cost price is manually updated at the end of a specific period (usually every year). \nAverage Price: The cost price is recomputed at each incoming shipment."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category than the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
            help='Specify a unit of measure here if invoicing is made in a different unit of measure than for inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'seller_ids': fields.one2many('product.supplierinfo', 'product_id', 'Supplier'),
'company_id': fields.many2one('res.company', 'Company', select=1),
}
def _get_uom_id(self, cr, uid, *args):
cr.execute('select id from product_uom order by id limit 1')
res = cr.fetchone()
return res and res[0] or False
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("New Unit of Measure '%s' must belong to same Unit of Measure category '%s' as of old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
return super(product_template, self).write(cr, uid, ids, vals, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'cost_method': 'standard',
'standard_price': 0.0,
'sale_ok': 1,
'produce_delay': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff' : 1.0,
'mes_type' : 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
}
def _check_uom(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uom_id.category_id.id <> product.uom_po_id.category_id.id:
return False
return True
def _check_uos(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uos_id \
and product.uos_id.category_id.id \
== product.uom_id.category_id.id:
return False
return True
_constraints = [
(_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if 'partner_id' in context:
pass
return super(product_template, self).name_get(cr, user, ids, context)
product_template()
class product_product(osv.osv):
def view_header_get(self, cr, uid, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
if (context.get('categ_id', False)):
return _('Products: ')+self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
return res
def _product_price(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = self.pool.get('product.pricelist').name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
for id in ids:
try:
price = self.pool.get('product.pricelist').price_get(cr,uid,[pricelist], id, quantity, partner=partner, context=context)[pricelist]
except:
price = 0.0
res[id] = price
for id in ids:
res.setdefault(id, 0.0)
return res
def _get_product_available_func(states, what):
def _product_available(self, cr, uid, ids, name, arg, context=None):
return {}.fromkeys(ids, 0.0)
return _product_available
_product_qty_available = _get_product_available_func(('done',), ('in', 'out'))
_product_virtual_available = _get_product_available_func(('confirmed','waiting','assigned','done'), ('in', 'out'))
_product_outgoing_qty = _get_product_available_func(('confirmed','waiting','assigned'), ('out',))
_product_incoming_qty = _get_product_available_func(('confirmed','waiting','assigned'), ('in',))
def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
res = {}
product_uom_obj = self.pool.get('product.uom')
for id in ids:
res.setdefault(id, 0.0)
for product in self.browse(cr, uid, ids, context=context):
if 'uom' in context:
uom = product.uos_id or product.uom_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, product.list_price, context['uom'])
else:
res[product.id] = product.list_price
res[product.id] = (res[product.id] or 0.0) * (product.price_margin or 1.0) + product.price_extra
return res
def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
for supinfo in product.seller_ids:
if supinfo.name.id == partner_id:
return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name, 'variants': ''}
res = {'code': product.default_code, 'name': product.name, 'variants': product.variants}
return res
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
if not data['variants']:
data['variants'] = p.variants
if not data['code']:
data['code'] = p.code
if not data['name']:
data['name'] = p.name
res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + \
(data['name'] or '') + (data['variants'] and (' - '+data['variants']) or '')
return res
def _get_main_product_supplier(self, cr, uid, product, context=None):
"""Determines the main (best) product supplier for ``product``,
returning the corresponding ``supplierinfo`` record, or False
if none were found. The default strategy is to select the
supplier with the highest priority (i.e. smallest sequence).
:param browse_record product: product to supply
:rtype: product.supplierinfo browse_record or False
"""
sellers = [(seller_info.sequence, seller_info)
for seller_info in product.seller_ids or []
if seller_info and isinstance(seller_info.sequence, (int, long))]
return sellers and sellers[0][1] or False
def _calc_seller(self, cr, uid, ids, fields, arg, context=None):
result = {}
for product in self.browse(cr, uid, ids, context=context):
main_supplier = self._get_main_product_supplier(cr, uid, product, context=context)
result[product.id] = {
'seller_info_id': main_supplier and main_supplier.id or False,
'seller_delay': main_supplier.delay if main_supplier else 1,
'seller_qty': main_supplier and main_supplier.qty or 0.0,
'seller_id': main_supplier and main_supplier.name.id or False
}
return result
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_defaults = {
'active': lambda *a: 1,
'price_extra': lambda *a: 0.0,
'price_margin': lambda *a: 1.0,
'color': 0,
}
_name = "product.product"
_description = "Product"
_table = "product_product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread']
_order = 'default_code,name_template'
_columns = {
'qty_available': fields.function(_product_qty_available, type='float', string='Quantity On Hand'),
'virtual_available': fields.function(_product_virtual_available, type='float', string='Quantity Available'),
'incoming_qty': fields.function(_product_incoming_qty, type='float', string='Incoming'),
'outgoing_qty': fields.function(_product_outgoing_qty, type='float', string='Outgoing'),
'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'lst_price' : fields.function(_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
'code': fields.function(_product_code, type='char', string='Internal Reference'),
'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
'default_code' : fields.char('Internal Reference', size=64, select=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'variants': fields.char('Variants', size=64),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True),
'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
'packaging' : fields.one2many('product.packaging', 'product_id', 'Logistical Units', help="Gives the different ways to package the same product. This has no impact on the picking order and is mainly used if you use the EDI module."),
'price_extra': fields.float('Variant Price Extra', digits_compute=dp.get_precision('Product Price')),
'price_margin': fields.float('Variant Price Margin', digits_compute=dp.get_precision('Product Price')),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', size=128, store=True, select=True),
'color': fields.integer('Color Index'),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.product': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.product': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'seller_info_id': fields.function(_calc_seller, type='many2one', relation="product.supplierinfo", string="Supplier Info", multi="seller_info"),
'seller_delay': fields.function(_calc_seller, type='integer', string='Supplier Lead Time', multi="seller_info", help="This is the average delay in days between the purchase order confirmation and the reception of goods for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.function(_calc_seller, type='float', string='Supplier Quantity', multi="seller_info", help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.function(_calc_seller, type='many2one', relation="res.partner", string='Main Supplier', help="Main Supplier who has highest priority in Supplier List.", multi="seller_info"),
}
def unlink(self, cr, uid, ids, context=None):
unlink_ids = []
unlink_product_tmpl_ids = []
for product in self.browse(cr, uid, ids, context=context):
tmpl_id = product.product_tmpl_id.id
# Check if the product is last product of this template
other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
if not other_product_ids:
unlink_product_tmpl_ids.append(tmpl_id)
unlink_ids.append(product.id)
res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id and uom_po_id:
uom_obj=self.pool.get('product.uom')
uom=uom_obj.browse(cursor,user,[uom_id])[0]
uom_po=uom_obj.browse(cursor,user,[uom_po_id])[0]
if uom.category_id.id != uom_po.category_id.id:
return {'value': {'uom_po_id': uom_id}}
return False
def _check_ean_key(self, cr, uid, ids, context=None):
for product in self.read(cr, uid, ids, ['ean13'], context=context):
res = check_ean(product['ean13'])
return res
_constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]
def on_order(self, cr, uid, ids, orderline, quantity):
pass
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not len(ids):
return []
def _name_get(d):
name = d.get('name','')
code = d.get('default_code',False)
if code:
name = '[%s] %s' % (code,name)
if d.get('variants'):
name = name + ' - %s' % (d['variants'],)
return (d['id'], name)
partner_id = context.get('partner_id', False)
result = []
for product in self.browse(cr, user, ids, context=context):
sellers = filter(lambda x: x.name.id == partner_id, product.seller_ids)
if sellers:
for s in sellers:
mydict = {
'id': product.id,
'name': s.product_name or product.name,
'default_code': s.product_code or product.default_code,
'variants': product.variants
}
result.append(_name_get(mydict))
else:
mydict = {
'id': product.id,
'name': product.name,
'default_code': product.default_code,
'variants': product.variants
}
result.append(_name_get(mydict))
return result
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name:
ids = self.search(cr, user, [('default_code','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('ean13','=',name)]+ args, limit=limit, context=context)
if not ids:
# Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
# on a database with thousands of matching products, due to the huge merge+unique needed for the
# OR operator (and given the fact that the 'name' lookup results come from the ir.translation table
# Performing a quick memory merge of ids in Python will give much better performance
ids = set()
ids.update(self.search(cr, user, args + [('default_code',operator,name)], limit=limit, context=context))
if not limit or len(ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
ids.update(self.search(cr, user, args + [('name',operator,name)], limit=(limit and (limit-len(ids)) or False) , context=context))
ids = list(ids)
if not ids:
ptrn = re.compile('(\[(.*?)\])')
res = ptrn.search(name)
if res:
ids = self.search(cr, user, [('default_code','=', res.group(2))] + args, limit=limit, context=context)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
#
# Could be overrided for variants matrices prices
#
def price_get(self, cr, uid, ids, ptype='list_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = product[ptype] or 0.0
if ptype == 'list_price':
res[product.id] = (res[product.id] * (product.price_margin or 1.0)) + \
product.price_extra
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
# This is right cause a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
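    # Worked example (illustrative figures, not from the source): with
    # list_price=100.0, price_margin=1.1 and price_extra=5.0, a call with
    # ptype='list_price' yields 100.0 * 1.1 + 5.0 = 115.0, before any UoM or
    # currency conversion requested through the context is applied.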
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context={}
if not default:
default = {}
# Craft our own `<name> (copy)` in en_US (self.copy_translation()
# will do the other languages).
context_wo_lang = context.copy()
context_wo_lang.pop('lang', None)
product = self.read(cr, uid, id, ['name'], context=context_wo_lang)
default = default.copy()
default.update(name=_("%s (copy)") % (product['name']))
if context.get('variant',False):
fields = ['product_tmpl_id', 'active', 'variants', 'default_code',
'price_margin', 'price_extra']
data = self.read(cr, uid, id, fields=fields, context=context)
for f in fields:
if f in default:
data[f] = default[f]
data['product_tmpl_id'] = data.get('product_tmpl_id', False) \
and data['product_tmpl_id'][0]
del data['id']
return self.create(cr, uid, data)
else:
return super(product_product, self).copy(cr, uid, id, default=default,
context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context and context.get('search_default_categ_id', False):
args.append((('categ_id', 'child_of', context['search_default_categ_id'])))
return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
product_product()
class product_packaging(osv.osv):
_name = "product.packaging"
_description = "Packaging"
_rec_name = 'ean'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
'name' : fields.text('Description', size=64),
'qty' : fields.float('Quantity by Package',
help="The total number of products you can put by pallet or box."),
'ul' : fields.many2one('product.ul', 'Type of Package', required=True),
'ul_qty' : fields.integer('Package by layer', help='The number of packages by layer'),
'rows' : fields.integer('Number of Layers', required=True,
help='The number of layers on a pallet or box'),
'product_id' : fields.many2one('product.product', 'Product', select=1, ondelete='cascade', required=True),
'ean' : fields.char('EAN', size=14,
help="The EAN code of the package unit."),
'code' : fields.char('Code', size=14,
help="The code of the transport unit."),
'weight': fields.float('Total Package Weight',
help='The weight of a full package, pallet or box.'),
'weight_ul': fields.float('Empty Package Weight'),
'height': fields.float('Height', help='The height of the package'),
'width': fields.float('Width', help='The width of the package'),
'length': fields.float('Length', help='The length of the package'),
}
def _check_ean_key(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
res = check_ean(pack.ean)
return res
_constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
for pckg in self.browse(cr, uid, ids, context=context):
p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
p_name += pckg.ul.name
res.append((pckg.id,p_name))
return res
def _get_1st_ul(self, cr, uid, context=None):
cr.execute('select id from product_ul order by id asc limit 1')
res = cr.fetchone()
return (res and res[0]) or False
_defaults = {
'rows' : lambda *a : 3,
'sequence' : lambda *a : 1,
'ul' : _get_1st_ul,
}
def checksum(ean):
salt = '31' * 6 + '3'
sum = 0
for ean_part, salt_part in zip(ean, salt):
sum += int(ean_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
product_packaging()
class product_supplierinfo(osv.osv):
_name = "product.supplierinfo"
_description = "Information about a product supplier"
def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
result = {}
product_uom_pool = self.pool.get('product.uom')
for supplier_info in self.browse(cr, uid, ids, context=context):
for field in fields:
result[supplier_info.id] = {field:False}
qty = supplier_info.min_qty
result[supplier_info.id]['qty'] = qty
return result
_columns = {
'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
'product_name': fields.char('Supplier Product Name', size=128, help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
'product_code': fields.char('Supplier Product Code', size=64, help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
        'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product suppliers."),
'product_uom': fields.related('product_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
        'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase from this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
'product_id' : fields.many2one('product.template', 'Product', required=True, ondelete='cascade', select=True),
'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the reception of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist'),
'company_id':fields.many2one('res.company','Company',select=1),
}
_defaults = {
'qty': lambda *a: 0.0,
'sequence': lambda *a: 1,
'delay': lambda *a: 1,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
}
def price_get(self, cr, uid, supplier_ids, product_id, product_qty=1, context=None):
"""
Calculate price from supplier pricelist.
@param supplier_ids: Ids of res.partner object.
@param product_id: Id of product.
@param product_qty: specify quantity to purchase.
"""
if type(supplier_ids) in (int,long,):
supplier_ids = [supplier_ids]
res = {}
product_pool = self.pool.get('product.product')
partner_pool = self.pool.get('res.partner')
pricelist_pool = self.pool.get('product.pricelist')
currency_pool = self.pool.get('res.currency')
currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
for supplier in partner_pool.browse(cr, uid, supplier_ids, context=context):
# Compute price from standard price of product
price = product_pool.price_get(cr, uid, [product_id], 'standard_price', context=context)[product_id]
# Compute price from Purchase pricelist of supplier
pricelist_id = supplier.property_product_pricelist_purchase.id
if pricelist_id:
price = pricelist_pool.price_get(cr, uid, [pricelist_id], product_id, product_qty, context=context).setdefault(pricelist_id, 0)
price = currency_pool.compute(cr, uid, pricelist_pool.browse(cr, uid, pricelist_id).currency_id.id, currency_id, price)
# Compute price from supplier pricelist which are in Supplier Information
supplier_info_ids = self.search(cr, uid, [('name','=',supplier.id),('product_id','=',product_id)])
if supplier_info_ids:
cr.execute('SELECT * ' \
'FROM pricelist_partnerinfo ' \
                           'WHERE suppinfo_id IN %s ' \
'AND min_quantity <= %s ' \
'ORDER BY min_quantity DESC LIMIT 1', (tuple(supplier_info_ids),product_qty,))
res2 = cr.dictfetchone()
if res2:
price = res2['price']
res[supplier.id] = price
return res
_order = 'sequence'
product_supplierinfo()
class pricelist_partnerinfo(osv.osv):
_name = 'pricelist.partnerinfo'
_columns = {
'name': fields.char('Description', size=64),
'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
        'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise"),
}
_order = 'min_quantity asc'
pricelist_partnerinfo()
class res_currency(osv.osv):
_inherit = 'res.currency'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
digits = cr.fetchone()
if digits and len(digits):
digits = digits[0]
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for currency_id in ids:
if currency_id == main_currency.id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
]
class decimal_precision(osv.osv):
_inherit = 'decimal.precision'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
res = cr.fetchone()
if res and len(res):
account_precision_id, digits = res
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for decimal_precision in ids:
if decimal_precision == account_precision_id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
latuannetnam/django-socialauth | myproject/myproject/settings.py | 1 | 4681 | """
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from config import *
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k)y7!t^&-p_-7po*$)sgo@x$c^mpohxm8jk2c!+40wp&od92t_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'socialauth',
'social.apps.django_app.default',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
#Added settings
#TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates').replace('\\','/'),
)
#--- Logging --------
# settings.py
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'myproject.log',
'formatter': 'verbose'
},
},
'loggers': {
'django': {
'handlers':['file'],
'propagate': True,
'level':'INFO',
},
'myproject': {
'handlers': ['file'],
'level': 'DEBUG',
},
'socialauth': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}
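# Modules under the packages configured above pick up the file handler through
# the standard logger hierarchy, e.g.:
#
#   import logging
#   logger = logging.getLogger(__name__)   # e.g. "socialauth.views"
#   logger.debug("written to myproject.log")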
#--------------------------Debug bar settings
INTERNAL_IPS = ('192.168.0.112',)  # trailing comma makes this a tuple, not a plain string
AUTH_PROFILE_MODULE = 'socialauth.UserProfile'
#----------------- Social auth settings
AUTHENTICATION_BACKENDS = (
#'social.backends.facebook.FacebookOAuth2',
'social.backends.facebook.Facebook2OAuth2',
'social.backends.twitter.TwitterOAuth',
'social.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
'django.core.context_processors.request',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/logged/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.mail.mail_validation',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'socialauth.pipeline.get_profile',
'social.pipeline.debug.debug',
)
FIELDS_STORED_IN_SESSION = ['key'] | apache-2.0 |
maryklayne/Funcao | sympy/polys/tests/test_factortools.py | 14 | 21613 | """Tools for polynomial factorization routines in characteristic zero. """
from sympy.polys.rings import ring, xring
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys import polyconfig as config
from sympy.polys.polyerrors import DomainError
from sympy.polys.polyclasses import ANP
from sympy.polys.specialpolys import f_polys, w_polys
from sympy import nextprime, sin, sqrt, I
from sympy.utilities.pytest import raises
from sympy.core.compatibility import xrange
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
w_1, w_2 = w_polys()
def test_dup_trial_division():
R, x = ring("x", ZZ)
assert R.dup_trial_division(x**5 + 8*x**4 + 25*x**3 + 38*x**2 + 28*x + 8, (x + 1, x + 2)) == [(x + 1, 2), (x + 2, 3)]
def test_dmp_trial_division():
R, x, y = ring("x,y", ZZ)
assert R.dmp_trial_division(x**5 + 8*x**4 + 25*x**3 + 38*x**2 + 28*x + 8, (x + 1, x + 2)) == [(x + 1, 2), (x + 2, 3)]
def test_dup_zz_mignotte_bound():
R, x = ring("x", ZZ)
assert R.dup_zz_mignotte_bound(2*x**2 + 3*x + 4) == 32
def test_dmp_zz_mignotte_bound():
R, x, y = ring("x,y", ZZ)
assert R.dmp_zz_mignotte_bound(2*x**2 + 3*x + 4) == 32
def test_dup_zz_hensel_step():
R, x = ring("x", ZZ)
f = x**4 - 1
g = x**3 + 2*x**2 - x - 2
h = x - 2
s = -2
t = 2*x**2 - 2*x - 1
G, H, S, T = R.dup_zz_hensel_step(5, f, g, h, s, t)
assert G == x**3 + 7*x**2 - x - 7
assert H == x - 7
assert S == 8
assert T == -8*x**2 - 12*x - 1
def test_dup_zz_hensel_lift():
R, x = ring("x", ZZ)
f = x**4 - 1
F = [x - 1, x - 2, x + 2, x + 1]
assert R.dup_zz_hensel_lift(ZZ(5), f, F, 4) == \
[x - 1, x - 182, x + 182, x + 1]
def test_dup_zz_irreducible_p():
R, x = ring("x", ZZ)
assert R.dup_zz_irreducible_p(3*x**4 + 2*x**3 + 6*x**2 + 8*x + 7) is None
assert R.dup_zz_irreducible_p(3*x**4 + 2*x**3 + 6*x**2 + 8*x + 4) is None
assert R.dup_zz_irreducible_p(3*x**4 + 2*x**3 + 6*x**2 + 8*x + 10) is True
assert R.dup_zz_irreducible_p(3*x**4 + 2*x**3 + 6*x**2 + 8*x + 14) is True
def test_dup_cyclotomic_p():
R, x = ring("x", ZZ)
assert R.dup_cyclotomic_p(x - 1) is True
assert R.dup_cyclotomic_p(x + 1) is True
assert R.dup_cyclotomic_p(x**2 + x + 1) is True
assert R.dup_cyclotomic_p(x**2 + 1) is True
assert R.dup_cyclotomic_p(x**4 + x**3 + x**2 + x + 1) is True
assert R.dup_cyclotomic_p(x**2 - x + 1) is True
assert R.dup_cyclotomic_p(x**6 + x**5 + x**4 + x**3 + x**2 + x + 1) is True
assert R.dup_cyclotomic_p(x**4 + 1) is True
assert R.dup_cyclotomic_p(x**6 + x**3 + 1) is True
assert R.dup_cyclotomic_p(0) is False
assert R.dup_cyclotomic_p(1) is False
assert R.dup_cyclotomic_p(x) is False
assert R.dup_cyclotomic_p(x + 2) is False
assert R.dup_cyclotomic_p(3*x + 1) is False
assert R.dup_cyclotomic_p(x**2 - 1) is False
f = x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1
assert R.dup_cyclotomic_p(f) is False
g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
assert R.dup_cyclotomic_p(g) is True
R, x = ring("x", QQ)
assert R.dup_cyclotomic_p(x**2 + x + 1) is True
assert R.dup_cyclotomic_p(QQ(1,2)*x**2 + x + 1) is False
R, x = ring("x", ZZ["y"])
assert R.dup_cyclotomic_p(x**2 + x + 1) is False
def test_dup_zz_cyclotomic_poly():
R, x = ring("x", ZZ)
assert R.dup_zz_cyclotomic_poly(1) == x - 1
assert R.dup_zz_cyclotomic_poly(2) == x + 1
assert R.dup_zz_cyclotomic_poly(3) == x**2 + x + 1
assert R.dup_zz_cyclotomic_poly(4) == x**2 + 1
assert R.dup_zz_cyclotomic_poly(5) == x**4 + x**3 + x**2 + x + 1
assert R.dup_zz_cyclotomic_poly(6) == x**2 - x + 1
assert R.dup_zz_cyclotomic_poly(7) == x**6 + x**5 + x**4 + x**3 + x**2 + x + 1
assert R.dup_zz_cyclotomic_poly(8) == x**4 + 1
assert R.dup_zz_cyclotomic_poly(9) == x**6 + x**3 + 1
def test_dup_zz_cyclotomic_factor():
R, x = ring("x", ZZ)
assert R.dup_zz_cyclotomic_factor(0) is None
assert R.dup_zz_cyclotomic_factor(1) is None
assert R.dup_zz_cyclotomic_factor(2*x**10 - 1) is None
assert R.dup_zz_cyclotomic_factor(x**10 - 3) is None
assert R.dup_zz_cyclotomic_factor(x**10 + x**5 - 1) is None
assert R.dup_zz_cyclotomic_factor(x + 1) == [x + 1]
assert R.dup_zz_cyclotomic_factor(x - 1) == [x - 1]
assert R.dup_zz_cyclotomic_factor(x**2 + 1) == [x**2 + 1]
assert R.dup_zz_cyclotomic_factor(x**2 - 1) == [x - 1, x + 1]
assert R.dup_zz_cyclotomic_factor(x**27 + 1) == \
[x + 1, x**2 - x + 1, x**6 - x**3 + 1, x**18 - x**9 + 1]
assert R.dup_zz_cyclotomic_factor(x**27 - 1) == \
[x - 1, x**2 + x + 1, x**6 + x**3 + 1, x**18 + x**9 + 1]
def test_dup_zz_factor():
R, x = ring("x", ZZ)
assert R.dup_zz_factor(0) == (0, [])
assert R.dup_zz_factor(7) == (7, [])
assert R.dup_zz_factor(-7) == (-7, [])
assert R.dup_zz_factor_sqf(0) == (0, [])
assert R.dup_zz_factor_sqf(7) == (7, [])
assert R.dup_zz_factor_sqf(-7) == (-7, [])
assert R.dup_zz_factor(2*x + 4) == (2, [(x + 2, 1)])
assert R.dup_zz_factor_sqf(2*x + 4) == (2, [x + 2])
f = x**4 + x + 1
for i in xrange(0, 20):
assert R.dup_zz_factor(f) == (1, [(f, 1)])
assert R.dup_zz_factor(x**2 + 2*x + 2) == \
(1, [(x**2 + 2*x + 2, 1)])
assert R.dup_zz_factor(18*x**2 + 12*x + 2) == \
(2, [(3*x + 1, 2)])
assert R.dup_zz_factor(-9*x**2 + 1) == \
(-1, [(3*x - 1, 1),
(3*x + 1, 1)])
assert R.dup_zz_factor_sqf(-9*x**2 + 1) == \
(-1, [3*x - 1,
3*x + 1])
assert R.dup_zz_factor(x**3 - 6*x**2 + 11*x - 6) == \
(1, [(x - 3, 1),
(x - 2, 1),
(x - 1, 1)])
assert R.dup_zz_factor_sqf(x**3 - 6*x**2 + 11*x - 6) == \
(1, [x - 3,
x - 2,
x - 1])
assert R.dup_zz_factor(3*x**3 + 10*x**2 + 13*x + 10) == \
(1, [(x + 2, 1),
(3*x**2 + 4*x + 5, 1)])
assert R.dup_zz_factor_sqf(3*x**3 + 10*x**2 + 13*x + 10) == \
(1, [x + 2,
3*x**2 + 4*x + 5])
assert R.dup_zz_factor(-x**6 + x**2) == \
(-1, [(x - 1, 1),
(x + 1, 1),
(x, 2),
(x**2 + 1, 1)])
f = 1080*x**8 + 5184*x**7 + 2099*x**6 + 744*x**5 + 2736*x**4 - 648*x**3 + 129*x**2 - 324
assert R.dup_zz_factor(f) == \
(1, [(5*x**4 + 24*x**3 + 9*x**2 + 12, 1),
(216*x**4 + 31*x**2 - 27, 1)])
f = -29802322387695312500000000000000000000*x**25 \
+ 2980232238769531250000000000000000*x**20 \
+ 1743435859680175781250000000000*x**15 \
+ 114142894744873046875000000*x**10 \
- 210106372833251953125*x**5 \
+ 95367431640625
assert R.dup_zz_factor(f) == \
(-95367431640625, [(5*x - 1, 1),
(100*x**2 + 10*x - 1, 2),
(625*x**4 + 125*x**3 + 25*x**2 + 5*x + 1, 1),
(10000*x**4 - 3000*x**3 + 400*x**2 - 20*x + 1, 2),
(10000*x**4 + 2000*x**3 + 400*x**2 + 30*x + 1, 2)])
f = x**10 - 1
config.setup('USE_CYCLOTOMIC_FACTOR', True)
F_0 = R.dup_zz_factor(f)
config.setup('USE_CYCLOTOMIC_FACTOR', False)
F_1 = R.dup_zz_factor(f)
assert F_0 == F_1 == \
(1, [(x - 1, 1),
(x + 1, 1),
(x**4 - x**3 + x**2 - x + 1, 1),
(x**4 + x**3 + x**2 + x + 1, 1)])
config.setup('USE_CYCLOTOMIC_FACTOR')
f = x**10 + 1
config.setup('USE_CYCLOTOMIC_FACTOR', True)
F_0 = R.dup_zz_factor(f)
config.setup('USE_CYCLOTOMIC_FACTOR', False)
F_1 = R.dup_zz_factor(f)
assert F_0 == F_1 == \
(1, [(x**2 + 1, 1),
(x**8 - x**6 + x**4 - x**2 + 1, 1)])
config.setup('USE_CYCLOTOMIC_FACTOR')
def test_dmp_zz_wang():
R, x,y,z = ring("x,y,z", ZZ)
UV, _x = ring("x", ZZ)
p = ZZ(nextprime(R.dmp_zz_mignotte_bound(w_1)))
assert p == 6291469
t_1, k_1, e_1 = y, 1, ZZ(-14)
t_2, k_2, e_2 = z, 2, ZZ(3)
t_3, k_3, e_3 = y + z, 2, ZZ(-11)
t_4, k_4, e_4 = y - z, 1, ZZ(-17)
T = [t_1, t_2, t_3, t_4]
K = [k_1, k_2, k_3, k_4]
E = [e_1, e_2, e_3, e_4]
T = zip([ t.drop(x) for t in T ], K)
A = [ZZ(-14), ZZ(3)]
S = R.dmp_eval_tail(w_1, A)
cs, s = UV.dup_primitive(S)
assert cs == 1 and s == S == \
1036728*_x**6 + 915552*_x**5 + 55748*_x**4 + 105621*_x**3 - 17304*_x**2 - 26841*_x - 644
assert R.dmp_zz_wang_non_divisors(E, cs, ZZ(4)) == [7, 3, 11, 17]
assert UV.dup_sqf_p(s) and UV.dup_degree(s) == R.dmp_degree(w_1)
_, H = UV.dup_zz_factor_sqf(s)
h_1 = 44*_x**2 + 42*_x + 1
h_2 = 126*_x**2 - 9*_x + 28
h_3 = 187*_x**2 - 23
assert H == [h_1, h_2, h_3]
LC = [ lc.drop(x) for lc in [-4*y - 4*z, -y*z**2, y**2 - z**2] ]
assert R.dmp_zz_wang_lead_coeffs(w_1, T, cs, E, H, A) == (w_1, H, LC)
H_1 = [44*x**2 + 42*x + 1, 126*x**2 - 9*x + 28, 187*x**2 - 23]
H_2 = [-4*x**2*y - 12*x**2 - 3*x*y + 1, -9*x**2*y - 9*x - 2*y, x**2*y**2 - 9*x**2 + y - 9]
H_3 = [-4*x**2*y - 12*x**2 - 3*x*y + 1, -9*x**2*y - 9*x - 2*y, x**2*y**2 - 9*x**2 + y - 9]
c_1 = -70686*x**5 - 5863*x**4 - 17826*x**3 + 2009*x**2 + 5031*x + 74
c_2 = 9*x**5*y**4 + 12*x**5*y**3 - 45*x**5*y**2 - 108*x**5*y - 324*x**5 + 18*x**4*y**3 - 216*x**4*y**2 - 810*x**4*y + 2*x**3*y**4 + 9*x**3*y**3 - 252*x**3*y**2 - 288*x**3*y - 945*x**3 - 30*x**2*y**2 - 414*x**2*y + 2*x*y**3 - 54*x*y**2 - 3*x*y + 81*x + 12*y
c_3 = -36*x**4*y**2 - 108*x**4*y - 27*x**3*y**2 - 36*x**3*y - 108*x**3 - 8*x**2*y**2 - 42*x**2*y - 6*x*y**2 + 9*x + 2*y
# TODO
#assert R.dmp_zz_diophantine(H_1, c_1, [], 5, p) == [-3*x, -2, 1]
#assert R.dmp_zz_diophantine(H_2, c_2, [ZZ(-14)], 5, p) == [-x*y, -3*x, -6]
#assert R.dmp_zz_diophantine(H_3, c_3, [ZZ(-14)], 5, p) == [0, 0, -1]
factors = R.dmp_zz_wang_hensel_lifting(w_1, H, LC, A, p)
assert R.dmp_expand(factors) == w_1
def test_issue_6355():
    # This tests a bug in the Wang algorithm that occurred only with a very
# specific set of random numbers.
random_sequence = [-1, -1, 0, 0, 0, 0, -1, -1, 0, -1, 3, -1, 3, 3, 3, 3, -1, 3]
R, x, y, z = ring("x,y,z", ZZ)
f = 2*x**2 + y*z - y - z**2 + z
assert R.dmp_zz_wang(f, seed=random_sequence) == [f]
def test_dmp_zz_factor():
R, x = ring("x", ZZ)
assert R.dmp_zz_factor(0) == (0, [])
assert R.dmp_zz_factor(7) == (7, [])
assert R.dmp_zz_factor(-7) == (-7, [])
assert R.dmp_zz_factor(x**2 - 9) == (1, [(x - 3, 1), (x + 3, 1)])
R, x, y = ring("x,y", ZZ)
assert R.dmp_zz_factor(0) == (0, [])
assert R.dmp_zz_factor(7) == (7, [])
assert R.dmp_zz_factor(-7) == (-7, [])
assert R.dmp_zz_factor(x) == (1, [(x, 1)])
assert R.dmp_zz_factor(4*x) == (4, [(x, 1)])
assert R.dmp_zz_factor(4*x + 2) == (2, [(2*x + 1, 1)])
assert R.dmp_zz_factor(x*y + 1) == (1, [(x*y + 1, 1)])
assert R.dmp_zz_factor(y**2 + 1) == (1, [(y**2 + 1, 1)])
assert R.dmp_zz_factor(y**2 - 1) == (1, [(y - 1, 1), (y + 1, 1)])
assert R.dmp_zz_factor(x**2*y**2 + 6*x**2*y + 9*x**2 - 1) == (1, [(x*y + 3*x - 1, 1), (x*y + 3*x + 1, 1)])
assert R.dmp_zz_factor(x**2*y**2 - 9) == (1, [(x*y - 3, 1), (x*y + 3, 1)])
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(x**2*y**2*z**2 - 9) == \
(1, [(x*y*z - 3, 1),
(x*y*z + 3, 1)])
R, x, y, z, u = ring("x,y,z,u", ZZ)
assert R.dmp_zz_factor(x**2*y**2*z**2*u**2 - 9) == \
(1, [(x*y*z*u - 3, 1),
(x*y*z*u + 3, 1)])
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(f_1) == \
(1, [(x + y*z + 20, 1),
(x*y + z + 10, 1),
(x*z + y + 30, 1)])
assert R.dmp_zz_factor(f_2) == \
(1, [(x**2*y**2 + x**2*z**2 + y + 90, 1),
(x**3*y + x**3*z + z - 11, 1)])
assert R.dmp_zz_factor(f_3) == \
(1, [(x**2*y**2 + x*z**4 + x + z, 1),
(x**3 + x*y*z + y**2 + y*z**3, 1)])
assert R.dmp_zz_factor(f_4) == \
(-1, [(x*y**3 + z**2, 1),
(x**2*z + y**4*z**2 + 5, 1),
(x**3*y - z**2 - 3, 1),
(x**3*y**4 + z**2, 1)])
assert R.dmp_zz_factor(f_5) == \
(-1, [(x + y - z, 3)])
R, x, y, z, t = ring("x,y,z,t", ZZ)
assert R.dmp_zz_factor(f_6) == \
(1, [(47*x*y + z**3*t**2 - t**2, 1),
(45*x**3 - 9*y**3 - y**2 + 3*z**3 + 2*z*t, 1)])
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_zz_factor(w_1) == \
(1, [(x**2*y**2 - x**2*z**2 + y - z**2, 1),
(x**2*y*z**2 + 3*x*z + 2*y, 1),
(4*x**2*y + 4*x**2*z + x*y*z - 1, 1)])
R, x, y = ring("x,y", ZZ)
f = -12*x**16*y + 240*x**12*y**3 - 768*x**10*y**4 + 1080*x**8*y**5 - 768*x**6*y**6 + 240*x**4*y**7 - 12*y**9
assert R.dmp_zz_factor(f) == \
(-12, [(y, 1),
(x**2 - y, 6),
(x**4 + 6*x**2*y + y**2, 1)])
def test_dup_ext_factor():
R, x = ring("x", QQ.algebraic_field(I))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(1)], QQ)
assert R.dup_ext_factor(0) == (anp([]), [])
f = anp([QQ(1)])*x + anp([QQ(1)])
assert R.dup_ext_factor(f) == (anp([QQ(1)]), [(f, 1)])
g = anp([QQ(2)])*x + anp([QQ(2)])
assert R.dup_ext_factor(g) == (anp([QQ(2)]), [(f, 1)])
f = anp([QQ(7)])*x**4 + anp([QQ(1, 1)])
g = anp([QQ(1)])*x**4 + anp([QQ(1, 7)])
assert R.dup_ext_factor(f) == (anp([QQ(7)]), [(g, 1)])
f = anp([QQ(1)])*x**4 + anp([QQ(1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(1, 1)]), [(anp([QQ(1)])*x**2 + anp([QQ(-1), QQ(0)]), 1),
(anp([QQ(1)])*x**2 + anp([QQ( 1), QQ(0)]), 1)])
f = anp([QQ(4, 1)])*x**2 + anp([QQ(9, 1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(4, 1)]), [(anp([QQ(1, 1)])*x + anp([-QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)])*x + anp([ QQ(3, 2), QQ(0, 1)]), 1)])
f = anp([QQ(4, 1)])*x**4 + anp([QQ(8, 1)])*x**3 + anp([QQ(77, 1)])*x**2 + anp([QQ(18, 1)])*x + anp([QQ(153, 1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(4, 1)]), [(anp([QQ(1, 1)])*x + anp([-QQ(4, 1), QQ(1, 1)]), 1),
(anp([QQ(1, 1)])*x + anp([-QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)])*x + anp([ QQ(3, 2), QQ(0, 1)]), 1),
(anp([QQ(1, 1)])*x + anp([ QQ(4, 1), QQ(1, 1)]), 1)])
R, x = ring("x", QQ.algebraic_field(sqrt(2)))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(-2)], QQ)
f = anp([QQ(1)])*x**4 + anp([QQ(1, 1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(1)]), [(anp([QQ(1)])*x**2 + anp([QQ(-1), QQ(0)])*x + anp([QQ(1)]), 1),
(anp([QQ(1)])*x**2 + anp([QQ( 1), QQ(0)])*x + anp([QQ(1)]), 1)])
f = anp([QQ(1, 1)])*x**2 + anp([QQ(2), QQ(0)])*x + anp([QQ(2, 1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(1, 1)]), [(anp([1])*x + anp([1, 0]), 2)])
assert R.dup_ext_factor(f**3) == \
(anp([QQ(1, 1)]), [(anp([1])*x + anp([1, 0]), 6)])
f *= anp([QQ(2, 1)])
assert R.dup_ext_factor(f) == \
(anp([QQ(2, 1)]), [(anp([1])*x + anp([1, 0]), 2)])
assert R.dup_ext_factor(f**3) == \
(anp([QQ(8, 1)]), [(anp([1])*x + anp([1, 0]), 6)])
def test_dmp_ext_factor():
R, x,y = ring("x,y", QQ.algebraic_field(sqrt(2)))
def anp(x):
return ANP(x, [QQ(1), QQ(0), QQ(-2)], QQ)
assert R.dmp_ext_factor(0) == (anp([]), [])
f = anp([QQ(1)])*x + anp([QQ(1)])
assert R.dmp_ext_factor(f) == (anp([QQ(1)]), [(f, 1)])
g = anp([QQ(2)])*x + anp([QQ(2)])
assert R.dmp_ext_factor(g) == (anp([QQ(2)]), [(f, 1)])
f = anp([QQ(1)])*x**2 + anp([QQ(-2)])*y**2
assert R.dmp_ext_factor(f) == \
(anp([QQ(1)]), [(anp([QQ(1)])*x + anp([QQ(-1), QQ(0)])*y, 1),
(anp([QQ(1)])*x + anp([QQ( 1), QQ(0)])*y, 1)])
f = anp([QQ(2)])*x**2 + anp([QQ(-4)])*y**2
assert R.dmp_ext_factor(f) == \
(anp([QQ(2)]), [(anp([QQ(1)])*x + anp([QQ(-1), QQ(0)])*y, 1),
(anp([QQ(1)])*x + anp([QQ( 1), QQ(0)])*y, 1)])
def test_dup_factor_list():
R, x = ring("x", ZZ)
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(7) == (7, [])
R, x = ring("x", QQ)
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x = ring("x", ZZ['t'])
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(7) == (7, [])
R, x = ring("x", QQ['t'])
assert R.dup_factor_list(0) == (0, [])
assert R.dup_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x = ring("x", ZZ)
assert R.dup_factor_list_include(0) == [(0, 1)]
assert R.dup_factor_list_include(7) == [(7, 1)]
assert R.dup_factor_list(x**2 + 2*x + 1) == (1, [(x + 1, 2)])
assert R.dup_factor_list_include(x**2 + 2*x + 1) == [(x + 1, 2)]
# issue 8037
assert R.dup_factor_list(6*x**2 - 5*x - 6) == (1, [(2*x - 3, 1), (3*x + 2, 1)])
R, x = ring("x", QQ)
assert R.dup_factor_list(QQ(1,2)*x**2 + x + QQ(1,2)) == (QQ(1, 2), [(x + 1, 2)])
R, x = ring("x", FF(2))
assert R.dup_factor_list(x**2 + 1) == (1, [(x + 1, 2)])
R, x = ring("x", RR)
assert R.dup_factor_list(1.0*x**2 + 2.0*x + 1.0) == (1.0, [(1.0*x + 1.0, 2)])
assert R.dup_factor_list(2.0*x**2 + 4.0*x + 2.0) == (2.0, [(1.0*x + 1.0, 2)])
f = 6.7225336055071*x**2 - 10.6463972754741*x - 0.33469524022264
coeff, factors = R.dup_factor_list(f)
assert coeff == RR(1.0) and len(factors) == 1 and factors[0][0].almosteq(f, 1e-10) and factors[0][1] == 1
Rt, t = ring("t", ZZ)
R, x = ring("x", Rt)
f = 4*t*x**2 + 4*t**2*x
assert R.dup_factor_list(f) == \
(4*t, [(x, 1),
(x + t, 1)])
Rt, t = ring("t", QQ)
R, x = ring("x", Rt)
f = QQ(1, 2)*t*x**2 + QQ(1, 2)*t**2*x
assert R.dup_factor_list(f) == \
(QQ(1, 2)*t, [(x, 1),
(x + t, 1)])
R, x = ring("x", QQ.algebraic_field(I))
def anp(element):
return ANP(element, [QQ(1), QQ(0), QQ(1)], QQ)
f = anp([QQ(1, 1)])*x**4 + anp([QQ(2, 1)])*x**2
assert R.dup_factor_list(f) == \
(anp([QQ(1, 1)]), [(anp([QQ(1, 1)])*x, 2),
(anp([QQ(1, 1)])*x**2 + anp([])*x + anp([QQ(2, 1)]), 1)])
R, x = ring("x", EX)
raises(DomainError, lambda: R.dup_factor_list(EX(sin(1))))
def test_dmp_factor_list():
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list(0) == (ZZ(0), [])
assert R.dmp_factor_list(7) == (7, [])
R, x, y = ring("x,y", QQ)
assert R.dmp_factor_list(0) == (QQ(0), [])
assert R.dmp_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
Rt, t = ring("t", ZZ)
R, x, y = ring("x,y", Rt)
assert R.dmp_factor_list(0) == (0, [])
assert R.dmp_factor_list(7) == (ZZ(7), [])
Rt, t = ring("t", QQ)
R, x, y = ring("x,y", Rt)
assert R.dmp_factor_list(0) == (0, [])
assert R.dmp_factor_list(QQ(1, 7)) == (QQ(1, 7), [])
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list_include(0) == [(0, 1)]
assert R.dmp_factor_list_include(7) == [(7, 1)]
R, X = xring("x:200", ZZ)
f, g = X[0]**2 + 2*X[0] + 1, X[0] + 1
assert R.dmp_factor_list(f) == (1, [(g, 2)])
f, g = X[-1]**2 + 2*X[-1] + 1, X[-1] + 1
assert R.dmp_factor_list(f) == (1, [(g, 2)])
R, x = ring("x", ZZ)
assert R.dmp_factor_list(x**2 + 2*x + 1) == (1, [(x + 1, 2)])
R, x = ring("x", QQ)
assert R.dmp_factor_list(QQ(1,2)*x**2 + x + QQ(1,2)) == (QQ(1,2), [(x + 1, 2)])
R, x, y = ring("x,y", ZZ)
assert R.dmp_factor_list(x**2 + 2*x + 1) == (1, [(x + 1, 2)])
R, x, y = ring("x,y", QQ)
assert R.dmp_factor_list(QQ(1,2)*x**2 + x + QQ(1,2)) == (QQ(1,2), [(x + 1, 2)])
R, x, y = ring("x,y", ZZ)
f = 4*x**2*y + 4*x*y**2
assert R.dmp_factor_list(f) == \
(4, [(y, 1),
(x, 1),
(x + y, 1)])
assert R.dmp_factor_list_include(f) == \
[(4*y, 1),
(x, 1),
(x + y, 1)]
R, x, y = ring("x,y", QQ)
f = QQ(1,2)*x**2*y + QQ(1,2)*x*y**2
assert R.dmp_factor_list(f) == \
(QQ(1,2), [(y, 1),
(x, 1),
(x + y, 1)])
R, x, y = ring("x,y", RR)
f = 2.0*x**2 - 8.0*y**2
assert R.dmp_factor_list(f) == \
(RR(2.0), [(1.0*x - 2.0*y, 1),
(1.0*x + 2.0*y, 1)])
f = 6.7225336055071*x**2*y**2 - 10.6463972754741*x*y - 0.33469524022264
coeff, factors = R.dmp_factor_list(f)
assert coeff == RR(1.0) and len(factors) == 1 and factors[0][0].almosteq(f, 1e-10) and factors[0][1] == 1
Rt, t = ring("t", ZZ)
R, x, y = ring("x,y", Rt)
f = 4*t*x**2 + 4*t**2*x
assert R.dmp_factor_list(f) == \
(4*t, [(x, 1),
(x + t, 1)])
Rt, t = ring("t", QQ)
R, x, y = ring("x,y", Rt)
f = QQ(1, 2)*t*x**2 + QQ(1, 2)*t**2*x
assert R.dmp_factor_list(f) == \
(QQ(1, 2)*t, [(x, 1),
(x + t, 1)])
R, x, y = ring("x,y", FF(2))
raises(NotImplementedError, lambda: R.dmp_factor_list(x**2 + y**2))
R, x, y = ring("x,y", EX)
raises(DomainError, lambda: R.dmp_factor_list(EX(sin(1))))
def test_dup_irreducible_p():
R, x = ring("x", ZZ)
assert R.dup_irreducible_p(x**2 + x + 1) is True
assert R.dup_irreducible_p(x**2 + 2*x + 1) is False
def test_dmp_irreducible_p():
R, x, y = ring("x,y", ZZ)
assert R.dmp_irreducible_p(x**2 + x + 1) is True
assert R.dmp_irreducible_p(x**2 + 2*x + 1) is False
| bsd-3-clause |
adrienbrault/home-assistant | homeassistant/components/microsoft_face_detect/image_processing.py | 21 | 3340 | """Component that will help set the Microsoft face detect processing."""
import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
ATTR_AGE,
ATTR_GENDER,
ATTR_GLASSES,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingFaceEntity,
)
from homeassistant.components.microsoft_face import DATA_MICROSOFT_FACE
from homeassistant.core import split_entity_id
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SUPPORTED_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER, ATTR_GLASSES]
CONF_ATTRIBUTES = "attributes"
DEFAULT_ATTRIBUTES = [ATTR_AGE, ATTR_GENDER]
def validate_attributes(list_attributes):
"""Validate face attributes."""
for attr in list_attributes:
if attr not in SUPPORTED_ATTRIBUTES:
raise vol.Invalid(f"Invalid attribute {attr}")
return list_attributes
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ATTRIBUTES, default=DEFAULT_ATTRIBUTES): vol.All(
cv.ensure_list, validate_attributes
)
}
)
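# A configuration.yaml entry accepted by the schema above looks like the
# sketch below (the camera entity id is illustrative):
#
#   image_processing:
#     - platform: microsoft_face_detect
#       source:
#         - entity_id: camera.front_door
#       attributes:
#         - age
#         - gender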
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Microsoft Face detection platform."""
api = hass.data[DATA_MICROSOFT_FACE]
attributes = config[CONF_ATTRIBUTES]
entities = []
for camera in config[CONF_SOURCE]:
entities.append(
MicrosoftFaceDetectEntity(
camera[CONF_ENTITY_ID], api, attributes, camera.get(CONF_NAME)
)
)
async_add_entities(entities)
class MicrosoftFaceDetectEntity(ImageProcessingFaceEntity):
"""Microsoft Face API entity for identify."""
def __init__(self, camera_entity, api, attributes, name=None):
"""Initialize Microsoft Face."""
super().__init__()
self._api = api
self._camera = camera_entity
self._attributes = attributes
if name:
self._name = name
else:
self._name = f"MicrosoftFace {split_entity_id(camera_entity)[1]}"
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the entity."""
return self._name
async def async_process_image(self, image):
"""Process image.
This method is a coroutine.
"""
face_data = None
try:
face_data = await self._api.call_api(
"post",
"detect",
image,
binary=True,
params={"returnFaceAttributes": ",".join(self._attributes)},
)
except HomeAssistantError as err:
_LOGGER.error("Can't process image on microsoft face: %s", err)
return
if not face_data:
face_data = []
faces = []
for face in face_data:
face_attr = {}
for attr in self._attributes:
if attr in face["faceAttributes"]:
face_attr[attr] = face["faceAttributes"][attr]
if face_attr:
faces.append(face_attr)
self.async_process_faces(faces, len(face_data))
| mit |
ybellavance/python-for-android | python3-alpha/python3-src/Lib/ctypes/test/test_python_api.py | 50 | 3066 | from ctypes import *
import unittest, sys
from ctypes.test import is_resource_enabled
################################################################
# This section should be moved into ctypes\__init__.py, when it's ready.
from _ctypes import PyObj_FromPtr
################################################################
from sys import getrefcount as grc
if sys.version_info > (2, 4):
c_py_ssize_t = c_size_t
else:
c_py_ssize_t = c_int
class PythonAPITestCase(unittest.TestCase):
def test_PyBytes_FromStringAndSize(self):
PyBytes_FromStringAndSize = pythonapi.PyBytes_FromStringAndSize
PyBytes_FromStringAndSize.restype = py_object
PyBytes_FromStringAndSize.argtypes = c_char_p, c_py_ssize_t
self.assertEqual(PyBytes_FromStringAndSize(b"abcdefghi", 3), b"abc")
def test_PyString_FromString(self):
pythonapi.PyBytes_FromString.restype = py_object
pythonapi.PyBytes_FromString.argtypes = (c_char_p,)
s = b"abc"
refcnt = grc(s)
pyob = pythonapi.PyBytes_FromString(s)
self.assertEqual(grc(s), refcnt)
self.assertEqual(s, pyob)
del pyob
self.assertEqual(grc(s), refcnt)
if is_resource_enabled("refcount"):
# This test is unreliable, because it is possible that code in
# unittest changes the refcount of the '42' integer. So, it
# is disabled by default.
def test_PyLong_Long(self):
ref42 = grc(42)
pythonapi.PyLong_FromLong.restype = py_object
self.assertEqual(pythonapi.PyLong_FromLong(42), 42)
self.assertEqual(grc(42), ref42)
pythonapi.PyLong_AsLong.argtypes = (py_object,)
pythonapi.PyLong_AsLong.restype = c_long
res = pythonapi.PyLong_AsLong(42)
self.assertEqual(grc(res), ref42 + 1)
del res
self.assertEqual(grc(42), ref42)
def test_PyObj_FromPtr(self):
s = "abc def ghi jkl"
ref = grc(s)
# id(python-object) is the address
pyobj = PyObj_FromPtr(id(s))
self.assertTrue(s is pyobj)
self.assertEqual(grc(s), ref + 1)
del pyobj
self.assertEqual(grc(s), ref)
def test_PyOS_snprintf(self):
PyOS_snprintf = pythonapi.PyOS_snprintf
PyOS_snprintf.argtypes = POINTER(c_char), c_size_t, c_char_p
buf = c_buffer(256)
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s", b"ctypes")
self.assertEqual(buf.value, b"Hello from ctypes")
PyOS_snprintf(buf, sizeof(buf), b"Hello from %s (%d, %d, %d)", b"ctypes", 1, 2, 3)
self.assertEqual(buf.value, b"Hello from ctypes (1, 2, 3)")
# not enough arguments
self.assertRaises(TypeError, PyOS_snprintf, buf)
def test_pyobject_repr(self):
self.assertEqual(repr(py_object()), "py_object(<NULL>)")
self.assertEqual(repr(py_object(42)), "py_object(42)")
self.assertEqual(repr(py_object(object)), "py_object(%r)" % object)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
ojengwa/talk | venv/lib/python2.7/site-packages/pip/cmdoptions.py | 117 | 11475 | """
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason being that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.index import PyPI
from pip.locations import CA_BUNDLE_PATH, USER_CACHE_DIR, src_prefix
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option.make())
return option_group
class OptionMaker(object):
"""Class that stores the args/kwargs that would be used to make an Option,
for making them later, and uses deepcopy's to reset state."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def make(self):
args_copy = copy.deepcopy(self.args)
kwargs_copy = copy.deepcopy(self.kwargs)
return Option(*args_copy, **kwargs_copy)
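# The options below are therefore stored as OptionMaker instances; a command
# adds a fresh copy to its parser with e.g. ``parser.add_option(verbose.make())``,
# or pulls in a whole group of them via ``make_option_group(index_group, parser)``.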
###########
# options #
###########
help_ = OptionMaker(
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = OptionMaker(
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = OptionMaker(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = OptionMaker(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = OptionMaker(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = OptionMaker(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = OptionMaker(
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
log_explicit_levels = OptionMaker(
    # Writes the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
no_input = OptionMaker(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = OptionMaker(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = OptionMaker(
'--retries',
dest='retries',
type='int',
default=5,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = OptionMaker(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
exists_action = OptionMaker(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
'--cert',
dest='cert',
type='str',
default=CA_BUNDLE_PATH,
metavar='path',
help="Path to alternate CA bundle.")
client_cert = OptionMaker(
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
index_url = OptionMaker(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default=PyPI.simple_url,
help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to archives. "
"If a local path or file:// url that's a directory, then look for "
"archives in the directory listing.")
# TODO: Remove after 6.0
use_mirrors = OptionMaker(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help=SUPPRESS_HELP)
# TODO: Remove after 6.0
mirrors = OptionMaker(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help=SUPPRESS_HELP)
allow_external = OptionMaker(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is externally hosted",
)
allow_all_external = OptionMaker(
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all packages that are externally hosted",
)
trusted_host = OptionMaker(
"--trusted-host",
dest="trusted_hosts",
action="append",
metavar="HOSTNAME",
default=[],
help="Mark this host as trusted, even though it does not have valid or "
"any HTTPS.",
)
# Remove after 7.0
no_allow_external = OptionMaker(
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
allow_unsafe = OptionMaker(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is hosted "
"in an insecure and unverifiable way",
)
# Remove after 7.0
no_allow_unsafe = OptionMaker(
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
editable = OptionMaker(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = OptionMaker(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
use_wheel = OptionMaker(
'--use-wheel',
dest='use_wheel',
action='store_true',
help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations.'),
)
cache_dir = OptionMaker(
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = OptionMaker(
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
download_cache = OptionMaker(
'--download-cache',
dest='download_cache',
default=None,
help=SUPPRESS_HELP)
no_deps = OptionMaker(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = OptionMaker(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = OptionMaker(
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = OptionMaker(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = OptionMaker(
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
disable_pip_version_check = OptionMaker(
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download. Implied with --no-index.")
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
log_explicit_levels,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
trusted_host,
cert,
client_cert,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
use_mirrors,
mirrors,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
| mit |
theonion/bulbs2-elasticsearch | setup.py | 1 | 1308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
version = "0.0.1"
name = "bulbs2-elasticsearch"
package = "bulbs2_elasticsearch"
description = "The Latest in Onion Technical Debt"
url = "https://github.com/theonion/bulbs2-elasticsearch"
author = "Vince Forgione"
author_email = "[email protected]"
license = "MIT"
setup_requires = [
]
dev_requires = [
"pytest",
"pytest-django",
"model_mommy",
]
install_requires = [
"django>=1.8,<1.9",
"djes",
]
server_requires = []
if "test" in sys.argv:
setup_requires.extend(dev_requires)
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ["tests"]
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
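# With the cmdclass mapping below, ``python setup.py test`` runs the pytest
# suite against the ``tests`` directory; the dev requirements are pulled in
# through setup_requires/tests_require.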
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=find_packages(),
install_requires=install_requires,
tests_require=dev_requires,
extras_require={
"dev": dev_requires,
},
cmdclass={"test": PyTest}
)
| mit |
SoLoHiC/mezzanine | mezzanine/accounts/views.py | 21 | 6696 | from __future__ import unicode_literals
from django.contrib.auth import (login as auth_login, authenticate,
logout as auth_logout, get_user_model)
from django.contrib.auth.decorators import login_required
from django.contrib.messages import info, error
from django.core.urlresolvers import NoReverseMatch, get_script_prefix, reverse
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from mezzanine.accounts import get_profile_form
from mezzanine.accounts.forms import LoginForm, PasswordResetForm
from mezzanine.conf import settings
from mezzanine.utils.email import send_verification_mail, send_approve_mail
from mezzanine.utils.urls import login_redirect, next_url
from mezzanine.utils.views import render
User = get_user_model()
def login(request, template="accounts/account_login.html", extra_context=None):
"""
Login form.
"""
form = LoginForm(request.POST or None)
if request.method == "POST" and form.is_valid():
authenticated_user = form.save()
info(request, _("Successfully logged in"))
auth_login(request, authenticated_user)
return login_redirect(request)
context = {"form": form, "title": _("Log in")}
context.update(extra_context or {})
return render(request, template, context)
def logout(request):
"""
Log the user out.
"""
auth_logout(request)
info(request, _("Successfully logged out"))
return redirect(next_url(request) or get_script_prefix())
def signup(request, template="accounts/account_signup.html",
extra_context=None):
"""
Signup form.
"""
profile_form = get_profile_form()
form = profile_form(request.POST or None, request.FILES or None)
if request.method == "POST" and form.is_valid():
new_user = form.save()
if not new_user.is_active:
if settings.ACCOUNTS_APPROVAL_REQUIRED:
send_approve_mail(request, new_user)
info(request, _("Thanks for signing up! You'll receive "
"an email when your account is activated."))
else:
send_verification_mail(request, new_user, "signup_verify")
info(request, _("A verification email has been sent with "
"a link for activating your account."))
return redirect(next_url(request) or "/")
else:
info(request, _("Successfully signed up"))
auth_login(request, new_user)
return login_redirect(request)
context = {"form": form, "title": _("Sign up")}
context.update(extra_context or {})
return render(request, template, context)
def signup_verify(request, uidb36=None, token=None):
"""
View for the link in the verification email sent to a new user
when they create an account and ``ACCOUNTS_VERIFICATION_REQUIRED``
is set to ``True``. Activates the user and logs them in,
redirecting to the URL they tried to access when signing up.
"""
user = authenticate(uidb36=uidb36, token=token, is_active=False)
if user is not None:
user.is_active = True
user.save()
auth_login(request, user)
info(request, _("Successfully signed up"))
return login_redirect(request)
else:
error(request, _("The link you clicked is no longer valid."))
return redirect("/")
@login_required
def profile_redirect(request):
"""
Just gives the URL prefix for profiles an action - redirect
to the logged in user's profile.
"""
return redirect("profile", username=request.user.username)
def profile(request, username, template="accounts/account_profile.html",
extra_context=None):
"""
Display a profile.
"""
lookup = {"username__iexact": username, "is_active": True}
context = {"profile_user": get_object_or_404(User, **lookup)}
context.update(extra_context or {})
return render(request, template, context)
@login_required
def account_redirect(request):
"""
Just gives the URL prefix for accounts an action - redirect
to the profile update form.
"""
return redirect("profile_update")
@login_required
def profile_update(request, template="accounts/account_profile_update.html",
extra_context=None):
"""
Profile update form.
"""
profile_form = get_profile_form()
form = profile_form(request.POST or None, request.FILES or None,
instance=request.user)
if request.method == "POST" and form.is_valid():
user = form.save()
info(request, _("Profile updated"))
try:
return redirect("profile", username=user.username)
except NoReverseMatch:
return redirect("profile_update")
context = {"form": form, "title": _("Update Profile")}
context.update(extra_context or {})
return render(request, template, context)
def password_reset(request, template="accounts/account_password_reset.html",
extra_context=None):
form = PasswordResetForm(request.POST or None)
if request.method == "POST" and form.is_valid():
user = form.save()
send_verification_mail(request, user, "password_reset_verify")
info(request, _("A verification email has been sent with "
"a link for resetting your password."))
context = {"form": form, "title": _("Password Reset")}
context.update(extra_context or {})
return render(request, template, context)
def password_reset_verify(request, uidb36=None, token=None):
user = authenticate(uidb36=uidb36, token=token, is_active=True)
if user is not None:
auth_login(request, user)
return redirect("profile_update")
else:
error(request, _("The link you clicked is no longer valid."))
return redirect("/")
def old_account_redirect(request, url_suffix):
"""
Catches and redirects any unmatched account URLs to their
correct version (account/ to accounts/) as per #934.
The URL is constructed manually, handling slashes as appropriate.
"""
if url_suffix is None:
correct_url = reverse("account_redirect")
else:
correct_url = "{account_url}{middle_slash}{suffix}{slash}".format(
account_url=reverse("account_redirect"),
middle_slash="/" if not settings.APPEND_SLASH else "",
suffix=url_suffix,
slash="/" if settings.APPEND_SLASH else "")
next = next_url(request)
if next:
correct_url += "?next=%s" % next
return redirect(correct_url)
| bsd-2-clause |
ysasaki6023/NeuralNetworkStudy | cifar06/net7.py | 1 | 2799 | #!/usr/bin/env python
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.utils import conv
class ImageProcessNetwork(chainer.Chain):
def __init__(self, F_unit):
super(ImageProcessNetwork, self).__init__()
self.IsTrain = True
self.F_unit = F_unit
self.add_link("P1_1",L.Convolution2D( 3, 96, 3, pad=1 ))
self.add_link("P1_2",L.Convolution2D( 96, 96, 3, pad=1 ))
self.add_link("P2_1",L.Convolution2D( 96, 192, 3, pad=1 ))
self.add_link("P2_2",L.Convolution2D(192, 192, 3, pad=1 ))
self.add_link("P2_3",L.Convolution2D(192, 192, 3, pad=1 ))
self.add_link("P3_1",L.Convolution2D(192, 192, 3, pad=1 ))
self.add_link("P3_2",L.Convolution2D(192, 192, 1, pad=1 ))
self.add_link("P3_3",L.Convolution2D(192,F_unit, 1, pad=1 ))
self.add_link("BN1_1",L.BatchNormalization( 96))
self.add_link("BN1_2",L.BatchNormalization( 96))
self.add_link("BN2_1",L.BatchNormalization(192))
self.add_link("BN2_2",L.BatchNormalization(192))
self.add_link("BN2_3",L.BatchNormalization(192))
self.add_link("BN3_1",L.BatchNormalization(192))
self.add_link("BN3_2",L.BatchNormalization(192))
self.add_link("BN3_3",L.BatchNormalization(192))
self.add_link("L1" ,L.Linear(100, 100))
self.add_link("L2" ,L.Linear(100, self.F_unit))
return
def setTrainMode(self, IsTrain):
self.IsTrain = IsTrain
return
def __call__(self, x):
h = x
h = self.__dict__["P1_1"](F.leaky_relu(h))
h = self.__dict__["BN1_1"](h)
h = self.__dict__["P1_2"](F.leaky_relu(h))
h = self.__dict__["BN1_2"](h)
h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
h = self.__dict__["P2_1"](h)
h = self.__dict__["BN2_1"](h)
h = self.__dict__["P2_2"](F.leaky_relu(h))
h = self.__dict__["BN2_2"](h)
h = self.__dict__["P2_2"](F.leaky_relu(h))
h = self.__dict__["BN2_3"](h)
h = F.max_pooling_2d(F.leaky_relu(h), ksize=3, stride=2, cover_all=False)
h = self.__dict__["P3_1"](h)
h = self.__dict__["BN3_1"](h)
h = self.__dict__["P3_2"](F.leaky_relu(h))
h = self.__dict__["BN3_2"](h)
h = self.__dict__["P3_3"](F.leaky_relu(h))
h = F.average_pooling_2d(F.leaky_relu(h), ksize=6)
#h = self.__dict__["BN3_3"](h)
h = self.__dict__["L1"](F.leaky_relu(h))
h = self.__dict__["L2"](h)
y = h
#h = F.spatial_pyramid_pooling_2d(F.leaky_relu(h), 3)
#y = F.reshape(h,(len(h.data),self.F_unit))
return y
def GenModel(F_unit):
return L.Classifier(ImageProcessNetwork(F_unit))
| mit |
jaumemarti/l10n-spain-txerpa | l10n_es_bank_statement/models/__init__.py | 4 | 1259 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) All rights reserved:
# 2013-2014 Servicios Tecnológicos Avanzados (http://serviciosbaeza.com)
# Pedro Manuel Baeza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_bank_statement
from . import account_statement_profile
from . import account_statement_completion_rule
from . import c43_parser
from . import c43_account_statement_profile
| agpl-3.0 |
yjmade/odoo | openerp/addons/test_new_api/models.py | 31 | 7010 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class res_partner(osv.Model):
_inherit = 'res.partner'
#
# add related fields to test them
#
_columns = {
# a regular one
'related_company_partner_id': fields.related(
'company_id', 'partner_id', type='many2one', obj='res.partner'),
# a related field with a single field
'single_related_company_id': fields.related(
'company_id', type='many2one', obj='res.company'),
# a related field with a single field that is also a related field!
'related_related_company_id': fields.related(
'single_related_company_id', type='many2one', obj='res.company'),
}
from openerp import models, fields, api, _
class Category(models.Model):
_name = 'test_new_api.category'
name = fields.Char(required=True)
parent = fields.Many2one('test_new_api.category')
display_name = fields.Char(compute='_compute_display_name', inverse='_inverse_display_name')
@api.one
@api.depends('name', 'parent.display_name') # this definition is recursive
def _compute_display_name(self):
if self.parent:
self.display_name = self.parent.display_name + ' / ' + self.name
else:
self.display_name = self.name
@api.one
def _inverse_display_name(self):
names = self.display_name.split('/')
# determine sequence of categories
categories = []
for name in names[:-1]:
category = self.search([('name', 'ilike', name.strip())])
categories.append(category[0])
categories.append(self)
# assign parents following sequence
for parent, child in zip(categories, categories[1:]):
if parent and child:
child.parent = parent
# assign name of last category, and reassign display_name (to normalize it)
self.name = names[-1].strip()
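    # Illustrative example: writing a slash-separated path such as
    #
    #   categ.display_name = "Root / Child / Leaf"
    #
    # searches for existing categories named "Root" and "Child" (first ilike
    # match each), chains them as parents of this record, and renames the
    # record itself to "Leaf"; _compute_display_name then rebuilds the stored
    # display_name from the parent chain.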
class Discussion(models.Model):
_name = 'test_new_api.discussion'
name = fields.Char(string='Title', required=True,
help="General description of what this discussion is about.")
moderator = fields.Many2one('res.users')
categories = fields.Many2many('test_new_api.category',
'test_new_api_discussion_category', 'discussion', 'category')
participants = fields.Many2many('res.users')
messages = fields.One2many('test_new_api.message', 'discussion')
@api.onchange('moderator')
def _onchange_moderator(self):
self.participants |= self.moderator
class Message(models.Model):
_name = 'test_new_api.message'
discussion = fields.Many2one('test_new_api.discussion', ondelete='cascade')
body = fields.Text()
author = fields.Many2one('res.users', default=lambda self: self.env.user)
name = fields.Char(string='Title', compute='_compute_name', store=True)
display_name = fields.Char(string='Abstract', compute='_compute_display_name')
size = fields.Integer(compute='_compute_size', search='_search_size')
double_size = fields.Integer(compute='_compute_double_size')
discussion_name = fields.Char(related='discussion.name', readonly=True)
@api.one
@api.constrains('author', 'discussion')
def _check_author(self):
if self.discussion and self.author not in self.discussion.participants:
raise ValueError(_("Author must be among the discussion participants."))
@api.one
@api.depends('author.name', 'discussion.name')
def _compute_name(self):
self.name = "[%s] %s" % (self.discussion.name or '', self.author.name)
@api.one
@api.depends('author.name', 'discussion.name', 'body')
def _compute_display_name(self):
stuff = "[%s] %s: %s" % (self.author.name, self.discussion.name or '', self.body or '')
self.display_name = stuff[:80]
@api.one
@api.depends('body')
def _compute_size(self):
self.size = len(self.body or '')
def _search_size(self, operator, value):
if operator not in ('=', '!=', '<', '<=', '>', '>=', 'in', 'not in'):
return []
# retrieve all the messages that match with a specific SQL query
query = """SELECT id FROM "%s" WHERE char_length("body") %s %%s""" % \
(self._table, operator)
self.env.cr.execute(query, (value,))
ids = [t[0] for t in self.env.cr.fetchall()]
return [('id', 'in', ids)]
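    # Example: because `size` declares search='_search_size', a domain on the
    # computed field is rewritten into the SQL above, e.g. (hypothetical call)
    #
    #   long_msgs = self.env['test_new_api.message'].search([('size', '>', 100)])
    #
    # selects the ids of messages whose body is longer than 100 characters.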
@api.one
@api.depends('size')
def _compute_double_size(self):
# This illustrates a subtle situation: self.double_size depends on
# self.size. When size is computed, self.size is assigned, which should
# normally invalidate self.double_size. However, this may not happen
# while self.double_size is being computed: the last statement below
# would fail, because self.double_size would be undefined.
self.double_size = 0
size = self.size
self.double_size = self.double_size + size
class Talk(models.Model):
_name = 'test_new_api.talk'
parent = fields.Many2one('test_new_api.discussion', delegate=True, required=True)
class MixedModel(models.Model):
_name = 'test_new_api.mixed'
number = fields.Float(digits=(10, 2), default=3.14)
date = fields.Date()
now = fields.Datetime(compute='_compute_now')
lang = fields.Selection(string='Language', selection='_get_lang')
reference = fields.Reference(string='Related Document',
selection='_reference_models')
@api.one
def _compute_now(self):
# this is a non-stored computed field without dependencies
self.now = fields.Datetime.now()
@api.model
def _get_lang(self):
langs = self.env['res.lang'].search([])
return [(lang.code, lang.name) for lang in langs]
@api.model
def _reference_models(self):
models = self.env['ir.model'].search([('state', '!=', 'manual')])
return [(model.model, model.name)
for model in models
if not model.model.startswith('ir.')]
| agpl-3.0 |
mzdaniel/oh-mainline | vendor/packages/mechanize/test/test_functional.py | 21 | 29138 | #!/usr/bin/env python
# These tests access the network. python test.py runs a local test server and
# doesn't try to fetch anything over the internet, since the few tests here
# that do that are disabled by default since they have test tag "internet".
# thanks Moof (aka Giles Antonio Radford) for some of these
import errno
import os
import socket
import subprocess
import sys
import unittest
import urllib
import urllib2
import mechanize
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings, write_file
import mechanize._opener
import mechanize._rfc3986
import mechanize._sockettimeout
import mechanize._testcase
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
class TestCase(mechanize._testcase.TestCase):
# testprogram sets self.no_proxies on each TestCase to request explicitly
# setting proxies so that http*_proxy environment variables are ignored
def _configure_user_agent(self, ua):
if self.no_proxies:
ua.set_proxies({})
def make_browser(self):
browser = mechanize.Browser()
self._configure_user_agent(browser)
return browser
def make_user_agent(self):
ua = mechanize.UserAgent()
self._configure_user_agent(ua)
return ua
def build_opener(self, handlers=(), build_opener=None):
handlers += (mechanize.ProxyHandler(proxies={}),)
if build_opener is None:
build_opener = mechanize.build_opener
return build_opener(*handlers)
def setUp(self):
mechanize._testcase.TestCase.setUp(self)
self.test_uri = urljoin(self.uri, "test_fixtures")
self.server = self.get_cached_fixture("server")
if self.no_proxies:
old_opener_m = mechanize._opener._opener
old_opener_u = urllib2._opener
mechanize.install_opener(mechanize.build_opener(
mechanize.ProxyHandler(proxies={})))
urllib2.install_opener(urllib2.build_opener(
urllib2.ProxyHandler(proxies={})))
def revert_install():
mechanize.install_opener(old_opener_m)
urllib2.install_opener(old_opener_u)
self.add_teardown(revert_install)
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
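# Example (Windows behaviour assumed): urllib.pathname2url(r"C:\tmp\x.html")
# typically returns "///C:/tmp/x.html"; dropping the leading "//" above gives
# "/C:/tmp/x.html", the form expected after a "file://" prefix.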
def read_file(filename):
fh = open(filename)
try:
return fh.read()
finally:
fh.close()
class FtpTestCase(TestCase):
def test_ftp(self):
server = self.get_cached_fixture("ftp_server")
browser = self.make_browser()
path = self.make_temp_dir(dir_=server.root_path)
file_path = os.path.join(path, "stuff")
data = "data\nmore data"
write_file(file_path, data)
relative_path = os.path.join(os.path.basename(path), "stuff")
r = browser.open("ftp://anon@localhost:%s/%s" %
(server.port, relative_path))
self.assertEqual(r.read(), data)
class SocketTimeoutTest(TestCase):
# the timeout tests in this module aren't full functional tests: in order
# to speed things up, don't actually call .settimeout on the socket. XXX
# allow running the tests against a slow server with a real timeout
def _monkey_patch_socket(self):
class Delegator(object):
def __init__(self, delegate):
self._delegate = delegate
def __getattr__(self, name):
return getattr(self._delegate, name)
assertEquals = self.assertEquals
class TimeoutLog(object):
AnyValue = object()
def __init__(self):
self._nr_sockets = 0
self._timeouts = []
self.start()
def start(self):
self._monitoring = True
def stop(self):
self._monitoring = False
def socket_created(self):
if self._monitoring:
self._nr_sockets += 1
def settimeout_called(self, timeout):
if self._monitoring:
self._timeouts.append(timeout)
def verify(self, value=AnyValue):
if sys.version_info[:2] < (2, 6):
# per-connection timeout not supported in Python 2.5
self.verify_default()
else:
assertEquals(len(self._timeouts), self._nr_sockets)
if value is not self.AnyValue:
for timeout in self._timeouts:
assertEquals(timeout, value)
def verify_default(self):
assertEquals(len(self._timeouts), 0)
log = TimeoutLog()
def settimeout(timeout):
log.settimeout_called(timeout)
orig_socket = socket.socket
def make_socket(*args, **kwds):
sock = Delegator(orig_socket(*args, **kwds))
log.socket_created()
sock.settimeout = settimeout
return sock
self.monkey_patch(socket, "socket", make_socket)
return log
class SimpleTests(SocketTimeoutTest):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
super(SimpleTests, self).setUp()
self.browser = self.make_browser()
def test_simple(self):
self.browser.open(self.test_uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_basic_auth(self):
uri = urljoin(self.uri, "basic_auth")
self.assertRaises(mechanize.URLError, self.browser.open, uri)
self.browser.add_password(uri, "john", "john")
self.browser.open(uri)
self.assertEqual(self.browser.title(), 'Basic Auth Protected Area')
def test_digest_auth(self):
uri = urljoin(self.uri, "digest_auth")
self.assertRaises(mechanize.URLError, self.browser.open, uri)
self.browser.add_password(uri, "digestuser", "digestuser")
self.browser.open(uri)
self.assertEqual(self.browser.title(), 'Digest Auth Protected Area')
def test_open_with_default_timeout(self):
timeout_log = self._monkey_patch_socket()
self.browser.open(self.test_uri)
self.assertEqual(self.browser.title(), 'Python bits')
timeout_log.verify_default()
def test_open_with_timeout(self):
timeout_log = self._monkey_patch_socket()
timeout = 10.
self.browser.open(self.test_uri, timeout=timeout)
self.assertEqual(self.browser.title(), 'Python bits')
timeout_log.verify(timeout)
def test_urlopen_with_default_timeout(self):
timeout_log = self._monkey_patch_socket()
response = mechanize.urlopen(self.test_uri)
self.assert_contains(response.read(), "Python bits")
timeout_log.verify_default()
def test_urlopen_with_timeout(self):
timeout_log = self._monkey_patch_socket()
timeout = 10.
response = mechanize.urlopen(self.test_uri, timeout=timeout)
self.assert_contains(response.read(), "Python bits")
timeout_log.verify(timeout)
def test_redirect_with_timeout(self):
timeout_log = self._monkey_patch_socket()
timeout = 10.
# 301 redirect due to missing final '/'
req = mechanize.Request(urljoin(self.test_uri, "test_fixtures"),
timeout=timeout)
r = self.browser.open(req)
self.assert_("GeneralFAQ.html" in r.read(2048))
timeout_log.verify(timeout)
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
self.assertRaises(
mechanize.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(mechanize.URLError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.test_uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
codes = []
class ObservingHandler(mechanize.BaseHandler):
def http_response(self, request, response):
codes.append(response.code)
return response
self.browser.add_handler(ObservingHandler())
r = self.browser.open(urljoin(self.uri, "test_fixtures"))
self.assertEqual(r.code, 200)
self.assertTrue(301 in codes)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_refresh(self):
def refresh_request(seconds):
uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
return uri + ("?refresh=%s" % val)
self.browser.set_handle_refresh(True, honor_time=False)
r = self.browser.open(refresh_request(5))
self.assertEqual(r.geturl(), self.uri)
# Set a maximum refresh time of 30 seconds (these long refreshes tend
# to be there only because the website owner wants you to see the
# latest news, or whatever -- they're not essential to the operation of
# the site, and not really useful or appropriate when scraping).
refresh_uri = refresh_request(60)
self.browser.set_handle_refresh(True, max_time=30., honor_time=True)
r = self.browser.open(refresh_uri)
self.assertEqual(r.geturl(), refresh_uri)
# allow long refreshes (but don't actually wait 60 seconds)
self.browser.set_handle_refresh(True, max_time=None, honor_time=False)
r = self.browser.open(refresh_request(60))
self.assertEqual(r.geturl(), self.uri)
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath(os.path.join("test", "test_functional.py")))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
path = os.path.join("test", "test_functional.py")
response = self.browser.open_local_file(path)
self.assertIn("this string appears in this file ;-)",
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
uri = urljoin(self.uri, "test_fixtures")
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(uri)
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
# Request argument instead of URL
r = self.browser.open_novisit(mechanize.Request(uri))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = self.make_user_agent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.test_uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = self.make_browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
build_opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener
opener = self.build_opener(build_opener=build_opener)
r = opener.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_seek_wrapper_class_name(self):
opener = self.make_user_agent()
opener.set_seekable_responses(True)
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_("HTTPError instance" in repr(exc))
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = self.make_user_agent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = self.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = self.make_user_agent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = self.make_browser()
r = br.open(self.test_uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, "http://sourceforge.net/")
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = self.make_browser()
data = ("<html><head><title>Test</title></head>"
"<body><p>Hello.</p></body></html>")
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK")
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = self.make_browser()
r = b.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(SocketTimeoutTest):
def test_referer(self):
br = self.make_browser()
br.set_handle_refresh(True, honor_time=False)
referer = urljoin(self.uri, "test_fixtures/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
opener = self.build_opener(handlers)
r = opener.open(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
data = r.read()
self.assert_(data.find("Your browser supports cookies!") >= 0)
self.assertEquals(len(cj), 2)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assertEquals(samedata, data)
def test_robots(self):
plain_opener = self.build_opener(
[mechanize.HTTPRobotRulesProcessor])
browser = self.make_browser()
for opener in plain_opener, browser:
opener.open(urljoin(self.uri, "robots"))
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def _check_retrieve(self, url, filename, headers):
from urllib import urlopen
self.assertEqual(headers.get('Content-Type'), 'text/html')
if self.no_proxies:
proxies = {}
else:
proxies = None
self.assertEqual(read_file(filename),
urlopen(url, proxies=proxies).read())
def test_retrieve_to_named_file(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = os.path.join(self.make_temp_dir(), "python.html")
opener = self.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
self.assertEqual(filename, test_filename)
self._check_retrieve(url, filename, headers)
self.assert_(os.path.isfile(filename))
def test_retrieve(self):
# not passing an explicit filename downloads to a temporary file
# using a Request object instead of a URL works
url = urljoin(self.uri, "/mechanize/")
opener = self.build_opener()
verif = CallbackVerifier(self)
request = mechanize.Request(url)
filename, headers = opener.retrieve(request, reporthook=verif.callback)
self.assertEquals(request.visit, False)
self._check_retrieve(url, filename, headers)
opener.close()
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_urlretrieve(self):
timeout_log = self._monkey_patch_socket()
timeout = 10.
url = urljoin(self.uri, "/mechanize/")
verif = CallbackVerifier(self)
filename, headers = mechanize.urlretrieve(url,
reporthook=verif.callback,
timeout=timeout)
timeout_log.stop()
self._check_retrieve(url, filename, headers)
timeout_log.verify(timeout)
def test_reload_read_incomplete(self):
browser = self.make_browser()
r1 = browser.open(urljoin(self.uri,
"test_fixtures/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from mechanize import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CommandFailedError(Exception):
def __init__(self, message, rc):
Exception.__init__(self, message)
self.rc = rc
def get_cmd_stdout(args, **kwargs):
process = subprocess.Popen(args, stdout=subprocess.PIPE, **kwargs)
stdout, stderr = process.communicate()
rc = process.returncode
if rc != 0:
raise CommandFailedError(
"Command failed with return code %i: %s:\n%s" %
(rc, args, stderr), rc)
else:
return stdout
class ExamplesTests(TestCase):
tags = "internet"
def check_download_script(self, name):
python = sys.executable
parent_dir = os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))
temp_dir = self.make_temp_dir()
get_cmd_stdout(
[python, os.path.join(parent_dir, "examples", name)],
cwd=temp_dir)
[tarball] = os.listdir(temp_dir)
self.assertTrue(tarball.endswith(".tar.gz"))
def test_hack21(self):
self.check_download_script("hack21.py")
def test_pypi(self):
self.check_download_script("pypi.py")
def add_to_path(env, name, value):
old = env.get(name)
if old is not None and old != "":
value = old + ":" + value
env[name] = value
class FormsExamplesTests(mechanize._testcase.GoldenTestCase):
def check_forms_example(self, name, golden_path, fixup):
self.get_cached_fixture("server")
python = sys.executable
this_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(this_dir)
forms_examples_dir = os.path.join(parent_dir, "examples", "forms")
output_dir = self.make_temp_dir()
env = os.environ.copy()
add_to_path(env, "PYTHONPATH", parent_dir)
output = get_cmd_stdout([python, name, self.uri],
env=env,
cwd=forms_examples_dir)
output = fixup(output)
write_file(os.path.join(output_dir, "output"), output)
self.assert_golden(output_dir,
os.path.join(this_dir, golden_path))
def test_simple(self):
def fixup(output):
return output.replace("POST %s" % self.uri.rstrip("/"),
"POST http://127.0.0.1:8000")
self.check_forms_example(
"simple.py",
os.path.join("functional_tests_golden",
"FormsExamplesTests.test_simple"),
fixup)
def test_example(self):
def fixup(output):
lines = [l for l in output.splitlines(True) if
not l.startswith("Vary:") and
not l.startswith("Server:") and
not l.startswith("Transfer-Encoding:") and
not l.startswith("Content-Length:")]
output = "".join(lines)
return output.replace(self.uri.rstrip("/"),
"http://127.0.0.1:8000")
self.check_forms_example(
"example.py",
os.path.join("functional_tests_golden",
"FormsExamplesTests.test_example"),
fixup)
class CookieJarTests(TestCase):
def _test_cookiejar(self, make_cookiejar, commit):
cookiejar = make_cookiejar()
br = self.make_browser()
#br.set_debug_http(True)
br.set_cookiejar(cookiejar)
br.set_handle_refresh(False)
url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
# no cookie was set on the first request
html = br.open(url).read()
self.assertEquals(html.find("Your browser supports cookies!"), -1)
self.assertEquals(len(cookiejar), 2)
# ... but now we have the cookie
html = br.open(url).read()
self.assertIn("Your browser supports cookies!", html)
self.assertIn("Received session cookie", html)
commit(cookiejar)
# should still have the cookie when we load afresh
cookiejar = make_cookiejar()
br.set_cookiejar(cookiejar)
html = br.open(url).read()
self.assertIn("Your browser supports cookies!", html)
self.assertNotIn("Received session cookie", html)
def test_mozilla_cookiejar(self):
filename = os.path.join(self.make_temp_dir(), "cookies.txt")
def make_cookiejar():
cj = mechanize.MozillaCookieJar(filename=filename)
try:
cj.revert()
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
return cj
def commit(cj):
cj.save()
self._test_cookiejar(make_cookiejar, commit)
def test_firefox3_cookiejar(self):
try:
mechanize.Firefox3CookieJar
except AttributeError:
# firefox 3 cookiejar is only supported in Python 2.5 and later;
# also, sqlite3 must be available
raise unittest.SkipTest()
filename = os.path.join(self.make_temp_dir(), "cookies.sqlite")
def make_cookiejar():
hide_experimental_warnings()
try:
return mechanize.Firefox3CookieJar(filename=filename)
finally:
reset_experimental_warnings()
def commit(cj):
pass
self._test_cookiejar(make_cookiejar, commit)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
mjirik/teigen | teigen/generators/unconnected_cylinders.py | 1 | 22054 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © %YEAR% %USER% <%MAIL%>
#
# Distributed under terms of the %LICENSE% license.
"""
%HERE%
"""
import logging
logger = logging.getLogger(__name__)
import argparse
import numpy as np
# import ..geometry3 as g3
# from ..geometry3d import plane_fit
from .. import geometry3d as g3
import os.path
from . import general
def __half_plane(self, perp, plane_point, point):
cdf = (np.array(point) - np.array(plane_point))
out = perp[0] * cdf[0] + \
perp[1] * cdf[1] + \
perp[2] * cdf[2]
return out > 0
def _const(value):
return value
class UnconnectedCylinderGenerator(general.GeneralGenerator):
def __init__(self,
build=True,
gtree=None,
# endDistMultiplicator=1,
# use_joints=True,
voxelsize_mm=[1.0, 1.0, 1.0],
# voxelsize_mm_z=1.0,
# voxelsize_mm_x=1.0,
# voxelsize_mm_y=1.0,
areasize_px=[100, 100, 100],
# area_shape_z=100,
# area_shape_x=100,
# area_shape_y=100,
element_number=-1,
radius_distribution="normal",
radius_distribution_uniform=None,
radius_distribution_normal=None,
radius_distribution_fixed=None,
radius_distribution_minimum=0.05,
radius_distribution_maximum=10.0,
radius_distribution_mean=0.1,
radius_distribution_standard_deviation=0.1,
length_distribution_mean=1.0,
length_distribution_standard_deviation=0.1,
# intensity_profile=None
intensity_profile_radius=[0.4, 0.7, 1.0, 1.3],
intensity_profile_intensity=[195, 190, 200, 30],
orientation_anisotropic=True,
orientation_alpha_rad=0.0,
orientation_beta_rad=0.0,
orientation_variance_rad=0.1,
allow_overlap=False,
volume_fraction=0.1,
maximum_1000_iteration_number=10,
random_generator_seed=0,
last_element_can_be_smaller=False,
tube_shape=True
):
"""
gtree is information about input data structure.
endDistMultiplicator: make cylinder shorter by multiplication of radius
intensity_profile: Dictionary type. Key is radius and value is required intensity.
@param tube_shape: create tube shape if true, otherwise create cylinders
"""
# area_shape = [area_shape_z,area_shape_x, area_shape_y]
# voxelsize_mm = [
# voxelsize_mm_z,
# voxelsize_mm_x,
# voxelsize_mm_y
# ]
super(general.GeneralGenerator, self).__init__()
# general.GeneralGenerator.__init__(self)
self.build = build
# self.filename = "output{:05d}.jpg"
self.areasize_px = np.asarray(areasize_px)
self.voxelsize_mm = np.asarray(voxelsize_mm)
self.element_number = element_number
self.radius_maximum = radius_distribution_maximum
self.radius_minimum = radius_distribution_minimum
# self.intensity_profile = intensity_profile
self.intensity_profile = dict(zip(intensity_profile_radius, intensity_profile_intensity))
self._cylinder_nodes = []
self._cylinder_nodes_radiuses = []
self.random_generator_seed = random_generator_seed
self.radius_generator = _const
self.radius_generator_args = [radius_distribution_mean]
self.area_volume = np.prod(self.areasize_px * self.voxelsize_mm)
if radius_distribution_normal is not None:
logger.warning("Deprecated use of radius_distribution_normal. Use radius_distribution='normal'")
else:
if radius_distribution == "normal":
radius_distribution_normal = True
radius_distribution_fixed = False
radius_distribution_uniform = False
elif radius_distribution == "fixed":
radius_distribution_normal = False
radius_distribution_fixed = True
radius_distribution_uniform = False
elif radius_distribution == "uniform":
radius_distribution_normal = False
                radius_distribution_fixed = False
radius_distribution_uniform = True
if radius_distribution_uniform:
self.radius_generator = np.random.uniform
self.radius_generator_args = [radius_distribution_minimum, radius_distribution_maximum]
if radius_distribution_normal:
self.radius_generator = general.random_normal
self.radius_generator_args = [radius_distribution_mean, radius_distribution_standard_deviation]
self.alow_overlap = allow_overlap
self.length_generator = general.random_normal
self.length_generator_args = [length_distribution_mean, length_distribution_standard_deviation]
self.requeseted_volume_fraction = volume_fraction
self.max_iteration = 1000 * maximum_1000_iteration_number
self.last_element_can_be_smaller = last_element_can_be_smaller
# import ipdb; ipdb.set_trace()
# input of geometry and topology
# self.V = []
# self.CV = []
# self.joints = {}
# self.joints_lar = []
# self.gtree = gtree
# self.endDistMultiplicator = endDistMultiplicator
# self.use_joints = use_joints
self.surface = 0
# self.LEN_STEP_CONSTANT = 0.1
self.MAKE_IT_SHORTER_CONSTANT = 2.0
# self.DIST_MAX_RADIUS_MULTIPLICATOR = 3.0
self.OVERLAPS_ALOWED = False
self.tree_data = {}
self.progress_callback = None
# self.collision_model = g3.CollisionModelSpheres(areasize=(self.areasize_px * self.voxelsize_mm))
self.collision_model = g3.CollisionModelCombined(areasize=(self.areasize_px * self.voxelsize_mm))
self.area_volume = np.prod(self.voxelsize_mm * self.areasize_px)
self.orientation_anisotropic = orientation_anisotropic
self.orientation_alpha_rad = orientation_alpha_rad
self.orientation_beta_rad = orientation_beta_rad
self.orientation_variance_rad = orientation_variance_rad
self.tube_shape = tube_shape
def _add_cylinder_if_no_collision(self, pt1, pt2, radius,
COLLISION_RADIUS=1.5 # higher then sqrt(2)
):
if self.alow_overlap:
return self.collision_model.add_tube(pt1, pt2, radius)
else:
return self.collision_model.add_tube_if_no_collision(pt1, pt2, radius)
def run(self):
logger.info("cylynder generator running")
self.tree_data = {
}
np.random.seed(self.random_generator_seed)
self.surface = 0
# pts = np.random.random([self.element_number, 3]) * self.areasize_px * self.voxelsize_mm
# construct voronoi
import scipy.spatial
import itertools
self.init_stats()
# radius = self.radius_maximum
# for i, two_points in enumerate(vor3.ridge_points):
# for i in range(self.element_number):
self.generation_break_causes = {
"radius_maximum": 0,
"radius_minimum": 0,
"collision": 0,
"radius_bigger_than_areasize": 0,
"length_bigger_than_areasize": 0,
}
while not self.is_final_iteration():
self.create_cylinder()
logger.info(self.generation_break_causes)
self.get_stats()
logger.debug(self.generation_break_causes)
self.data3d = None
def is_final_iteration(self):
if self.stop:
return True
self.iterations += 1
stats = self.get_stats()
if self.element_number > 0:
n = len(self.geometry_data["volume"])
if n >= self.element_number:
return True
self.actual_object_volume = np.sum(self.geometry_data["volume"])
actual_volume_fraction = self.actual_object_volume / self.area_volume
logger.debug("iteration: " + str(self.iterations) + " / " + str(self.max_iteration))
logger.debug("actual_volume_fraction: " + str(actual_volume_fraction))
if self.iterations > self.max_iteration:
return True
elif actual_volume_fraction > self.requeseted_volume_fraction:
return True
else:
return False
def init_stats(self):
self.stop = False
self.iterations = 0
self.actual_object_volume = 0
self.requeseted_volume = self.requeseted_volume_fraction * self.area_volume
self.geometry_data = {
"length": [],
"radius": [],
"volume": [],
"surface": [],
"vector": [],
"point1": [],
"point2": []
}
def add_cylinder_to_stats(self, pt1, pt2, radius):
pt1 = np.asarray(pt1)
pt2 = np.asarray(pt2)
edge = {
"nodeA_ZYX_mm": pt1,
"nodeB_ZYX_mm": pt2,
# "radius_mm": radius
# "radius_mm": 1 + np.random.rand() * (self.max_radius -1 )
"radius_mm": radius,
}
self.tree_data[len(self.tree_data)] = edge
# line_nodes = g3.get_points_in_line_segment(pt1, pt2, radius)
# self._cylinder_nodes.extend(line_nodes)
length = np.linalg.norm(pt1 - pt2)
# if it is tube (pill)
if self.tube_shape:
surf = g3.tube_surface(radius, length)
volume = g3.tube_volume(radius, length)
else:
# it is cylinder
surf = 2 * np.pi * radius * (radius + length)
volume = np.pi * radius**2 * length
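        # g3.tube_surface / g3.tube_volume are assumed to model a capsule
        # (a cylinder with hemispherical end caps), i.e. roughly
        #   surface = 2*pi*r*length + 4*pi*r**2
        #   volume  = pi*r**2*length + (4/3)*pi*r**3
        # while the explicit formulas above are for a plain closed cylinder.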
vector = pt1 - pt2
# TODO rename - add units
self.geometry_data["length"].append(length)
self.geometry_data["surface"].append(surf)
self.geometry_data["radius"].append(radius)
self.geometry_data["volume"].append(volume)
self.geometry_data["vector"].append(vector)
self.geometry_data["point1"].append(pt1)
self.geometry_data["point2"].append(pt2)
# self.geometry_data["collide_with_prevs"].append(collide_with_prevs)
self.surface += surf
def create_cylinder(
self,
try_shorter_iteration_number=8,
n_nearest=4,
length_to_radius_ratio=4
):
generated = False
while not generated:
self.iterations += 1
if self.is_final_iteration():
if self.progress_callback is not None:
self.progress_callback(self, 1.0, statusbar_text="Skeleton created")
return
progress = self.iterations / (1. * self.max_iteration)
# logger.debug("progress " + str(progress))
object_volume = np.sum(self.geometry_data["volume"])
actual_volume_fraction = object_volume / self.area_volume
statusbar_text = str(self.iterations) + " Vv " + str(actual_volume_fraction)
if self.progress_callback is not None:
self.progress_callback(self, progress, statusbar_text=statusbar_text)
# logger.debug(progress)
# logger.debug(self.radius_generator_args)
radius = self.radius_generator(*self.radius_generator_args)
if radius > self.radius_maximum:
self.generation_break_causes["radius_maximum"] += 1
continue
if radius < self.radius_minimum:
self.generation_break_causes["radius_minimum"] += 1
continue
if (radius > (self.areasize_px * self.voxelsize_mm)).all():
self.generation_break_causes["radius_bigger_than_areasize"] += 1
continue
# pt1 = self.collision_model.get_random_point(radius=radius)
# pt2 = self.collision_model.get_random_point(radius=radius)
# pt1 = np.random.random([3]) * self.areasize_px * self.voxelsize_mm
# pt2 = np.random.random([3]) * self.areasize_px * self.voxelsize_mm
# pt1, pt2 = self._make_cylinder_shorter(pt1, pt2, radius*self.MAKE_IT_SHORTER_CONSTANT)
center = np.random.random([3]) * self.areasize_px * self.voxelsize_mm
if self.collision_model.object_number > 2 * n_nearest:
if self.iterations % 5:
# if self.collision_model.get_node_number() > n_nearest:
center = np.asarray(center)
npts, indexes, lengths = self.collision_model.n_closest_end_points(center, n_nearest)
center = np.mean(npts, axis=0)
if self.orientation_anisotropic:
# direction_vector = np.asarray(self.orientation_main).reshape(3, 1)
# past solution
# direction_vector = np.random.normal(direction_vector, self.orientation_variance_rad)
# direction_vector = direction_vector / np.linalg.norm(direction_vector)
direction_vector = g3.random_vector_along_direction(
alpha=self.orientation_alpha_rad, beta=self.orientation_beta_rad,
sigma=self.orientation_variance_rad, size=1
)
direction_vector = direction_vector.squeeze()
else:
direction_vector = g3.random_direction_vector()
# direction_vector = np.asarray([0, 2**-0.5, 2**-0.5])
# direction_vector = np.asarray([0, 2, 0])
length = self.length_generator(*self.length_generator_args)
if (length > (self.areasize_px * self.voxelsize_mm)).all():
self.generation_break_causes["length_bigger_than_areasize"] += 1
continue
if self.tube_shape:
volume = g3.tube_volume(radius, length)
else:
volume = g3.cylinder_volume(radius, length)
            planned_volume_is_too_much = \
                ((self.actual_object_volume + volume) / self.area_volume) > self.requeseted_volume_fraction
            if planned_volume_is_too_much and self.last_element_can_be_smaller:
                # only for the last element, and only if this feature is enabled
                radius, length = self.pill_parameter_suggestion_for_last_object(radius, length)
                # if the proposed volume is excessive
pt1 = np.asarray(g3.translate(center, direction_vector, 0.5 * length))
pt2 = np.asarray(g3.translate(center, direction_vector, -0.5 * length))
collision = self._add_cylinder_if_no_collision(pt1, pt2, radius)
else:
# normal run
pt1 = np.asarray(g3.translate(center, direction_vector, 0.5 * length))
pt2 = np.asarray(g3.translate(center, direction_vector, -0.5 * length))
try_shorter_i = 0
collision = self._add_cylinder_if_no_collision(pt1, pt2, radius)
while (collision is True and try_shorter_i < try_shorter_iteration_number):
try_shorter_i += 1
pt1, pt2 = g3.get_points_closer(pt1, pt2, relative_length=0.75)
collision = self._add_cylinder_if_no_collision(pt1, pt2, radius)
if not collision:
generated = True
else:
self.generation_break_causes["collision"] += 1
if generated:
self.add_cylinder_to_stats(pt1, pt2, radius=radius)
# else:
# logger.debug(self.generation_break_causes)
return
def pill_parameter_suggestion_for_last_object(self, first_radius, first_length):
length = first_length
radius = g3.tube_radius_from_volume(self.actual_object_volume - self.requeseted_volume, length)
# for alpha in np.linspace(1., 0., 10):
# length = alpha * first_length
# radius = g3.pill_radius_from_volume(self.actual_object_volume - self.requeseted_volume, length)
#
# if radius >= self.radius_minimum:
# break
if radius < self.radius_minimum:
radius = self.radius_minimum
return radius, length
def get_stats(self):
# self.assertTrue(False)
import pandas as pd
df = pd.DataFrame(self.geometry_data)
# desc = df.describe()
return df
def _make_cylinder_shorter(self, nodeA, nodeB, radius): # , radius, cylinder_id):
vector = (np.asarray(nodeA) - np.asarray(nodeB)).tolist()
if np.linalg.norm(vector) < 2 * radius:
return None, None
# mov circles to center of cylinder by size of radius because of joint
nodeA = g3.translate(nodeA, vector,
-radius) # * self.endDistMultiplicator)
nodeB = g3.translate(nodeB, vector,
radius) # * self.endDistMultiplicator)
return nodeA, nodeB
def _is_in_area(self, node, radius=None):
return self.collision_model.is_point_in_area(node, radius)
def add_cylinder(self, nodeA, nodeB, radius, cylinder_id):
try:
idA = tuple(nodeA) # self.gtree.tree_data[cylinder_id]['nodeIdA']
idB = tuple(nodeB) # self.gtree.tree_data[cylinder_id]['nodeIdB']
except:
idA = 0
idB = 0
self.use_joints = False
# vect = nodeA - nodeB
# self.__draw_circle(nodeB, vect, radius)
vector = (np.asarray(nodeA) - np.asarray(nodeB)).tolist()
# mov circles to center of cylinder by size of radius because of joint
nodeA = g3.translate(nodeA, vector,
-radius * self.endDistMultiplicator)
nodeB = g3.translate(nodeB, vector,
radius * self.endDistMultiplicator)
if all(nodeA == nodeB):
logger.error("End points are on same place")
ptsA, ptsB = g3.cylinder_circles(nodeA, nodeB, radius,
element_number=30)
CVlistA = self.__construct_cylinder_end(ptsA, idA)
CVlistB = self.__construct_cylinder_end(ptsB, idB)
CVlist = CVlistA + CVlistB
self.CV.append(CVlist)
# lar add ball
# ball0 = mapper.larBall(radius, angle1=PI, angle2=2*PI)([10, 16])
# V, CV = ball0
# # mapper.T
# # ball = STRUCT(MKPOLS(ball0))
#
# # mapper.T(1)(nodeA[0])(mapper.T(2)(nodeA[1])(mapper.T(3)(nodeA[1])(ball)))
#
# lenV = len(self.V)
#
# self.V = self.V + (np.array(V) + np.array(nodeA)).tolist()
# self.CV = self.CV + (np.array(CV) + lenV).tolist()
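# Usage sketch (parameter values are illustrative assumptions only):
#
#   gen = UnconnectedCylinderGenerator(
#       voxelsize_mm=[1.0, 1.0, 1.0],
#       areasize_px=[100, 100, 100],
#       volume_fraction=0.1,
#   )
#   gen.run()                    # fills gen.geometry_data and gen.tree_data
#   stats = gen.get_stats()      # pandas DataFrame of length/radius/surface/volume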
def main():
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
# input parser
parser = argparse.ArgumentParser(
description='Histology analyser reporter. Try: \
python src/tb_volume.py -i ./tests/hist_stats_test.yaml'
)
parser.add_argument(
'-i', '--inputfile',
default=None,
# required=True,
help='input file, yaml file'
)
parser.add_argument(
'-o', '--outputfile',
default=None,
help='output file, .raw, .dcm, .tiff, given by extension '
)
parser.add_argument(
'-ot', '--outputfiletype',
default='pkl',
help='output file type. raw, dcm, tiff, or pkl, default is pkl, '
)
parser.add_argument(
'-vs', '--voxelsize',
default=[1.0, 1.0, 1.0],
type=float,
metavar='N',
nargs='+',
help='size of voxel (ZYX)'
)
parser.add_argument(
'-ds', '--datashape',
default=[200, 200, 200],
type=int,
metavar='N',
nargs='+',
help='size of output data in pixels for each axis (ZYX)'
)
parser.add_argument(
'-g', '--generator',
default='vol',
type=str,
help='Volume or surface model can be generated by use this option. \
Use "vol", "volume" for volumetric model. For LAR surface model\
use "lar". For VTK file use "vtk".'
)
parser.add_argument(
'-d', '--debug', action='store_true',
help='Debug mode')
parser.add_argument(
'-l', '--useLar', action='store_true',
help='Use LAR')
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
# startTime = datetime.now()
generator_params = None
generator_class = args.generator
# if args.generator == "vtk":
# import gen_vtk_tree
# gen_vtk_tree.vt2vtk_file(args.inputfile, args.outputfile)
# return
    cylgen = UnconnectedCylinderGenerator()
    cylgen.run()
# tg = TreeBuilder(generator_class, generator_params)
# tg.importFromYaml(args.inputfile)
# tg.voxelsize_mm = args.voxelsize
# tg.shape = args.datashape
# tg.use_lar = args.useLar
# data3d = tg.buildTree()
#
# logger.info("TimeUsed:" + str(datetime.now() - startTime))
# # volume_px = sum(sum(sum(data3d)))
# # volume_mm3 = volume_px * \
# # (tg.voxelsize_mm[0] * tg.voxelsize_mm[1] * tg.voxelsize_mm[2])
# # logger.info("Volume px:" + str(volume_px))
# # logger.info("Volume mm3:" + str(volume_mm3))
#
# # vizualizace
# logger.debug("before visualization")
# tg.show()
# logger.debug("after visualization")
# ukládání do souboru
# if args.outputfile is not None:
# tg.saveToFile(args.outputfile, args.outputfiletype)
# class TreeGenerator(TreeConstructor):
# """
# back compatibility
# """
# pass
if __name__ == "__main__":
main()
| apache-2.0 |
jcupitt/libvips | test/test-suite/test_draw.py | 3 | 2750 | # vim: set fileencoding=utf-8 :
import pytest
import pyvips
class TestDraw:
def test_draw_circle(self):
im = pyvips.Image.black(100, 100)
im = im.draw_circle(100, 50, 50, 25)
pixel = im(25, 50)
assert len(pixel) == 1
assert pixel[0] == 100
pixel = im(26, 50)
assert len(pixel) == 1
assert pixel[0] == 0
im = pyvips.Image.black(100, 100)
im = im.draw_circle(100, 50, 50, 25, fill=True)
pixel = im(25, 50)
assert len(pixel) == 1
assert pixel[0] == 100
pixel = im(26, 50)
assert pixel[0] == 100
pixel = im(24, 50)
assert pixel[0] == 0
def test_draw_flood(self):
im = pyvips.Image.black(100, 100)
im = im.draw_circle(100, 50, 50, 25)
im = im.draw_flood(100, 50, 50)
im2 = pyvips.Image.black(100, 100)
im2 = im2.draw_circle(100, 50, 50, 25, fill=True)
diff = (im - im2).abs().max()
assert diff == 0
def test_draw_image(self):
im = pyvips.Image.black(51, 51)
im = im.draw_circle(100, 25, 25, 25, fill=True)
im2 = pyvips.Image.black(100, 100)
im2 = im2.draw_image(im, 25, 25)
im3 = pyvips.Image.black(100, 100)
im3 = im3.draw_circle(100, 50, 50, 25, fill=True)
diff = (im2 - im3).abs().max()
assert diff == 0
def test_draw_line(self):
im = pyvips.Image.black(100, 100)
im = im.draw_line(100, 0, 0, 100, 0)
pixel = im(0, 0)
assert len(pixel) == 1
assert pixel[0] == 100
pixel = im(0, 1)
assert len(pixel) == 1
assert pixel[0] == 0
def test_draw_mask(self):
mask = pyvips.Image.black(51, 51)
mask = mask.draw_circle(128, 25, 25, 25, fill=True)
im = pyvips.Image.black(100, 100)
im = im.draw_mask(200, mask, 25, 25)
im2 = pyvips.Image.black(100, 100)
im2 = im2.draw_circle(100, 50, 50, 25, fill=True)
diff = (im - im2).abs().max()
assert diff == 0
def test_draw_rect(self):
im = pyvips.Image.black(100, 100)
im = im.draw_rect(100, 25, 25, 50, 50, fill=True)
im2 = pyvips.Image.black(100, 100)
for y in range(25, 75):
im2 = im2.draw_line(100, 25, y, 74, y)
diff = (im - im2).abs().max()
assert diff == 0
def test_draw_smudge(self):
im = pyvips.Image.black(100, 100)
im = im.draw_circle(100, 50, 50, 25, fill=True)
im2 = im.draw_smudge(10, 10, 50, 50)
im3 = im.crop(10, 10, 50, 50)
im4 = im2.draw_image(im3, 10, 10)
diff = (im4 - im).abs().max()
assert diff == 0
if __name__ == '__main__':
pytest.main()
| lgpl-2.1 |
geerlingguy/ansible | test/integration/targets/vault/test-vault-client.py | 139 | 1818 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
import argparse
import sys
# TODO: could read these from the files I suppose...
secrets = {'vault-password': 'test-vault-password',
'vault-password-wrong': 'hunter42',
'vault-password-ansible': 'ansible',
'password': 'password',
'vault-client-password-1': 'password-1',
'vault-client-password-2': 'password-2'}
def build_arg_parser():
parser = argparse.ArgumentParser(description='Get a vault password from user keyring')
parser.add_argument('--vault-id', action='store', default=None,
dest='vault_id',
help='name of the vault secret to get from keyring')
parser.add_argument('--username', action='store', default=None,
help='the username whose keyring is queried')
parser.add_argument('--set', action='store_true', default=False,
dest='set_password',
help='set the password instead of getting it')
return parser
def get_secret(keyname):
return secrets.get(keyname, None)
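# Example invocation (hypothetical shell session): the script prints the secret
# matching --vault-id and exits 0, or writes to stderr and returns 2 when the
# key is unknown (see main() below).
#
#   $ ./test-vault-client.py --vault-id vault-client-password-1
#   password-1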
def main():
rc = 0
arg_parser = build_arg_parser()
args = arg_parser.parse_args()
# print('args: %s' % args)
keyname = args.vault_id or 'ansible'
if args.set_password:
print('--set is not supported yet')
sys.exit(1)
secret = get_secret(keyname)
if secret is None:
sys.stderr.write('test-vault-client could not find key for vault-id="%s"\n' % keyname)
# key not found rc=2
return 2
sys.stdout.write('%s\n' % secret)
return rc
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
DCSaunders/tensorflow | tensorflow/contrib/layers/python/layers/summaries.py | 14 | 5345 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for summary creation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import standard_ops
__all__ = [
'summarize_tensor',
'summarize_activation',
'summarize_tensors',
'summarize_collection',
'summarize_variables',
'summarize_weights',
'summarize_biases',
'summarize_activations',
]
# TODO(wicke): add more unit tests for summarization functions.
def _add_scalar_summary(tensor, tag=None):
"""Add a scalar summary operation for the tensor.
Args:
tensor: The tensor to summarize.
tag: The tag to use, if None then use tensor's op's name.
Returns:
The created histogram summary.
Raises:
ValueError: If the tag is already in use or the rank is not 0.
"""
tensor.get_shape().assert_has_rank(0)
tag = tag or '%s_summary' % tensor.op.name
return summary.scalar(tag, tensor)
def _add_histogram_summary(tensor, tag=None):
"""Add a summary operation for the histogram of a tensor.
Args:
tensor: The tensor to summarize.
tag: The tag to use, if None then use tensor's op's name.
Returns:
The created histogram summary.
Raises:
ValueError: If the tag is already in use.
"""
tag = tag or '%s_summary' % tensor.op.name
return summary.histogram(tag, tensor)
def summarize_activation(op):
"""Summarize an activation.
This applies the given activation and adds useful summaries specific to the
activation.
Args:
op: The tensor to summarize (assumed to be a layer activation).
Returns:
The summary op created to summarize `op`.
"""
if op.op.type in ('Relu', 'Softplus', 'Relu6'):
# Using inputs to avoid floating point equality and/or epsilons.
_add_scalar_summary(
standard_ops.reduce_mean(standard_ops.to_float(standard_ops.less(
op.op.inputs[0], standard_ops.cast(0.0, op.op.inputs[0].dtype)))),
'%s/zeros' % op.op.name)
if op.op.type == 'Relu6':
_add_scalar_summary(
standard_ops.reduce_mean(standard_ops.to_float(standard_ops.greater(
op.op.inputs[0], standard_ops.cast(6.0, op.op.inputs[0].dtype)))),
'%s/sixes' % op.op.name)
return _add_histogram_summary(op, '%s/activation' % op.op.name)
def summarize_tensor(tensor, tag=None):
"""Summarize a tensor using a suitable summary type.
This function adds a summary op for `tensor`. The type of summary depends on
the shape of `tensor`. For scalars, a `scalar_summary` is created, for all
other tensors, `histogram_summary` is used.
Args:
tensor: The tensor to summarize
tag: The tag to use, if None then use tensor's op's name.
Returns:
The summary op created or None for string tensors.
"""
# Skips string tensors and boolean tensors (not handled by the summaries).
if (tensor.dtype.is_compatible_with(dtypes.string) or
tensor.dtype.base_dtype == dtypes.bool):
return None
if tensor.get_shape().ndims == 0:
# For scalars, use a scalar summary.
return _add_scalar_summary(tensor, tag)
else:
# We may land in here if the rank is still unknown. The histogram won't
# hurt if this ends up being a scalar.
return _add_histogram_summary(tensor, tag)
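# Example: a hypothetical caller can rely on the rank-based dispatch above;
# the tensor names are illustrative only.
#
#   summarize_tensor(loss, tag='loss')   # rank-0 tensor -> scalar summary
#   summarize_tensor(weights)            # rank > 0 (or unknown) -> histogram summary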
def summarize_tensors(tensors, summarizer=summarize_tensor):
"""Summarize a set of tensors."""
return [summarizer(tensor) for tensor in tensors]
def summarize_collection(collection, name_filter=None,
summarizer=summarize_tensor):
"""Summarize a graph collection of tensors, possibly filtered by name."""
tensors = []
for op in ops.get_collection(collection):
if name_filter is None or re.match(name_filter, op.op.name):
tensors.append(op)
return summarize_tensors(tensors, summarizer)
# Utility functions for commonly used collections
summarize_variables = functools.partial(summarize_collection,
ops.GraphKeys.GLOBAL_VARIABLES)
summarize_weights = functools.partial(summarize_collection,
ops.GraphKeys.WEIGHTS)
summarize_biases = functools.partial(summarize_collection,
ops.GraphKeys.BIASES)
def summarize_activations(name_filter=None, summarizer=summarize_activation):
"""Summarize activations, using `summarize_activation` to summarize."""
return summarize_collection(ops.GraphKeys.ACTIVATIONS, name_filter,
summarizer)
| apache-2.0 |
KohlsTechnology/ansible | test/units/module_utils/parsing/test_convert_bool.py | 173 | 1924 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017 Ansible Project
# License: GNU General Public License v3 or later (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt )
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import pytest
from ansible.module_utils.parsing.convert_bool import boolean
class TestBoolean:
def test_bools(self):
assert boolean(True) is True
assert boolean(False) is False
def test_none(self):
with pytest.raises(TypeError):
assert boolean(None, strict=True) is False
assert boolean(None, strict=False) is False
def test_numbers(self):
assert boolean(1) is True
assert boolean(0) is False
assert boolean(0.0) is False
# Current boolean() doesn't consider these to be true values
# def test_other_numbers(self):
# assert boolean(2) is True
# assert boolean(-1) is True
# assert boolean(0.1) is True
def test_strings(self):
assert boolean("true") is True
assert boolean("TRUE") is True
assert boolean("t") is True
assert boolean("yes") is True
assert boolean("y") is True
assert boolean("on") is True
def test_junk_values_nonstrict(self):
assert boolean("flibbity", strict=False) is False
assert boolean(42, strict=False) is False
assert boolean(42.0, strict=False) is False
assert boolean(object(), strict=False) is False
def test_junk_values_strict(self):
with pytest.raises(TypeError):
assert boolean("flibbity", strict=True)is False
with pytest.raises(TypeError):
assert boolean(42, strict=True)is False
with pytest.raises(TypeError):
assert boolean(42.0, strict=True)is False
with pytest.raises(TypeError):
assert boolean(object(), strict=True)is False
| gpl-3.0 |
pigshell/nhnick | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/error_handlers_unittest.py | 122 | 8772 | # Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for error_handlers.py."""
import unittest2 as unittest
from checker import StyleProcessorConfiguration
from error_handlers import DefaultStyleErrorHandler
from filter import FilterConfiguration
class DefaultStyleErrorHandlerTest(unittest.TestCase):
"""Tests the DefaultStyleErrorHandler class."""
def setUp(self):
self._error_messages = []
self._error_count = 0
_category = "whitespace/tab"
"""The category name for the tests in this class."""
_file_path = "foo.h"
"""The file path for the tests in this class."""
def _mock_increment_error_count(self):
self._error_count += 1
def _mock_stderr_write(self, message):
self._error_messages.append(message)
def _style_checker_configuration(self):
"""Return a StyleProcessorConfiguration instance for testing."""
base_rules = ["-whitespace", "+whitespace/tab"]
filter_configuration = FilterConfiguration(base_rules=base_rules)
return StyleProcessorConfiguration(
filter_configuration=filter_configuration,
max_reports_per_category={"whitespace/tab": 2},
min_confidence=3,
output_format="vs7",
stderr_write=self._mock_stderr_write)
def _error_handler(self, configuration, line_numbers=None):
return DefaultStyleErrorHandler(configuration=configuration,
file_path=self._file_path,
increment_error_count=self._mock_increment_error_count,
line_numbers=line_numbers)
def _check_initialized(self):
"""Check that count and error messages are initialized."""
self.assertEqual(0, self._error_count)
self.assertEqual(0, len(self._error_messages))
def _call_error_handler(self, handle_error, confidence, line_number=100):
"""Call the given error handler with a test error."""
handle_error(line_number=line_number,
category=self._category,
confidence=confidence,
message="message")
def test_eq__true_return_value(self):
"""Test the __eq__() method for the return value of True."""
handler1 = self._error_handler(configuration=None)
handler2 = self._error_handler(configuration=None)
self.assertTrue(handler1.__eq__(handler2))
def test_eq__false_return_value(self):
"""Test the __eq__() method for the return value of False."""
def make_handler(configuration=self._style_checker_configuration(),
file_path='foo.txt', increment_error_count=lambda: True,
line_numbers=[100]):
return DefaultStyleErrorHandler(configuration=configuration,
file_path=file_path,
increment_error_count=increment_error_count,
line_numbers=line_numbers)
handler = make_handler()
# Establish a baseline for our comparisons below.
self.assertTrue(handler.__eq__(make_handler()))
# Verify that a difference in any argument causes equality to fail.
self.assertFalse(handler.__eq__(make_handler(configuration=None)))
self.assertFalse(handler.__eq__(make_handler(file_path='bar.txt')))
self.assertFalse(handler.__eq__(make_handler(increment_error_count=None)))
self.assertFalse(handler.__eq__(make_handler(line_numbers=[50])))
def test_ne(self):
"""Test the __ne__() method."""
# By default, __ne__ always returns true on different objects.
# Thus, check just the distinguishing case to verify that the
# code defines __ne__.
handler1 = self._error_handler(configuration=None)
handler2 = self._error_handler(configuration=None)
self.assertFalse(handler1.__ne__(handler2))
def test_non_reportable_error(self):
"""Test __call__() with a non-reportable error."""
self._check_initialized()
configuration = self._style_checker_configuration()
confidence = 1
# Confirm the error is not reportable.
self.assertFalse(configuration.is_reportable(self._category,
confidence,
self._file_path))
error_handler = self._error_handler(configuration)
self._call_error_handler(error_handler, confidence)
self.assertEqual(0, self._error_count)
self.assertEqual([], self._error_messages)
# Also serves as a reportable error test.
def test_max_reports_per_category(self):
"""Test error report suppression in __call__() method."""
self._check_initialized()
configuration = self._style_checker_configuration()
error_handler = self._error_handler(configuration)
confidence = 5
# First call: usual reporting.
self._call_error_handler(error_handler, confidence)
self.assertEqual(1, self._error_count)
self.assertEqual(1, len(self._error_messages))
self.assertEqual(self._error_messages,
["foo.h(100): message [whitespace/tab] [5]\n"])
# Second call: suppression message reported.
self._call_error_handler(error_handler, confidence)
# The "Suppressing further..." message counts as an additional
# message (but not as an addition to the error count).
self.assertEqual(2, self._error_count)
self.assertEqual(3, len(self._error_messages))
self.assertEqual(self._error_messages[-2],
"foo.h(100): message [whitespace/tab] [5]\n")
self.assertEqual(self._error_messages[-1],
"Suppressing further [whitespace/tab] reports "
"for this file.\n")
# Third call: no report.
self._call_error_handler(error_handler, confidence)
self.assertEqual(3, self._error_count)
self.assertEqual(3, len(self._error_messages))
def test_line_numbers(self):
"""Test the line_numbers parameter."""
self._check_initialized()
configuration = self._style_checker_configuration()
error_handler = self._error_handler(configuration,
line_numbers=[50])
confidence = 5
# Error on non-modified line: no error.
self._call_error_handler(error_handler, confidence, line_number=60)
self.assertEqual(0, self._error_count)
self.assertEqual([], self._error_messages)
# Error on modified line: error.
self._call_error_handler(error_handler, confidence, line_number=50)
self.assertEqual(1, self._error_count)
self.assertEqual(self._error_messages,
["foo.h(50): message [whitespace/tab] [5]\n"])
# Error on non-modified line after turning off line filtering: error.
error_handler.turn_off_line_filtering()
self._call_error_handler(error_handler, confidence, line_number=60)
self.assertEqual(2, self._error_count)
self.assertEqual(self._error_messages,
['foo.h(50): message [whitespace/tab] [5]\n',
'foo.h(60): message [whitespace/tab] [5]\n',
'Suppressing further [whitespace/tab] reports for this file.\n'])
| bsd-3-clause |
rui-castro/Sick-Beard | tests/db_tests.py | 73 | 1415 | # coding=UTF-8
# Author: Dennis Lutter <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import unittest
import test_lib as test
class DBBasicTests(test.SickbeardTestDBCase):
def setUp(self):
super(DBBasicTests, self).setUp()
self.db = test.db.DBConnection()
def test_select(self):
self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
if __name__ == '__main__':
print "=================="
print "STARTING - DB TESTS"
print "=================="
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(DBBasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 |
blackzw/openwrt_sdk_dev1 | staging_dir/host/lib/python2.7/test/test_fpformat.py | 129 | 2309 | '''
Tests for fpformat module
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import unittest
fpformat = import_module('fpformat', deprecated=True)
fix, sci, NotANumber = fpformat.fix, fpformat.sci, fpformat.NotANumber
StringType = type('')
# Test the old and obsolescent fpformat module.
#
# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
# sci(n,d) == "%.*e"%(d,n)
# for all reasonable numeric n and d, except that sci gives 3 exponent
# digits instead of 2.
#
# Differences only occur for unreasonable n and d. <.2 wink>)
class FpformatTest(unittest.TestCase):
def checkFix(self, n, digits):
result = fix(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*f" % (digits, float(n))
self.assertEqual(result, expected)
def checkSci(self, n, digits):
result = sci(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*e" % (digits, float(n))
# add the extra 0 if needed
num, exp = expected.split("e")
if len(exp) < 4:
exp = exp[0] + "0" + exp[1:]
expected = "%se%s" % (num, exp)
self.assertEqual(result, expected)
def test_basic_cases(self):
self.assertEqual(fix(100.0/3, 3), '33.333')
self.assertEqual(sci(100.0/3, 3), '3.333e+001')
def test_reasonable_values(self):
for d in range(7):
for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
for realVal in (val, 1.0/val, -val, -1.0/val):
self.checkFix(realVal, d)
self.checkSci(realVal, d)
def test_failing_values(self):
# Now for 'unreasonable n and d'
self.assertEqual(fix(1.0, 1000), '1.'+('0'*1000))
self.assertEqual(sci("1"+('0'*1000), 0), '1e+1000')
# This behavior is inconsistent. sci raises an exception; fix doesn't.
yacht = "Throatwobbler Mangrove"
self.assertEqual(fix(yacht, 10), yacht)
try:
sci(yacht, 10)
except NotANumber:
pass
else:
self.fail("No exception on non-numeric sci")
def test_main():
run_unittest(FpformatTest)
if __name__ == "__main__":
test_main()
| gpl-2.0 |