repo_name | path | copies | size | content | license
---|---|---|---|---|---
swirkert/ipcai2016 | msi/test/test_spectrometerreader.py | 1 | 1921 | # -*- coding: utf-8 -*-
"""
ipcai2016
Copyright (c) German Cancer Research Center,
Computer Assisted Interventions.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE for details
"""
"""
Created on Fri Aug 7 18:02:08 2015
@author: wirkert
"""
import unittest
from msi.io.spectrometerreader import SpectrometerReader
class TestSpectrometer(unittest.TestCase):
def setUp(self):
self.exampleFileName = "./msi/data/Transmission_15-49-35-978_filter700nm.txt"
self.reader = SpectrometerReader()
def tearDown(self):
pass
def test_create(self):
self.assertTrue(True, "Created empty reader during setup")
def test_read_spectrum(self):
msi = self.reader.read(self.exampleFileName)
self.assertAlmostEqual(msi.get_image()[0],
70.50,
msg="first spectral element is read correctly")
self.assertAlmostEqual(msi.get_image()[-1],
68.13,
msg="last spectral element is read correctly")
self.assertTrue(msi.get_image().size == 2048,
"correct number of elements read")
def test_read_wavelengths(self):
msi = self.reader.read(self.exampleFileName)
self.assertAlmostEqual(msi.get_wavelengths()[0],
187.255 * 10 ** -9,
msg="first wavelength element is read correctly")
self.assertAlmostEqual(msi.get_wavelengths()[-1],
1103.852 * 10 ** -9,
msg="last wavelength element is read correctly")
self.assertTrue(msi.get_wavelengths().size == 2048,
"correct number of elements read")
| bsd-3-clause |
thaim/ansible | test/units/modules/network/check_point/test_checkpoint_host.py | 40 | 3698 | # Copyright (c) 2018 Red Hat
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleFailJson, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import checkpoint_host
OBJECT = {'name': 'foo', 'ipv4-address': '192.168.0.15'}
CREATE_PAYLOAD = {'name': 'foo', 'ip_address': '192.168.0.15'}
UPDATE_PAYLOAD = {'name': 'foo', 'ip_address': '192.168.0.16'}
DELETE_PAYLOAD = {'name': 'foo', 'state': 'absent'}
class TestCheckpointHost(object):
module = checkpoint_host
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.modules.network.check_point.checkpoint_host.Connection')
return connection_class_mock.return_value
@pytest.fixture
def get_host_200(self, mocker):
mock_function = mocker.patch('ansible.modules.network.check_point.checkpoint_host.get_host')
mock_function.return_value = (200, OBJECT)
return mock_function.return_value
@pytest.fixture
def get_host_404(self, mocker):
mock_function = mocker.patch('ansible.modules.network.check_point.checkpoint_host.get_host')
mock_function.return_value = (404, 'Object not found')
return mock_function.return_value
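# Note: get_host_200 simulates an existing host object and get_host_404 a missing
# one; the tests below use them to exercise both the changed and idempotent paths.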
def test_create(self, get_host_404, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert 'checkpoint_hosts' in result
def test_create_idempotent(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
def test_delete(self, get_host_200, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, get_host_404, connection_mock):
connection_mock.send_request.return_value = (200, OBJECT)
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
def _run_module_with_fail_json(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleFailJson) as exc:
self.module.main()
result = exc.value.args[0]
return result
| mit |
MaplePlan/djwp | django/db/models/sql/compiler.py | 15 | 50040 | import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
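# Anything else is treated as a real table/column name; e.g. (hypothetical)
# 'auth_user' is quoted via connection.ops.quote_name() and cached for later lookups.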
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
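# as_pairs leaves formatting to the caller: e.g. (hypothetical) ('T1', 'id') is
# appended here, whereas the non-pair branch below builds the quoted SQL string.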
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
cols, alias = self._final_join_removal(cols, alias)
for col in cols:
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
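# e.g. (hypothetical) '-author__name' yields name='author__name', order='DESC' here;
# the eventual return value is then a list such as [(alias, ['name'], 'DESC')].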
pieces = name.split(LOOKUP_SEP)
field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and len(joins) > 1 and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
cols, alias = self._final_join_removal(cols, alias)
return [(alias, cols, order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct. This method will
call query.setup_joins, handle refcounts and then promote the joins.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, _ = self.query.setup_joins(
pieces, opts, alias)
# We will later on need to promote those joins that were added to the
# query afresh above.
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
cols = [target.column for target in targets]
if not field.rel:
# To avoid inadvertent trimming of a necessary alias, use the
# refcount to show that we are referencing a non-relation field on
# the model.
self.query.ref_alias(alias)
# Must use left outer joins for nullable fields and their relations.
# Ordering or distinct must not affect the returned set, and INNER
# JOINS for nullable fields could do this.
self.query.promote_joins(joins_to_promote)
return field, cols, alias, joins, opts
def _final_join_removal(self, cols, alias):
"""
A helper method for get_distinct and get_ordering. This method will
trim extra not-needed joins from the tail of the join chain.
This is very similar to what is done in trim_joins, but we will
trim LEFT JOINS here. It would be a good idea to consolidate this
method and query.trim_joins().
"""
if alias:
while 1:
join = self.query.alias_map[alias]
lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
if set(cols) != set(rhs_cols):
break
cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
self.query.unref_alias(alias)
alias = join.lhs_alias
return cols, alias
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list of any extra parameters that
need to be included. Sub-classes can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so we add the same SQL in the "has params" case.
if order not in seen or order_params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
# Use True here because we are looking at the _reverse_ side of
# the relation, which is always nullable.
new_nullable = True
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
fields = [f for f in fields if f.model._meta.db_table not in only_load or
f.column in only_load[f.model._meta.db_table]]
if has_aggregate_select:
# pad None in to fields for aggregates
fields = fields[:aggregate_start] + [
None for x in range(0, aggregate_end - aggregate_start)
] + fields[aggregate_start:]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
cursor.execute(sql, params)
if not result_type:
return cursor
if result_type == SINGLE:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
return result
def as_subquery_condition(self, alias, columns, qn):
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple([v for val in values for v in val]))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
cursor = self.connection.cursor()
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = val.as_sql(qn, self.connection)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
del cursor
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query.bump_prefix()
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self.quote_name_unless_alias
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.util import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.dates(). Are time zone "
"definitions and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
| lgpl-3.0 |
rmcgibbo/scipy | scipy/sparse/csr.py | 39 | 14549 | """Compressed Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['csr_matrix', 'isspmatrix_csr']
import numpy as np
from scipy._lib.six import xrange
from ._sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \
get_csr_submatrix, csr_sample_values
from .sputils import upcast, isintlike, IndexMixin, issequence, get_index_dtype
from .compressed import _cs_matrix
class csr_matrix(_cs_matrix, IndexMixin):
"""
Compressed Sparse Row matrix
This can be instantiated in several ways:
csr_matrix(D)
with a dense matrix or rank-2 ndarray D
csr_matrix(S)
with another sparse matrix S (equivalent to S.tocsr())
csr_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
where ``data``, ``row_ind`` and ``col_ind`` satisfy the
relationship ``a[row_ind[k], col_ind[k]] = data[k]``.
csr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their
corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``.
If the shape parameter is not supplied, the matrix dimensions
are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
CSR format data array of the matrix
indices
CSR format index array of the matrix
indptr
CSR format index pointer array of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the CSR format
- efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
- efficient row slicing
- fast matrix vector products
Disadvantages of the CSR format
- slow column slicing operations (consider CSC)
- changes to the sparsity structure are expensive (consider LIL or DOK)
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csr_matrix
>>> csr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
As an example of how to construct a CSR matrix incrementally,
the following snippet builds a term-document matrix from texts:
>>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
>>> indptr = [0]
>>> indices = []
>>> data = []
>>> vocabulary = {}
>>> for d in docs:
... for term in d:
... index = vocabulary.setdefault(term, len(vocabulary))
... indices.append(index)
... data.append(1)
... indptr.append(len(indices))
...
>>> csr_matrix((data, indices, indptr), dtype=int).toarray()
array([[2, 1, 0, 0],
[0, 1, 1, 1]])
"""
def transpose(self, copy=False):
from .csc import csc_matrix
M,N = self.shape
return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy)
def tolil(self):
from .lil import lil_matrix
lil = lil_matrix(self.shape,dtype=self.dtype)
self.sort_indices() # lil_matrix needs sorted column indices
ptr,ind,dat = self.indptr,self.indices,self.data
rows, data = lil.rows, lil.data
for n in xrange(self.shape[0]):
start = ptr[n]
end = ptr[n+1]
rows[n] = ind[start:end].tolist()
data[n] = dat[start:end].tolist()
return lil
def tocsr(self, copy=False):
if copy:
return self.copy()
else:
return self
def tocsc(self):
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(self.nnz, self.shape[0]))
indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
csr_tocsc(self.shape[0], self.shape[1],
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr,
indices,
data)
from .csc import csc_matrix
A = csc_matrix((data, indices, indptr), shape=self.shape)
A.has_sorted_indices = True
return A
def tobsr(self, blocksize=None, copy=True):
from .bsr import bsr_matrix
if blocksize is None:
from .spfuncs import estimate_blocksize
return self.tobsr(blocksize=estimate_blocksize(self))
elif blocksize == (1,1):
arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
return bsr_matrix(arg1, shape=self.shape, copy=copy)
else:
R,C = blocksize
M,N = self.shape
if R < 1 or C < 1 or M % R != 0 or N % C != 0:
raise ValueError('invalid blocksize %s' % blocksize)
blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)
idx_dtype = get_index_dtype((self.indptr, self.indices),
maxval=max(N//C, blks))
indptr = np.empty(M//R+1, dtype=idx_dtype)
indices = np.empty(blks, dtype=idx_dtype)
data = np.zeros((blks,R,C), dtype=self.dtype)
csr_tobsr(M, N, R, C,
self.indptr.astype(idx_dtype),
self.indices.astype(idx_dtype),
self.data,
indptr, indices, data.ravel())
return bsr_matrix((data,indices,indptr), shape=self.shape)
# these functions are used by the parent class (_cs_matrix)
# to remove redundancy between csc_matrix and csr_matrix
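# (csr_matrix._swap returns the pair unchanged because CSR is row-oriented; the
# csc_matrix counterpart swaps the two entries.)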
def _swap(self,x):
"""swap the members of x if this is a column-oriented matrix
"""
return (x[0],x[1])
def __getitem__(self, key):
def asindices(x):
try:
x = np.asarray(x)
# Check index contents, to avoid creating 64-bit arrays needlessly
idx_dtype = get_index_dtype((x,), check_contents=True)
if idx_dtype != x.dtype:
x = x.astype(idx_dtype)
except:
raise IndexError('invalid index')
else:
return x
def check_bounds(indices, N):
if indices.size == 0:
return (0, 0)
max_indx = indices.max()
if max_indx >= N:
raise IndexError('index (%d) out of range' % max_indx)
min_indx = indices.min()
if min_indx < -N:
raise IndexError('index (%d) out of range' % (N + min_indx))
return (min_indx,max_indx)
def extractor(indices,N):
"""Return a sparse matrix P so that P*self implements
slicing of the form self[[1,2,3],:]
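For example (hypothetical values), indices=[1, 2] over N columns gives a 2 x N
matrix with a single 1 per row, so P*self picks rows 1 and 2, in that order.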
"""
indices = asindices(indices)
(min_indx,max_indx) = check_bounds(indices,N)
if min_indx < 0:
indices = indices.copy()
indices[indices < 0] += N
indptr = np.arange(len(indices)+1, dtype=indices.dtype)
data = np.ones(len(indices), dtype=self.dtype)
shape = (len(indices),N)
return csr_matrix((data,indices,indptr), shape=shape)
row, col = self._unpack_index(key)
# First attempt to use original row optimized methods
# [1, ?]
if isintlike(row):
# [i, j]
if isintlike(col):
return self._get_single_element(row, col)
# [i, 1:2]
elif isinstance(col, slice):
return self._get_row_slice(row, col)
# [i, [1, 2]]
elif issequence(col):
P = extractor(col,self.shape[1]).T
return self[row, :] * P
elif isinstance(row, slice):
# [1:2,??]
if ((isintlike(col) and row.step in (1, None)) or
(isinstance(col, slice) and
col.step in (1, None) and
row.step in (1, None))):
# col is int or slice with step 1, row is slice with step 1.
return self._get_submatrix(row, col)
elif issequence(col):
# row is slice, col is sequence.
P = extractor(col,self.shape[1]).T # [1:2,[1,2]]
sliced = self
if row != slice(None, None, None):
sliced = sliced[row,:]
return sliced * P
elif issequence(row):
# [[1,2],??]
if isintlike(col) or isinstance(col,slice):
P = extractor(row, self.shape[0]) # [[1,2],j] or [[1,2],1:2]
extracted = P * self
if col == slice(None, None, None):
return extracted
else:
return extracted[:,col]
if not (issequence(col) and issequence(row)):
# Sample elementwise
row, col = self._index_to_arrays(row, col)
row = asindices(row)
col = asindices(col)
if row.shape != col.shape:
raise IndexError('number of row and column indices differ')
assert row.ndim <= 2
num_samples = np.size(row)
if num_samples == 0:
return csr_matrix(np.atleast_2d(row).shape, dtype=self.dtype)
check_bounds(row, self.shape[0])
check_bounds(col, self.shape[1])
val = np.empty(num_samples, dtype=self.dtype)
csr_sample_values(self.shape[0], self.shape[1],
self.indptr, self.indices, self.data,
num_samples, row.ravel(), col.ravel(), val)
if row.ndim == 1:
# row and col are 1d
return np.asmatrix(val)
return self.__class__(val.reshape(row.shape))
def getrow(self, i):
"""Returns a copy of row i of the matrix, as a (1 x n)
CSR matrix (row vector).
"""
return self._get_submatrix(i, slice(None))
def getcol(self, i):
"""Returns a copy of column i of the matrix, as a (m x 1)
CSR matrix (column vector).
"""
return self._get_submatrix(slice(None), i)
def _get_row_slice(self, i, cslice):
"""Returns a copy of row self[i, cslice]
"""
if i < 0:
i += self.shape[0]
if i < 0 or i >= self.shape[0]:
raise IndexError('index (%d) out of range' % i)
start, stop, stride = cslice.indices(self.shape[1])
if stride == 1:
# for stride == 1, _get_submatrix is ~30% faster than below
row_slice = self._get_submatrix(i, cslice)
else:
# other strides need new code
row_indices = self.indices[self.indptr[i]:self.indptr[i + 1]]
row_data = self.data[self.indptr[i]:self.indptr[i + 1]]
if stride > 0:
ind = (row_indices >= start) & (row_indices < stop)
elif stride < 0:
ind = (row_indices <= start) & (row_indices > stop)
if abs(stride) > 1:
ind = ind & ((row_indices - start) % stride == 0)
row_indices = (row_indices[ind] - start) // stride
row_data = row_data[ind]
row_indptr = np.array([0, len(row_indices)])
if stride < 0:
row_data = row_data[::-1]
row_indices = abs(row_indices[::-1])
shape = (1, int(np.ceil(float(stop - start) / stride)))
row_slice = csr_matrix((row_data, row_indices, row_indptr),
shape=shape)
return row_slice
def _get_submatrix(self, row_slice, col_slice):
"""Return a submatrix of this matrix (new matrix is created)."""
M,N = self.shape
def process_slice(sl, num):
if isinstance(sl, slice):
if sl.step not in (1, None):
raise ValueError('slicing with step != 1 not supported')
i0, i1 = sl.start, sl.stop
if i0 is None:
i0 = 0
elif i0 < 0:
i0 = num + i0
if i1 is None:
i1 = num
elif i1 < 0:
i1 = num + i1
return i0, i1
elif isintlike(sl):
if sl < 0:
sl += num
return sl, sl + 1
else:
raise TypeError('expected slice or scalar')
def check_bounds(i0, i1, num):
if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1):
raise IndexError(
"index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d,"
" %d <= %d" % (i0, num, i1, num, i0, i1))
i0, i1 = process_slice(row_slice, M)
j0, j1 = process_slice(col_slice, N)
check_bounds(i0, i1, M)
check_bounds(j0, j1, N)
indptr, indices, data = get_csr_submatrix(M, N,
self.indptr, self.indices, self.data,
int(i0), int(i1), int(j0), int(j1))
shape = (i1 - i0, j1 - j0)
return self.__class__((data,indices,indptr), shape=shape)
def isspmatrix_csr(x):
return isinstance(x, csr_matrix)
| bsd-3-clause |
Qalthos/ansible | lib/ansible/modules/network/f5/bigip_gtm_virtual_server.py | 38 | 40714 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_virtual_server
short_description: Manages F5 BIG-IP GTM virtual servers
description:
- Manages F5 BIG-IP GTM virtual servers. A GTM server can have many virtual servers
associated with it. They are arranged in much the same way that pool members are
to pools.
version_added: 2.6
options:
name:
description:
- Specifies the name of the virtual server.
type: str
version_added: 2.6
server_name:
description:
- Specifies the name of the server that the virtual server is associated with.
type: str
version_added: 2.6
address:
description:
- Specifies the IP Address of the virtual server.
- When creating a new GTM virtual server, this parameter is required.
type: str
version_added: 2.6
port:
description:
- Specifies the service port number for the virtual server or pool member. For example,
the HTTP service is typically port 80.
- To specify all ports, use an C(*).
- When creating a new GTM virtual server, if this parameter is not specified, a
default of C(*) will be used.
type: int
translation_address:
description:
- Specifies the translation IP address for the virtual server.
- To unset this parameter, provide an empty string (C("")) as a value.
- When creating a new GTM virtual server, if this parameter is not specified, a
default of C(::) will be used.
type: str
version_added: 2.6
translation_port:
description:
- Specifies the translation port number or service name for the virtual server.
- To specify all ports, use an C(*).
- When creating a new GTM virtual server, if this parameter is not specified, a
default of C(*) will be used.
type: str
version_added: 2.6
availability_requirements:
description:
- Specifies, if you activate more than one health monitor, the number of health
monitors that must receive successful responses in order for the link to be
considered available.
type: dict
suboptions:
type:
description:
- Monitor rule type when C(monitors) is specified.
- When creating a new virtual, if this value is not specified, the default of 'all' will be used.
type: str
choices:
- all
- at_least
- require
at_least:
description:
- Specifies the minimum number of active health monitors that must be successful
before the link is considered up.
- This parameter is only relevant when a C(type) of C(at_least) is used.
- This parameter will be ignored if a type of either C(all) or C(require) is used.
type: int
number_of_probes:
description:
- Specifies the minimum number of probes that must succeed for this server to be declared up.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probers)
parameter must also be specified.
- The value of this parameter should always be B(lower) than, or B(equal to), the value of C(number_of_probers).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
type: int
number_of_probers:
description:
- Specifies the number of probers that should be used when running probes.
- When creating a new virtual server, if this parameter is specified, then the C(number_of_probes)
parameter must also be specified.
          - The value of this parameter should always be B(higher) than, or B(equal to), the value of C(number_of_probes).
- This parameter is only relevant when a C(type) of C(require) is used.
- This parameter will be ignored if a type of either C(all) or C(at_least) is used.
type: int
version_added: 2.6
monitors:
description:
- Specifies the health monitors that the system currently uses to monitor this resource.
- When C(availability_requirements.type) is C(require), you may only have a single monitor in the
C(monitors) list.
type: list
version_added: 2.6
virtual_server_dependencies:
description:
- Specifies the virtual servers on which the current virtual server depends.
- If any of the specified servers are unavailable, the current virtual server is also listed as unavailable.
type: list
suboptions:
server:
description:
- Server which the dependant virtual server is part of.
type: str
required: True
virtual_server:
description:
- Virtual server to depend on.
type: str
required: True
version_added: 2.6
link:
description:
- Specifies a link to assign to the server or virtual server.
type: str
version_added: 2.6
limits:
description:
- Specifies resource thresholds or limit requirements at the server level.
- When you enable one or more limit settings, the system then uses that data to take servers in and out
of service.
- You can define limits for any or all of the limit settings. However, when a server does not meet the resource
threshold limit requirement, the system marks the entire server as unavailable and directs load-balancing
traffic to another resource.
- The limit settings available depend on the type of server.
type: dict
suboptions:
bits_enabled:
description:
- Whether the bits limit is enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
packets_enabled:
description:
- Whether the packets limit is enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
connections_enabled:
description:
- Whether the current connections limit is enabled or not.
- This parameter allows you to switch on or off the effect of the limit.
type: bool
bits_limit:
description:
- Specifies the maximum allowable data throughput rate, in bits per second, for the virtual servers on the server.
- If the network traffic volume exceeds this limit, the system marks the server as unavailable.
type: int
packets_limit:
description:
- Specifies the maximum allowable data transfer rate, in packets per second, for the virtual servers on the server.
- If the network traffic volume exceeds this limit, the system marks the server as unavailable.
type: int
connections_limit:
description:
- Specifies the maximum number of concurrent connections, combined, for all of the virtual servers on the server.
- If the connections exceed this limit, the system marks the server as unavailable.
type: int
version_added: 2.6
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.6
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
- enabled
- disabled
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Enable virtual server
bigip_gtm_virtual_server:
server_name: server1
name: my-virtual-server
state: enabled
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
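# A second, illustrative task (the address, port, monitor names and credentials
# below are assumed values, not taken from a real environment): create a virtual
# server with two monitors and an 'at_least' availability requirement.
- name: Create virtual server with monitors
  bigip_gtm_virtual_server:
    server_name: server1
    name: my-virtual-server
    address: 10.1.1.1
    port: 80
    availability_requirements:
      type: at_least
      at_least: 1
    monitors:
      - /Common/tcp
      - /Common/http
    state: present
    provider:
      user: admin
      password: secret
      server: lb.mydomain.com
  delegate_to: localhost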
'''
RETURN = r'''
server_name:
description: The server name associated with the virtual server.
returned: changed
type: str
sample: /Common/my-gtm-server
address:
description: The new address of the resource.
returned: changed
type: str
sample: 1.2.3.4
port:
description: The new port of the resource.
returned: changed
type: int
sample: 500
translation_address:
description: The new translation address of the resource.
returned: changed
  type: str
  sample: 1.2.3.4
translation_port:
description: The new translation port of the resource.
returned: changed
type: int
sample: 500
availability_requirements:
description: The new availability requirement configurations for the resource.
returned: changed
type: dict
sample: {'type': 'all'}
monitors:
description: The new list of monitors for the resource.
returned: changed
type: list
sample: ['/Common/monitor1', '/Common/monitor2']
virtual_server_dependencies:
description: The new list of virtual server dependencies for the resource
returned: changed
type: list
sample: ['/Common/vs1', '/Common/vs2']
link:
description: The new link value for the resource.
returned: changed
type: str
sample: /Common/my-link
limits:
description: The new limit configurations for the resource.
returned: changed
type: dict
sample: { 'bits_enabled': true, 'bits_limit': 100 }
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.compat.ipaddress import ip_address
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.compare import compare_complex_list
from library.module_utils.network.f5.icontrol import module_provisioned
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.ipaddress import validate_ip_v6_address
except ImportError:
from ansible.module_utils.compat.ipaddress import ip_address
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.compare import compare_complex_list
from ansible.module_utils.network.f5.icontrol import module_provisioned
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.ipaddress import validate_ip_v6_address
class Parameters(AnsibleF5Parameters):
api_map = {
'limitMaxBps': 'bits_limit',
'limitMaxBpsStatus': 'bits_enabled',
'limitMaxConnections': 'connections_limit',
'limitMaxConnectionsStatus': 'connections_enabled',
'limitMaxPps': 'packets_limit',
'limitMaxPpsStatus': 'packets_enabled',
'translationAddress': 'translation_address',
'translationPort': 'translation_port',
'dependsOn': 'virtual_server_dependencies',
'explicitLinkName': 'link',
'monitor': 'monitors'
}
api_attributes = [
'dependsOn',
'destination',
'disabled',
'enabled',
'explicitLinkName',
'limitMaxBps',
'limitMaxBpsStatus',
'limitMaxConnections',
'limitMaxConnectionsStatus',
'limitMaxPps',
'limitMaxPpsStatus',
'translationAddress',
'translationPort',
'monitor',
]
returnables = [
'bits_enabled',
'bits_limit',
'connections_enabled',
'connections_limit',
'destination',
'disabled',
'enabled',
'link',
'monitors',
'packets_enabled',
'packets_limit',
'translation_address',
'translation_port',
'virtual_server_dependencies',
'availability_requirements',
]
updatables = [
'bits_enabled',
'bits_limit',
'connections_enabled',
'connections_limit',
'destination',
'enabled',
'link',
'monitors',
'packets_limit',
'packets_enabled',
'translation_address',
'translation_port',
'virtual_server_dependencies',
]
class ApiParameters(Parameters):
@property
def address(self):
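        # 'destination' packs address and port together, e.g. "10.10.10.10:80" for
        # IPv4; IPv6 destinations use a '.' before the port (e.g. "2001:db8::1.80"),
        # which is why the separator used for splitting differs below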
if self._values['destination'].count(':') >= 2:
# IPv6
parts = self._values['destination'].split('.')
else:
# IPv4
parts = self._values['destination'].split(':')
if is_valid_ip(parts[0]):
return str(parts[0])
raise F5ModuleError(
"'address' parameter from API was not an IP address."
)
@property
def port(self):
if self._values['destination'].count(':') >= 2:
# IPv6
parts = self._values['destination'].split('.')
return parts[1]
# IPv4
parts = self._values['destination'].split(':')
return int(parts[1])
@property
def virtual_server_dependencies(self):
if self._values['virtual_server_dependencies'] is None:
return None
results = []
for dependency in self._values['virtual_server_dependencies']:
parts = dependency['name'].split(':')
result = dict(
server=parts[0],
virtual_server=parts[1],
)
results.append(result)
if results:
results = sorted(results, key=lambda k: k['server'])
return results
@property
def enabled(self):
if 'enabled' in self._values:
return True
else:
return False
@property
def disabled(self):
if 'disabled' in self._values:
return True
return False
@property
def availability_requirement_type(self):
if self._values['monitors'] is None:
return None
if 'min ' in self._values['monitors']:
return 'at_least'
elif 'require ' in self._values['monitors']:
return 'require'
else:
return 'all'
@property
def monitors_list(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def monitors(self):
if self._values['monitors'] is None:
return None
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
@property
def number_of_probes(self):
"""Returns the probes value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the first of the numeric values. This value represents
        the "probes" value that can be updated in the module.
Returns:
int: The probes value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+(?P<probes>\d+)\s+from'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probes')
@property
def number_of_probers(self):
"""Returns the probers value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the second of the numeric values. This value represents
        the "probers" value that can be updated in the module.
Returns:
int: The probers value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+\d+\s+from\s+(?P<probers>\d+)\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('probers')
@property
def at_least(self):
"""Returns the 'at least' value from the monitor string.
        The monitor string for an 'At Least' monitor looks like this.
            min 1 of { /Common/gateway_icmp }
        This method parses out the numeric value. This value represents
        the "at_least" value that can be updated in the module.
Returns:
int: The at_least value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'min\s+(?P<least>\d+)\s+of\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return matches.group('least')
class ModuleParameters(Parameters):
def _get_limit_value(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
return int(self._values['limits'][type])
def _get_availability_value(self, type):
if self._values['availability_requirements'] is None:
return None
if self._values['availability_requirements'][type] is None:
return None
return int(self._values['availability_requirements'][type])
def _get_limit_status(self, type):
if self._values['limits'] is None:
return None
if self._values['limits'][type] is None:
return None
if self._values['limits'][type]:
return 'enabled'
return 'disabled'
@property
def address(self):
if self._values['address'] is None:
return None
if is_valid_ip(self._values['address']):
ip = str(ip_address(u'{0}'.format(self._values['address'])))
return ip
raise F5ModuleError(
"Specified 'address' is not an IP address."
)
@property
def port(self):
if self._values['port'] is None:
return None
if self._values['port'] == '*':
return 0
return int(self._values['port'])
@property
def destination(self):
if self.address is None:
return None
if self.port is None:
return None
if validate_ip_v6_address(self.address):
result = '{0}.{1}'.format(self.address, self.port)
else:
result = '{0}:{1}'.format(self.address, self.port)
return result
@property
def link(self):
if self._values['link'] is None:
return None
return fq_name(self.partition, self._values['link'])
@property
def bits_limit(self):
return self._get_limit_value('bits_limit')
@property
def packets_limit(self):
return self._get_limit_value('packets_limit')
@property
def connections_limit(self):
return self._get_limit_value('connections_limit')
@property
def bits_enabled(self):
return self._get_limit_status('bits_enabled')
@property
def packets_enabled(self):
return self._get_limit_status('packets_enabled')
@property
def connections_enabled(self):
return self._get_limit_status('connections_enabled')
@property
def translation_address(self):
if self._values['translation_address'] is None:
return None
if self._values['translation_address'] == '':
return 'none'
return self._values['translation_address']
@property
def translation_port(self):
if self._values['translation_port'] is None:
return None
if self._values['translation_port'] in ['*', ""]:
return 0
return int(self._values['translation_port'])
@property
def virtual_server_dependencies(self):
if self._values['virtual_server_dependencies'] is None:
return None
results = []
for dependency in self._values['virtual_server_dependencies']:
result = dict(
server=fq_name(self.partition, dependency['server']),
virtual_server=os.path.basename(dependency['virtual_server'])
)
results.append(result)
if results:
results = sorted(results, key=lambda k: k['server'])
return results
@property
def enabled(self):
if self._values['state'] == 'enabled':
return True
elif self._values['state'] == 'disabled':
return False
else:
return None
@property
def disabled(self):
if self._values['state'] == 'enabled':
return False
elif self._values['state'] == 'disabled':
return True
else:
return None
@property
def monitors_list(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def monitors(self):
if self._values['monitors'] is None:
return None
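        # builds the monitor expression string: "min N of { ... }" for 'at_least',
        # "require N from M { ... }" for 'require', otherwise the monitors joined
        # with " and "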
monitors = [fq_name(self.partition, x) for x in self.monitors_list]
if self.availability_requirement_type == 'at_least':
if self.at_least > len(self.monitors_list):
raise F5ModuleError(
"The 'at_least' value must not exceed the number of 'monitors'."
)
monitors = ' '.join(monitors)
result = 'min {0} of {{ {1} }}'.format(self.at_least, monitors)
elif self.availability_requirement_type == 'require':
monitors = ' '.join(monitors)
if self.number_of_probes > self.number_of_probers:
raise F5ModuleError(
"The 'number_of_probes' must not exceed the 'number_of_probers'."
)
result = 'require {0} from {1} {{ {2} }}'.format(self.number_of_probes, self.number_of_probers, monitors)
else:
result = ' and '.join(monitors).strip()
return result
@property
def availability_requirement_type(self):
if self._values['availability_requirements'] is None:
return None
return self._values['availability_requirements']['type']
@property
def number_of_probes(self):
return self._get_availability_value('number_of_probes')
@property
def number_of_probers(self):
return self._get_availability_value('number_of_probers')
@property
def at_least(self):
return self._get_availability_value('at_least')
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def virtual_server_dependencies(self):
if self._values['virtual_server_dependencies'] is None:
return None
results = []
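        # the API stores each dependency as a single "dependsOn" entry whose name
        # is "<server>:<virtual_server>" (the reverse of the split performed in
        # ApiParameters.virtual_server_dependencies above)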
for depend in self._values['virtual_server_dependencies']:
name = '{0}:{1}'.format(depend['server'], depend['virtual_server'])
results.append(dict(name=name))
return results
@property
def monitors(self):
monitor_string = self._values['monitors']
if monitor_string is None:
return None
        if '{' in monitor_string and '}' in monitor_string:
tmp = monitor_string.strip('}').split('{')
monitor = ''.join(tmp).rstrip()
return monitor
return monitor_string
class ReportableChanges(Changes):
@property
def monitors(self):
if self._values['monitors'] is None:
return []
try:
result = re.findall(r'/\w+/[^\s}]+', self._values['monitors'])
result.sort()
return result
except Exception:
return self._values['monitors']
@property
def availability_requirement_type(self):
if self._values['monitors'] is None:
return None
if 'min ' in self._values['monitors']:
return 'at_least'
elif 'require ' in self._values['monitors']:
return 'require'
else:
return 'all'
@property
def number_of_probes(self):
"""Returns the probes value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the first of the numeric values. This value represents
        the "probes" value that can be updated in the module.
Returns:
int: The probes value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+(?P<probes>\d+)\s+from'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('probes'))
@property
def number_of_probers(self):
"""Returns the probers value from the monitor string.
The monitor string for a Require monitor looks like this.
require 1 from 2 { /Common/tcp }
        This method parses out the second of the numeric values. This value represents
        the "probers" value that can be updated in the module.
Returns:
int: The probers value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'require\s+\d+\s+from\s+(?P<probers>\d+)\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('probers'))
@property
def at_least(self):
"""Returns the 'at least' value from the monitor string.
        The monitor string for an 'At Least' monitor looks like this.
            min 1 of { /Common/gateway_icmp }
        This method parses out the numeric value. This value represents
        the "at_least" value that can be updated in the module.
Returns:
int: The at_least value if found. None otherwise.
"""
if self._values['monitors'] is None:
return None
pattern = r'min\s+(?P<least>\d+)\s+of\s+'
matches = re.search(pattern, self._values['monitors'])
if matches is None:
return None
return int(matches.group('least'))
@property
def availability_requirements(self):
if self._values['monitors'] is None:
return None
result = dict()
result['type'] = self.availability_requirement_type
result['at_least'] = self.at_least
result['number_of_probers'] = self.number_of_probers
result['number_of_probes'] = self.number_of_probes
return result
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def destination(self):
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.address is None:
self.want.update({'address': self.have.address})
if self.want.destination != self.have.destination:
return self.want.destination
@property
def virtual_server_dependencies(self):
if self.have.virtual_server_dependencies is None:
return self.want.virtual_server_dependencies
if self.want.virtual_server_dependencies is None and self.have.virtual_server_dependencies is None:
return None
if self.want.virtual_server_dependencies is None:
return None
result = compare_complex_list(self.want.virtual_server_dependencies, self.have.virtual_server_dependencies)
return result
@property
def enabled(self):
if self.want.state == 'enabled' and self.have.disabled:
result = dict(
enabled=True,
disabled=False
)
return result
elif self.want.state == 'disabled' and self.have.enabled:
result = dict(
enabled=False,
disabled=True
)
return result
@property
def monitors(self):
if self.have.monitors is None:
return self.want.monitors
if self.have.monitors != self.want.monitors:
return self.want.monitors
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == 'absent':
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.want.port in [None, ""]:
self.want.update({'port': '*'})
if self.want.translation_port in [None, ""]:
self.want.update({'translation_port': '*'})
if self.want.translation_address in [None, ""]:
self.want.update({'translation_address': '::'})
self._set_changed_options()
if self.want.address is None:
raise F5ModuleError(
"You must supply an 'address' when creating a new virtual server."
)
if self.want.availability_requirement_type == 'require' and len(self.want.monitors_list) > 1:
raise F5ModuleError(
"Only one monitor may be specified when using an availability_requirement type of 'require'"
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.server_name),
transform_name(name=self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.server_name)
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.server_name),
transform_name(name=self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.server_name),
transform_name(name=self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/server/{2}/virtual-servers/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.server_name),
transform_name(name=self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
server_name=dict(required=True),
address=dict(),
port=dict(type='int'),
translation_address=dict(),
translation_port=dict(),
availability_requirements=dict(
type='dict',
options=dict(
type=dict(
choices=['all', 'at_least', 'require'],
required=True
),
at_least=dict(type='int'),
number_of_probes=dict(type='int'),
number_of_probers=dict(type='int')
),
mutually_exclusive=[
['at_least', 'number_of_probes'],
['at_least', 'number_of_probers'],
],
required_if=[
['type', 'at_least', ['at_least']],
['type', 'require', ['number_of_probes', 'number_of_probers']]
]
),
monitors=dict(type='list'),
virtual_server_dependencies=dict(
type='list',
options=dict(
server=dict(required=True),
virtual_server=dict(required=True)
)
),
link=dict(),
limits=dict(
type='dict',
options=dict(
bits_enabled=dict(type='bool'),
packets_enabled=dict(type='bool'),
connections_enabled=dict(type='bool'),
bits_limit=dict(type='int'),
packets_limit=dict(type='int'),
connections_limit=dict(type='int')
)
),
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
broadcastyourseb/SADR | raspberry/allskySCRIPT/dev/test.py | 1 | 6342 | #!/usr/bin/python
#-*- coding: iso-8859-15 -*-
# SADR METEOLLSKY
# http://www.sadr.fr
# SEBASTIEN LECLERC 2018
# Inspired by Marcus Degenkolbe
# http://indilib.org/develop/tutorials/151-time-lapse-astrophotography-with-indi-python.html
# allsky frame script
import sys, time, logging
import PyIndi
import pyfits
import cv2
from meteollskyconfig import *
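# Rough control flow of this INDI client (summarised from the callbacks below):
# newDevice() keeps a reference to the INDIDEVICE camera, newProperty() connects
# the device, zeroes the USB traffic/speed settings and starts the first exposure,
# newBLOB() writes the received FITS frame to disk, runs imageProcessing() and
# queues the next exposure, so the script loops on exposures while connected.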
class IndiClient(PyIndi.BaseClient):
global exposition
exposition = 0.000001
global USB_TRAFFIC
USB_TRAFFIC=0
global USB_SPEED
USB_SPEED=0
def __init__(self):
super(IndiClient, self).__init__()
self.logger = logging.getLogger('PyQtIndi.IndiClient')
self.logger.info('creating an instance of PyQtIndi.IndiClient')
def newDevice(self, d):
self.logger.info("new device " + d.getDeviceName())
if d.getDeviceName() == INDIDEVICE:
self.logger.info("Set new device %s!" % INDIDEVICE)
# save reference to the device in member variable
self.device = d
def newProperty(self, p):
self.logger.info("new property "+ p.getName() + " for device "+ p.getDeviceName())
if self.device is not None and p.getName() == "CONNECTION" and p.getDeviceName() == self.device.getDeviceName():
self.logger.info("Got property CONNECTION for %s!" % INDIDEVICE)
# connect to device
self.connectDevice(self.device.getDeviceName())
# set BLOB mode to BLOB_ALSO
self.setBLOBMode(1, self.device.getDeviceName(), None)
if p.getName() == "CCD_EXPOSURE":
# take first exposure
self.takeExposure()
if p.getName() == "USB_TRAFFIC":
traffic = self.device.getNumber("USB_TRAFFIC")
print ("USB Traffic: "+str(traffic[0].value))
            if traffic[0].value != 0:
traffic[0].value = 0
self.sendNewNumber(traffic)
if p.getName() == "USB_SPEED":
speed = self.device.getNumber("USB_SPEED")
print ("USB Speed: "+str(speed[0].value))
            if speed[0].value != 0:
speed[0].value = 0
self.sendNewNumber(speed)
def removeProperty(self, p):
self.logger.info("remove property "+ p.getName() + " for device "+ p.getDeviceName())
def newBLOB(self, bp):
self.logger.info("new BLOB "+ bp.name)
# get image data
img = bp.getblobdata()
# write image data to BytesIO buffer
import io
blobfile = io.BytesIO(img)
# open a file and save buffer to disk
with open(CHARTPATH+"dev/frame.fits", "wb") as f:
f.write(blobfile.getvalue())
self.imageProcessing()
self.takeExposure()
def newSwitch(self, svp):
self.logger.info ("new Switch "+ svp.name + " for device "+ svp.device)
self.logger.info ("label "+ svp.label)
self.logger.info ("state "+ str(svp.s))
def newNumber(self, nvp):
self.logger.info("new Number "+ nvp.name + " for device "+ nvp.device)
def newText(self, tvp):
self.logger.info("new Text "+ tvp.name + " for device "+ tvp.device)
self.logger.info("label "+ tvp.label)
def newLight(self, lvp):
self.logger.info("new Light "+ lvp.name + " for device "+ lvp.device)
def newMessage(self, d, m):
#self.logger.info("new Message "+ d.messageQueue(m))
pass
def serverConnected(self):
print("Server connected ("+self.getHost()+":"+str(self.getPort())+")")
def serverDisconnected(self, code):
self.logger.info("Server disconnected (exit code = "+str(code)+","+str(self.getHost())+":"+str(self.getPort())+")")
def takeExposure(self):
self.logger.info(">>>>>>>>")
        # get current exposure property
        exp = self.device.getNumber("CCD_EXPOSURE")
        # set exposure time to the current 'exposition' value
exp[0].value = exposition
# send new exposure time to server/device
#time.sleep(1000)
self.sendNewNumber(exp)
def imageProcessing(self):
global exposition
self.logger.info("<<<<<<<< Image processing >>>>>>>>>")
hdulist = pyfits.open(CHARTPATH+"dev/frame.fits")
scidata = hdulist[0].data
moyenne = scidata.mean()
        self.logger.info("Mean: " + str(moyenne))
        self.logger.info("Previous exposure: " + str(exposition))
        gain = self.device.getNumber("CCD_GAIN")
        self.logger.info("Previous gain: " + str(gain[0].value))
#if moyenne > 120:
# if gain[0].value == 1:
# exposition = float(exposition) / 10
# if exposition < 0.000001:
# exposition = 0.000001
# #COLOR =1
# else :
# gain[0].value -= 5
# if gain[0].value < 1:
# gain[0].value = 1
# #COLOR = 1
#elif moyenne < 100:
# if exposition < 120:
# exposition *= 10
# if exposition > 120:
# exposition = 120
# #COLOR = 0
# else :
# gain[0].value += 5
# if gain[0].value > 50:
# gain[0].value = 50
# #COLOR = 1
# send new gain to server/device
        self.logger.info("New exposure: " + str(exposition))
        self.logger.info("New gain: " + str(gain[0].value))
        if gain != self.device.getNumber("CCD_GAIN"):
            self.sendNewNumber(gain)
        # demosaic the Bayer frame into an RGB image before saving it as JPEG
processedImage = cv2.cvtColor(scidata, cv2.COLOR_BAYER_GR2RGB)
cv2.imwrite(CHARTPATH+"dev/allsky.jpg" , processedImage)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
# instantiate the client
indiclient=IndiClient()
# set indi server localhost and port 7624
indiclient.setServer("allsky.sadr",7624)
# connect to indi server
print("Connecting to indiserver")
if (not(indiclient.connectServer())):
print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run")
print(" indiserver indi_simulator_telescope indi_simulator_ccd")
sys.exit(1)
# start endless loop, client works asynchron in background
while True:
time.sleep(1)
| apache-2.0 |
stefan-caraiman/cloudbase-init | cloudbaseinit/tests/metadata/services/test_ec2service.py | 1 | 3611 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.metadata.services import ec2service
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class EC2ServiceTest(unittest.TestCase):
def setUp(self):
self._service = ec2service.EC2Service()
@mock.patch('cloudbaseinit.utils.network.check_metadata_ip_route')
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'.get_host_name')
def _test_load(self, mock_get_host_name, mock_check_metadata_ip_route,
side_effect):
mock_get_host_name.side_effect = [side_effect]
with testutils.LogSnatcher('cloudbaseinit.metadata.services.'
'ec2service'):
response = self._service.load()
mock_check_metadata_ip_route.assert_called_once_with(
CONF.ec2.metadata_base_url)
mock_get_host_name.assert_called_once_with()
if side_effect is Exception:
self.assertFalse(response)
else:
self.assertTrue(response)
def test_load(self):
self._test_load(side_effect=None)
def test_load_exception(self):
self._test_load(side_effect=Exception)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_host_name(self, mock_get_cache_data):
response = self._service.get_host_name()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/local-hostname' % self._service._metadata_version,
decode=True)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_instance_id(self, mock_get_cache_data):
response = self._service.get_instance_id()
mock_get_cache_data.assert_called_once_with(
'%s/meta-data/instance-id' % self._service._metadata_version,
decode=True)
self.assertEqual(mock_get_cache_data.return_value, response)
@mock.patch('cloudbaseinit.metadata.services.ec2service.EC2Service'
'._get_cache_data')
def test_get_public_keys(self, mock_get_cache_data):
mock_get_cache_data.side_effect = ['key=info', 'fake key\n']
response = self._service.get_public_keys()
expected = [
mock.call('%s/meta-data/public-keys' %
self._service._metadata_version,
decode=True),
mock.call('%(version)s/meta-data/public-keys/%('
'idx)s/openssh-key' %
{'version': self._service._metadata_version,
'idx': 'key'}, decode=True)]
self.assertEqual(expected, mock_get_cache_data.call_args_list)
self.assertEqual(['fake key'], response)
| apache-2.0 |
alexthered/kienhoc-platform | common/djangoapps/student/migrations/0029_add_lookup_table_between_user_and_anonymous_student_id.py | 114 | 16366 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AnonymousUserId'
db.create_table('student_anonymoususerid', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('anonymous_user_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=16)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
))
db.send_create_signal('student', ['AnonymousUserId'])
def backwards(self, orm):
# Deleting model 'AnonymousUserId'
db.delete_table('student_anonymoususerid')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.testcenterregistration': {
'Meta': {'object_name': 'TestCenterRegistration'},
'accommodation_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'accommodation_request': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'authorization_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'client_authorization_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'eligibility_appointment_date_first': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'eligibility_appointment_date_last': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'exam_series_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'testcenter_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['student.TestCenterUser']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.testcenteruser': {
'Meta': {'object_name': 'TestCenterUser'},
'address_1': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'address_2': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'address_3': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'candidate_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'client_candidate_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'confirmed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'extension': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'fax_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'middle_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'phone_country_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'db_index': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'processed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'salutation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'upload_error_message': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'upload_status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '20', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'unique': 'True'}),
'user_updated_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
l2isbad/netdata | collectors/python.d.plugin/python_modules/bases/FrameworkServices/ExecutableService.py | 3 | 3319 | # -*- coding: utf-8 -*-
# Description:
# Author: Pawel Krupa (paulfantom)
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
import os
from subprocess import Popen, PIPE
from bases.FrameworkServices.SimpleService import SimpleService
from bases.collection import find_binary
class ExecutableService(SimpleService):
def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name)
self.command = None
def _get_raw_data(self, stderr=False, command=None):
"""
Get raw data from executed command
:return: <list>
"""
command = command or self.command
self.debug("Executing command '{0}'".format(' '.join(command)))
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
except Exception as error:
self.error('Executing command {0} resulted in error: {1}'.format(command, error))
return None
data = list()
std = p.stderr if stderr else p.stdout
for line in std:
try:
data.append(line.decode('utf-8'))
except TypeError:
continue
return data
def check(self):
"""
Parse basic configuration, check if command is whitelisted and is returning values
:return: <boolean>
"""
# Preference: 1. "command" from configuration file 2. "command" from plugin (if specified)
if 'command' in self.configuration:
self.command = self.configuration['command']
# "command" must be: 1.not None 2. type <str>
if not (self.command and isinstance(self.command, str)):
self.error('Command is not defined or command type is not <str>')
return False
# Split "command" into: 1. command <str> 2. options <list>
command, opts = self.command.split()[0], self.command.split()[1:]
# Check for "bad" symbols in options. No pipes, redirects etc.
opts_list = ['&', '|', ';', '>', '<']
bad_opts = set(''.join(opts)) & set(opts_list)
if bad_opts:
self.error("Bad command argument(s): {opts}".format(opts=bad_opts))
return False
# Find absolute path ('echo' => '/bin/echo')
if '/' not in command:
command = find_binary(command)
if not command:
self.error('Can\'t locate "{command}" binary'.format(command=self.command))
return False
# Check if binary exist and executable
else:
if not os.access(command, os.X_OK):
self.error('"{binary}" is not executable'.format(binary=command))
return False
self.command = [command] + opts if opts else [command]
try:
data = self._get_data()
except Exception as error:
self.error('_get_data() failed. Command: {command}. Error: {error}'.format(command=self.command,
error=error))
return False
if isinstance(data, dict) and data:
return True
self.error('Command "{command}" returned no data'.format(command=self.command))
return False
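# Illustrative usage sketch only (not part of the original module): a collector
# typically subclasses ExecutableService, sets "command" and implements
# _get_data() on top of _get_raw_data(). The command and the single returned
# dimension ("uptime") below are assumptions made for this example.
class ExampleUptimeService(ExecutableService):
    def __init__(self, configuration=None, name=None):
        ExecutableService.__init__(self, configuration=configuration, name=name)
        # check() resolves 'cat' to an absolute path and validates the options
        self.command = 'cat /proc/uptime'
    def _get_data(self):
        raw = self._get_raw_data()
        if not raw:
            return None
        # the first field of /proc/uptime is seconds since boot
        return {'uptime': int(float(raw[0].split()[0]))}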
| gpl-3.0 |
MikeAmy/django | tests/i18n/sampleproject/update_catalogs.py | 344 | 1780 | #!/usr/bin/env python
"""
Helper script to update sampleproject's translation catalogs.
When a bug has been identified related to i18n, this helps capture the issue
by using catalogs created from management commands.
Example:
The string "Two %% Three %%%" renders differently using trans and blocktrans.
This issue is difficult to debug, it could be a problem with extraction,
interpolation, or both.
How this script helps:
* Add {% trans "Two %% Three %%%" %} and blocktrans equivalent to templates.
* Run this script.
* Test extraction - verify the new msgid in sampleproject's django.po.
* Add a translation to sampleproject's django.po.
* Run this script.
* Test interpolation - verify templatetag rendering, test each in a template
that is rendered using an activated language from sampleproject's locale.
* Tests should fail, issue captured.
* Fix issue.
* Run this script.
* Tests all pass.
"""
import os
import re
import sys
proj_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(proj_dir, '..', '..', '..')))
def update_translation_catalogs():
"""Run makemessages and compilemessages in sampleproject."""
from django.core.management import call_command
prev_cwd = os.getcwd()
os.chdir(proj_dir)
call_command('makemessages')
call_command('compilemessages')
# keep the diff friendly - remove 'POT-Creation-Date'
pofile = os.path.join(proj_dir, 'locale', 'fr', 'LC_MESSAGES', 'django.po')
with open(pofile) as f:
content = f.read()
content = re.sub(r'^"POT-Creation-Date.+$\s', '', content, flags=re.MULTILINE)
with open(pofile, 'w') as f:
f.write(content)
os.chdir(prev_cwd)
if __name__ == "__main__":
update_translation_catalogs()
| bsd-3-clause |
caidongyun/Dato-Core | src/unity/python/graphlab/meta/decompiler/tests/test_simple.py | 13 | 8056 | '''
Created on Nov 9, 2011
@author: sean
'''
from graphlab.meta.testing import py2only
from graphlab.meta.decompiler.tests import Base
import unittest
class Simple(Base):
def test_assign(self):
'a = b'
self.statement('a = b')
def test_assign2(self):
'a = b = c'
self.statement('a = b')
def test_assign3(self):
'a = b,d = c'
self.statement('a = b')
def test_assign4(self):
'a.y = b,d = c'
self.statement('a = b')
def test_setattr(self):
'a.b = b'
self.statement('a.b = b')
def test_getattr(self):
'a = b.b'
self.statement('a = b.b')
def test_add(self):
'a+b'
self.statement('a+b')
def test_sub(self):
'a-b'
self.statement('a-b')
def test_mul(self):
'a*b'
self.statement('a*b')
def test_div(self):
'a/b'
self.statement('a/b')
def test_floordiv(self):
'a//b'
self.statement('a//b')
def test_pow(self):
'a**b'
self.statement('a**b')
def test_eq(self):
'a==b'
self.statement('a==b')
def test_iadd(self):
'a+=b'
self.statement('a+=b')
def test_isub(self):
'a-=b'
self.statement('a-=b')
def test_binary_and(self):
'a & b'
self.statement('a & b')
def test_binary_lshift(self):
'a << b'
self.statement('a << b')
def test_binary_rshift(self):
'a >> b'
self.statement('a >> b')
def test_binary_mod(self):
'a % b'
self.statement('a % b')
def test_binary_or(self):
'a | b'
self.statement('a | b')
def test_binary_xor(self):
'a ^ b'
self.statement('a ^ b')
def test_build_list(self):
'[x,y, 1, None]'
self.statement('[x,y, 1, None]')
def test_build_tuple(self):
'(x,y, 1, None)'
self.statement('(x,y, 1, None)')
def test_build_set(self):
'{x,y, 1, None}'
self.statement('{x,y, 1, None}')
def test_build_dict(self):
'{a:x,b:y, c:1, d:None}'
self.statement('{a:x,b:y, c:1, d:None}')
def test_unpack_tuple(self):
'(a,b) = c'
self.statement('(a,b) = c')
def test_delete_name(self):
stmnt = 'del a'
self.statement(stmnt)
def test_delete_attr(self):
stmnt = 'del a.a'
self.statement(stmnt)
@py2only
def test_exec1(self):
stmnt = 'exec a'
self.statement(stmnt)
@py2only
def test_exec2(self):
stmnt = 'exec a in b'
self.statement(stmnt)
@py2only
def test_exec3(self):
stmnt = 'exec a in b,c'
self.statement(stmnt)
@py2only
def test_exec4(self):
stmnt = 'exec a in {2:1}, { }'
self.statement(stmnt)
def test_import_star(self):
stmnt = 'from a import *'
self.statement(stmnt)
stmnt = 'from a.v import *'
self.statement(stmnt)
def test_import(self):
stmnt = 'import a'
self.statement(stmnt)
def test_import_as(self):
stmnt = 'import a as b'
self.statement(stmnt)
def test_import_from(self):
stmnt = 'from c import a as b'
self.statement(stmnt)
def test_import_from2(self):
stmnt = 'from c import a \nimport x'
self.statement(stmnt)
def test_not(self):
stmnt = 'not a'
self.statement(stmnt)
def test_call(self):
stmnt = 'a()'
self.statement(stmnt)
def test_call_args(self):
stmnt = 'a(a, b)'
self.statement(stmnt)
def test_call_args1(self):
stmnt = 'a(a, b, c=33)'
self.statement(stmnt)
def test_call_varargs(self):
stmnt = 'a(*a)'
self.statement(stmnt)
def test_call_kwargs(self):
stmnt = 'a(a, b=0, **a)'
self.statement(stmnt)
def test_call_var_kwargs(self):
stmnt = 'a(a, b=0, *d, **a)'
self.statement(stmnt)
@py2only
def test_print(self):
stmnt = 'print foo,'
self.statement(stmnt)
@py2only
def test_printnl(self):
stmnt = 'print foo'
self.statement(stmnt)
@py2only
def test_printitems(self):
stmnt = 'print foo, bar, bas,'
self.statement(stmnt)
@py2only
def test_printitemsnl(self):
stmnt = 'print foo, bar, bas'
self.statement(stmnt)
@py2only
def test_print_to(self):
stmnt = 'print >> stream, foo,'
self.statement(stmnt)
@py2only
def test_print_to_nl(self):
stmnt = 'print >> stream, foo'
self.statement(stmnt)
@py2only
def test_printitems_to(self):
stmnt = 'print >> stream, foo, bar, bas,'
self.statement(stmnt)
@py2only
def test_printitems_to_nl(self):
stmnt = 'print >> stream, foo, bar, bas'
self.statement(stmnt)
def test_subscr(self):
stmnt = 'x[y]'
self.statement(stmnt)
def test_subscr_assign(self):
stmnt = 'x[y] =z'
self.statement(stmnt)
def test_subscr_del(self):
stmnt = 'del x[y]'
self.statement(stmnt)
def test_subscr0(self):
stmnt = 'x[:]'
self.statement(stmnt)
def test_subscr_assign0(self):
stmnt = 'x[:] =z'
self.statement(stmnt)
def test_subscr_del0(self):
stmnt = 'del x[:]'
self.statement(stmnt)
def test_subscr1(self):
stmnt = 'x[a:]'
self.statement(stmnt)
def test_subscr_assign1(self):
stmnt = 'x[a:] =z'
self.statement(stmnt)
def test_subscr_del1(self):
stmnt = 'del x[a:]'
self.statement(stmnt)
def test_subscr2(self):
stmnt = 'x[:a]'
self.statement(stmnt)
def test_subscr_assign2(self):
stmnt = 'x[:a] =z'
self.statement(stmnt)
def test_subscr_del2(self):
stmnt = 'del x[:a]'
self.statement(stmnt)
def test_subscr3(self):
stmnt = 'x[b:a]'
self.statement(stmnt)
def test_subscr_assign3(self):
stmnt = 'x[b:a] =z'
self.statement(stmnt)
def test_subscr_del3(self):
stmnt = 'del x[b:a]'
self.statement(stmnt)
def test_subscrX(self):
stmnt = 'x[b:a:c]'
self.statement(stmnt)
def test_subscr_assignX(self):
stmnt = 'x[b:a:c] =z'
self.statement(stmnt)
def test_subscr_delX(self):
stmnt = 'del x[b:a:c]'
self.statement(stmnt)
def test_subscrX2(self):
stmnt = 'x[::]'
self.statement(stmnt)
def test_subscr_assignX2(self):
stmnt = 'x[::] =z'
self.statement(stmnt)
def test_subscr_delX2(self):
stmnt = 'del x[::]'
self.statement(stmnt)
def test_subscr_tuple(self):
stmnt = 'x[x,a]'
self.statement(stmnt)
def test_subscr_tuple_set(self):
stmnt = 'x[x,a] =z'
self.statement(stmnt)
def test_subscr_tuple_del(self):
stmnt = 'del x[x,a]'
self.statement(stmnt)
def test_subscrX3(self):
stmnt = 'x[x,:a]'
self.statement(stmnt)
def test_subscr_assignX3(self):
stmnt = 'x[x,:a] =z'
self.statement(stmnt)
def test_subscr_delX3(self):
stmnt = 'del x[x,:a]'
self.statement(stmnt)
def test_bug_001(self):
stmnt = 'a = 1; b = 2; (a, b) = (b, a)'
self.statement(stmnt)
def test_bug_0021(self):
stmnt = '(a, b, c) = (c, b, a)'
self.statement(stmnt)
def test_bug_002(self):
stmnt = "x = range(6)\nx[2:4] += 'abc'"
self.statement(stmnt)
def test_bug_003(self):
stmnt = "raise V"
self.statement(stmnt)
def test_bug_004(self):
stmnt = '(a, b, c) = (c, b, a) = (x, y, z)'
self.statement(stmnt)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_assign']
unittest.main()
| agpl-3.0 |
agat63/E4GT_ICS_kernel | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack, drawn as a textual, hierarchical tree of calls.
Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
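# Illustrative example (the line shape is assumed, not taken from a real trace
# capture): a function-tracer line such as
#   "   bash-4251  [001]  6328.799001: mutex_unlock <-tracing_set_tracer"
# would be returned as ("6328.799001", "mutex_unlock", "tracing_set_tracer"),
# i.e. (calltime, callee, caller) as consumed by main() below.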
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
supriyantomaftuh/pip | pip/_vendor/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
| mit |
slank/ansible | lib/ansible/plugins/callback/debug.py | 137 | 1190 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default): # pylint: disable=too-few-public-methods,no-init
'''
Override for the default callback module.
    Render stdout/stderr outside of the rest of the result, which is printed
    with indentation.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'debug'
def _dump_results(self, result):
'''Return the text to output for a result.'''
        # Enable JSON indentation
result['_ansible_verbose_always'] = True
save = {}
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
if key in result:
save[key] = result.pop(key)
output = CallbackModule_default._dump_results(self, result)
for key in ['stdout', 'stderr', 'msg']:
if key in save and save[key]:
output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
for key, value in save.items():
result[key] = value
return output
| gpl-3.0 |
arulkumarkandasamy/clictest | clictest/common/rpc.py | 1 | 9370 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RPC Controller
"""
import datetime
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import oslo_utils.importutils as imp
import six
from webob import exc
from clictest.common import client
from clictest.common import exception
from clictest.common import timeutils
from clictest.common import wsgi
from clictest.i18n import _, _LE
LOG = logging.getLogger(__name__)
rpc_opts = [
# NOTE(flaper87): Shamelessly copied
# from oslo rpc.
cfg.ListOpt('allowed_rpc_exception_modules',
default=['clictest.common.exception',
'builtins',
'exceptions',
],
help='Modules of exceptions that are permitted to be recreated'
' upon receiving exception data from an rpc call.'),
]
CONF = cfg.CONF
CONF.register_opts(rpc_opts)
class RPCJSONSerializer(wsgi.JSONResponseSerializer):
@staticmethod
def _to_primitive(_type, _value):
return {"_type": _type, "_value": _value}
def _sanitizer(self, obj):
if isinstance(obj, datetime.datetime):
return self._to_primitive("datetime",
obj.isoformat())
return super(RPCJSONSerializer, self)._sanitizer(obj)
class RPCJSONDeserializer(wsgi.JSONRequestDeserializer):
@staticmethod
def _to_datetime(obj):
return timeutils.normalize_time(timeutils.parse_isotime(obj))
def _sanitizer(self, obj):
try:
_type, _value = obj["_type"], obj["_value"]
return getattr(self, "_to_" + _type)(_value)
except (KeyError, AttributeError):
return obj
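# Minimal sketch of the wire format implied by the serializer/deserializer pair
# above (the sample values are hypothetical): a datetime is emitted as a tagged
# primitive and the deserializer dispatches on "_type" to rebuild it.
#
#   datetime.datetime(2016, 1, 2, 3, 4, 5)
#       --serialize-->   {"_type": "datetime", "_value": "2016-01-02T03:04:05"}
#       --deserialize--> a datetime parsed and normalized via timeutils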
class Controller(object):
"""
Base RPCController.
This is the base controller for RPC based APIs. Commands
handled by this controller respect the following form:
.. code-block:: json
[{
'command': 'method_name',
'kwargs': {...}
}]
The controller is capable of processing more than one command
per request and will always return a list of results.
:param bool raise_exc: Specifies whether to raise
exceptions instead of "serializing" them.
"""
def __init__(self, raise_exc=False):
self._registered = {}
self.raise_exc = raise_exc
def register(self, resource, filtered=None, excluded=None, refiner=None):
"""
Exports methods through the RPC Api.
:param resource: Resource's instance to register.
:param filtered: List of methods that *can* be registered. Read
as "Method must be in this list".
:param excluded: List of methods to exclude.
:param refiner: Callable to use as filter for methods.
:raises TypeError: If refiner is not callable.
"""
funcs = filter(lambda x: not x.startswith("_"), dir(resource))
if filtered:
funcs = [f for f in funcs if f in filtered]
if excluded:
funcs = [f for f in funcs if f not in excluded]
if refiner:
funcs = filter(refiner, funcs)
for name in funcs:
meth = getattr(resource, name)
if not callable(meth):
continue
self._registered[name] = meth
def __call__(self, req, body):
"""
Executes the command
"""
if not isinstance(body, list):
msg = _("Request must be a list of commands")
raise exc.HTTPBadRequest(explanation=msg)
def validate(cmd):
if not isinstance(cmd, dict):
msg = _("Bad Command: %s") % str(cmd)
raise exc.HTTPBadRequest(explanation=msg)
command, kwargs = cmd.get("command"), cmd.get("kwargs")
if (not command or not isinstance(command, six.string_types) or
(kwargs and not isinstance(kwargs, dict))):
msg = _("Wrong command structure: %s") % (str(cmd))
raise exc.HTTPBadRequest(explanation=msg)
method = self._registered.get(command)
if not method:
# Just raise 404 if the user tries to
# access a private method. No need for
# 403 here since logically the command
# is not registered to the rpc dispatcher
raise exc.HTTPNotFound(explanation=_("Command not found"))
return True
        # If more than one command was sent then they might
        # be intended to be executed sequentially; therefore,
        # let's first verify they're all valid before executing
        # them.
commands = filter(validate, body)
results = []
for cmd in commands:
# kwargs is not required
command, kwargs = cmd["command"], cmd.get("kwargs", {})
method = self._registered[command]
try:
result = method(req.context, **kwargs)
except Exception as e:
if self.raise_exc:
raise
cls, val = e.__class__, encodeutils.exception_to_unicode(e)
msg = (_LE("RPC Call Error: %(val)s\n%(tb)s") %
dict(val=val, tb=traceback.format_exc()))
LOG.error(msg)
# NOTE(flaper87): Don't propagate all exceptions
# but the ones allowed by the user.
module = cls.__module__
if module not in CONF.allowed_rpc_exception_modules:
cls = exception.RPCError
val = encodeutils.exception_to_unicode(
exception.RPCError(cls=cls, val=val))
cls_path = "%s.%s" % (cls.__module__, cls.__name__)
result = {"_error": {"cls": cls_path, "val": val}}
results.append(result)
return results
class RPCClient(client.BaseClient):
def __init__(self, *args, **kwargs):
self._serializer = RPCJSONSerializer()
self._deserializer = RPCJSONDeserializer()
self.raise_exc = kwargs.pop("raise_exc", True)
self.base_path = kwargs.pop("base_path", '/rpc')
super(RPCClient, self).__init__(*args, **kwargs)
@client.handle_unauthenticated
def bulk_request(self, commands):
"""
Execute multiple commands in a single request.
:param commands: List of commands to send. Commands
must respect the following form
.. code-block:: json
{
'command': 'method_name',
'kwargs': method_kwargs
}
"""
body = self._serializer.to_json(commands)
response = super(RPCClient, self).do_request('POST',
self.base_path,
body)
return self._deserializer.from_json(response.read())
def do_request(self, method, **kwargs):
"""
Simple do_request override. This method serializes
the outgoing body and builds the command that will
be sent.
:param method: The remote python method to call
:param kwargs: Dynamic parameters that will be
passed to the remote method.
"""
content = self.bulk_request([{'command': method,
'kwargs': kwargs}])
# NOTE(flaper87): Return the first result if
# a single command was executed.
content = content[0]
# NOTE(flaper87): Check if content is an error
# and re-raise it if raise_exc is True. Before
# checking if content contains the '_error' key,
# verify if it is an instance of dict - since the
# RPC call may have returned something different.
if self.raise_exc and (isinstance(content, dict)
and '_error' in content):
error = content['_error']
try:
exc_cls = imp.import_class(error['cls'])
raise exc_cls(error['val'])
except ImportError:
# NOTE(flaper87): The exception
# class couldn't be imported, using
# a generic exception.
raise exception.RPCError(**error)
return content
def __getattr__(self, item):
"""
This method returns a method_proxy that
will execute the rpc call in the registry
service.
"""
if item.startswith('_'):
raise AttributeError(item)
def method_proxy(**kw):
return self.do_request(item, **kw)
return method_proxy
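# Usage sketch (hypothetical command name; the constructor arguments shown are
# assumptions about the BaseClient signature, shown only to tie the pieces
# together): any attribute access on RPCClient becomes a single-command POST to
# its /rpc endpoint through __getattr__/do_request above.
#
#   client = RPCClient(host='127.0.0.1', port=9191)
#   result = client.image_get(id='some-id')
#   # request body sent: [{'command': 'image_get', 'kwargs': {'id': 'some-id'}}]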
| apache-2.0 |
CiscoSystems/fabric_enabler | setup_enabler.py | 1 | 36241 | # Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import subprocess
import sys
import platform
import time
import ConfigParser
import cisco_scp
import paramiko
import errno
import json
import socket
startup_cmds = {
'ubuntu': {
'stop_agent': 'stop fabric-enabler-agent',
'start_agent': 'start fabric-enabler-agent',
'stop_keystone': 'service apache2 stop',
'start_keystone': 'service apache2 start',
'stop_server': 'stop fabric-enabler-server',
'start_server': 'start fabric-enabler-server',
'stop_neutron_server': 'stop neutron-server',
'start_neutron_server': 'start neutron-server',
'get_pty': False,
},
'redhat': {
'stop_agent': 'systemctl stop fabric-enabler-agent',
'start_agent': 'systemctl start fabric-enabler-agent',
'stop_keystone': 'systemctl stop openstack-keystone',
'start_keystone': 'systemctl start openstack-keystone',
'stop_server': 'systemctl stop fabric-enabler-server',
'start_server': 'systemctl start fabric-enabler-server',
'stop_neutron_server': 'systemctl stop neutron-server',
'start_neutron_server': 'systemctl start neutron-server',
'get_pty': True,
},
'centos': {
'stop_agent': 'systemctl stop fabric-enabler-agent',
'start_agent': 'systemctl start fabric-enabler-agent',
'stop_keystone': 'systemctl stop httpd',
'start_keystone': 'systemctl start httpd',
'stop_server': 'systemctl stop fabric-enabler-server',
'start_server': 'systemctl start fabric-enabler-server',
'stop_neutron_server': 'systemctl stop neutron-server',
'start_neutron_server': 'systemctl start neutron-server',
'get_pty': True,
}
}
class NexusFabricEnablerInstaller(object):
"""Represents Fabric Enabler Installation."""
def __init__(self, mysql_user, mysql_passwd, mysql_host):
self.mysql_user = mysql_user
self.mysql_password = mysql_passwd
self.mysql_host = mysql_host
self.http_proxy = None
self.https_proxy = None
self.vendor_os_rel = None
self.upgrade = False
self.restart_on_upgrades = True
self.restart_lldpad_on_upgrades = False
self.root_helper = '' if os.geteuid() == 0 else 'sudo '
self.src_dir = os.path.basename(
os.path.dirname(os.path.realpath(__file__)))
self.ssh_client_log = '%s/paramiko.log' % self.src_dir
self.uplink_file = "uplink"
self.script_dir = '%s/dfa/scripts' % self.src_dir
self.rm_uplink = '%s rm -f /tmp/uplink*' % self.root_helper
self.cp_uplink = '[[ -e %s/%s ]] && cp %s/%s /tmp' % (
self.src_dir, self.uplink_file,
self.src_dir, self.uplink_file)
self.run_dfa_prep_on_control = (
'%s python %s/dfa_prepare_setup.py --node-function=control '
'%s %s %s' % (
self.root_helper, self.script_dir,
'--mysql-user=' + mysql_user if mysql_user else '',
'--mysql-password=' + mysql_passwd if mysql_passwd else '',
'--mysql-host=' + mysql_host if mysql_host else ''))
self.run_dfa_prep_on_hacontrol = (
'%s python %s/dfa_prepare_setup.py --node-function=ha-control '
'%s %s %s' % (
self.root_helper, self.script_dir,
'--mysql-user=' + mysql_user if mysql_user else '',
'--mysql-password=' + mysql_passwd if mysql_passwd else '',
'--mysql-host=' + mysql_host if mysql_host else ''))
self.run_dfa_prep_on_compute = ('%s python %s/dfa_prepare_setup.py '
'--node-function=compute' % (
self.root_helper, self.script_dir))
self.add_req_txt = 'touch %s/requirements.txt' % self.src_dir
sudo_cmd = (self.root_helper + '-E ') if self.root_helper else ''
self.install_pkg = ("cd %s;"
"python setup.py build;%spython setup.py bdist_egg;"
"%spython setup.py install" % (
self.src_dir, sudo_cmd, sudo_cmd))
self.distr_name = platform.dist()[0].lower()
self.run_lldpad = '%s %s/run_lldpad.sh %s' % (
self.root_helper, self.script_dir, self.src_dir)
self.cleanup = "cd %s ; %s rm -rf %s %s %s" % (
self.src_dir,
self.root_helper,
"openstack_fabric_enabler.egg-info",
"build",
"dist")
self.neutron_restart_procs = [
'neutron-server']
def restart_neutron_processes(self):
print(' Restarting Neutron Processes ')
if (os.path.isfile('/etc/init/neutron-server.conf') or
os.path.isfile('/usr/lib/systemd/system/neutron-server.service')):
self.run_cmd_line(self.stop_neutron_server,
check_result=False)
time.sleep(10)
self.run_cmd_line(self.start_neutron_server,
check_result=False)
else:
reg_exes = {}
for proc in self.neutron_restart_procs:
reg_exes[proc] = re.compile(
"^(?P<uid>\S+)\s+(?P<pid>\d+)\s+(?P<ppid>\d+)."
"*python(?P<cmd>.*%s.*)" % proc)
ps_output, rc = self.run_cmd_line('ps -ef')
for line in ps_output.splitlines():
for proc, reg_ex in reg_exes.items():
result = reg_ex.search(line)
if result:
print 'Restarting ', proc
# Kill the process
kill_cmd = ''.join((self.root_helper,
('kill -9 %d' % (
int(result.group('pid'))))))
self.run_cmd_line(kill_cmd)
cmd = result.group('cmd') + ' > %s/%s 2>&1 &' % (
self.src_dir, 'enabler_neutron_svc.log')
print cmd
os.system(cmd)
print 'Neutron processes: '
ps_output, rc = self.run_cmd_line('ps -ef')
for line in ps_output.splitlines():
for proc, reg_ex in reg_exes.items():
result = reg_ex.search(line)
if result:
print line
def run_cmd_line(self, cmd_str, stderr=None, shell=False,
echo_cmd=True, check_result=True):
if echo_cmd:
print cmd_str
if shell:
cmd_args = cmd_str
else:
cmd_args = cmd_str.split()
output = None
returncode = 0
try:
output = subprocess.check_output(cmd_args, shell=shell,
stderr=stderr)
except subprocess.CalledProcessError as e:
if check_result:
print e.output
sys.exit(e.returncode)
else:
returncode = e.returncode
return output, returncode
def find_computes(self):
"""Returns commpute nodes in the setup."""
compute_list = []
cmd = ''.join((self.root_helper, "-E neutron agent-list -f json"))
output, returncode = self.run_cmd_line(cmd,check_result=False)
if returncode != 0:
print(("Command '%s' could not be invoked. " +
"Please source suitable openrc file") % cmd)
sys.exit(1)
output_json = json.loads(output)
for e in output_json:
if e['agent_type'] != 'Open vSwitch agent':
continue
if e['host'] == socket.gethostname():
continue
compute_list.append(e['host'])
return compute_list
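    # Illustrative shape of one "neutron agent-list -f json" element consumed by
    # find_computes() above (the host name and extra fields are hypothetical):
    #   {"agent_type": "Open vSwitch agent", "host": "compute-1", ...}
    # Only agents of that type running on hosts other than this node are kept.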
def parse_config(self):
"""Parses enabler config file.
It returns compute nodes credentails and also list of compute nodes
and uplink interfaces, if they are defined.
"""
compute_name_list = None
compute_uplink_list = None
configfile = '/etc/saf/enabler_conf.ini'
if os.path.exists(configfile) is False:
print "Config file %s is missing\n" % configfile
sys.exit(1)
config = ConfigParser.ConfigParser()
config.read(configfile)
try:
compute_names = config.get("compute", "node")
if compute_names:
compute_name_list = compute_names.split(',')
compute_uplinks = config.get("compute", "node_uplink")
if compute_uplinks:
compute_uplink_list = compute_uplinks.split(',')
except:
pass
return (config.get("general", "compute_user"),
config.get("general", "compute_passwd"),
compute_name_list, compute_uplink_list)
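    # Illustrative enabler_conf.ini fragment matching the reads in parse_config()
    # above (user, password, host names and uplinks are placeholder values, not
    # shipped defaults):
    #   [general]
    #   compute_user = heat-admin
    #   compute_passwd = secret
    #
    #   [compute]
    #   node = compute-1,compute-2
    #   node_uplink = eth2,eth2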
def create_sshClient(self, host, user, passwd=None):
try:
client = paramiko.SSHClient()
client.load_system_host_keys()
paramiko.util.log_to_file(self.ssh_client_log)
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(host, username=user, password=passwd)
return client
except:
print("Filed to create SSH client for %s %s" % (host, user))
def copy_dir(self, target_host, target_user, target_password=None):
"""Copy source files into compute nodes for installation."""
print("Copying dir " + self.src_dir + " to " + target_host)
client = self.create_sshClient(target_host, target_user,
target_password)
if client is None:
print("Failed to copy source files.")
return
scp_client = cisco_scp.cisco_SCPClient(client.get_transport())
scp_client.set_verbose(False)
scp_client.put(self.src_dir, recursive=True)
client.close()
def generate_uplink_file(self, compute_uplink):
uplink_file_str = self.src_dir + '/' + self.uplink_file
if "auto" in compute_uplink.lower():
if os.path.isfile(uplink_file_str):
os.remove(uplink_file_str)
else:
filep = open(uplink_file_str, "w")
filep.write(compute_uplink)
filep.close()
def setup_control(self, hamode):
"""Install enabler package on control node."""
output, returncode = self.run_cmd_line(self.install_pkg, shell=True)
print output
output, returncode = self.run_cmd_line(
self.run_dfa_prep_on_hacontrol if hamode else
self.run_dfa_prep_on_control)
print output
if not self.upgrade or (self.upgrade and self.restart_lldpad_on_upgrades):
output, returncode = self.run_cmd_line(self.run_lldpad, shell=True, check_result=False)
print output
output, returncode = self.run_cmd_line(self.cleanup, shell=True, check_result=False)
print output
if self.vendor_os_rel == 'rhel-osp7':
self.rhel_osp7_setup(hamode)
else:
if not self.upgrade:
print "restarting keystone"
self.restart_keystone_process()
time.sleep(10)
self.restart_neutron_processes()
time.sleep(10)
if not self.upgrade or (self.upgrade and self.restart_on_upgrades):
if hamode is False:
self.restart_fabric_enabler_server()
self.restart_fabric_enabler_agent()
def install_remote(self, command, host, user, password=None):
"""Run script on remote node."""
print("Invoking installation on %s, please wait..." % (host))
c = self.create_sshClient(host, user, password)
if c is None:
print "Could not connect to remote host %s" % (host)
return
c.get_transport().open_session().set_combine_stderr(True)
print("CMD: %s" % command)
ssh_stdin, ssh_stdout, ssh_stderr = c.exec_command(command,
get_pty=True)
for i in ssh_stdout.readlines():
print "(%s) %s" % (host, i.encode('utf-8')),
c.close()
def setup_control_remote(self, control_name, control_user,
control_password=None, ha_mode=False):
"""Invoke installation on remote control node."""
self.copy_dir(control_name, control_user, control_password)
cmd = "cd %s; yes | " % (self.src_dir)
if self.http_proxy is not None:
cmd += "http_proxy=%s " % (self.http_proxy)
if self.https_proxy is not None:
cmd += "https_proxy=%s " % (self.https_proxy)
cmd += "python setup_enabler.py "
if self.mysql_user is not None:
cmd += "--mysql-user=%s " % (self.mysql_user)
if self.mysql_password is not None:
cmd += "--mysql-password=\"%s\" " % (self.mysql_password)
if self.mysql_host is not None:
cmd += "--mysql-host=%s " % (self.mysql_host)
if self.vendor_os_rel is not None:
cmd += "--vendor-os-release=%s " % (self.vendor_os_rel)
if self.upgrade:
cmd += "--upgrade=True "
cmd += "--restart=%s " % (self.restart_on_upgrades)
cmd += "--restart-lldpad=%s " % (self.restart_lldpad_on_upgrades)
cmd += "--controller-only=True "
if ha_mode:
cmd += "--ha-mode=True"
else:
cmd += "--ha-mode=False"
self.install_remote(cmd, control_name, control_user, control_password)
def setup_compute_remote(self, compute_name, compute_uplink,
compute_user, compute_password=None):
"""Invoke installation on remote compute node"""
self.copy_dir(compute_name, compute_user, compute_password)
cmd = "cd %s; yes | " % (self.src_dir)
if self.http_proxy is not None:
cmd += "http_proxy=%s " % (self.http_proxy)
if self.https_proxy is not None:
cmd += "https_proxy=%s " % (self.https_proxy)
cmd += "python setup_enabler.py --compute-local=True "
if compute_uplink is not None:
cmd += "--uplink=%s " % (compute_uplink)
if self.vendor_os_rel is not None:
cmd += "--vendor-os-release=%s " % (self.vendor_os_rel)
if self.upgrade:
cmd += "--upgrade=True "
cmd += "--restart=%s " % (self.restart_on_upgrades)
cmd += "--restart-lldpad=%s " % (self.restart_lldpad_on_upgrades)
self.install_remote(cmd, compute_name, compute_user, compute_password)
def setup_compute_local(self, input_compute_uplink):
"""Install Enabler on local compute node"""
if self.upgrade:
script_list = [ self.run_dfa_prep_on_compute,
self.add_req_txt, self.install_pkg ]
if self.restart_on_upgrades:
script_list.extend([ self.stop_agent,
self.start_agent])
if self.restart_lldpad_on_upgrades:
script_list.append(" ".join((self.run_lldpad, "restart")))
script_list.append(self.cleanup)
else:
script_list = [self.rm_uplink, self.cp_uplink,
self.run_dfa_prep_on_compute,
self.add_req_txt, self.install_pkg,
self.stop_agent, self.start_agent,
self.run_lldpad, self.cleanup]
if input_compute_uplink is None:
input_compute_uplink = 'auto'
self.generate_uplink_file(input_compute_uplink)
for script in script_list:
self.run_cmd_line(script, shell=True, check_result=False)
def setup_compute(self, input_compute_name, input_compute_uplink):
"""Install Enabler on computes in enabler_conf.ini or
provided as input"""
compute_user, compute_passwd, compute_list, compute_uplinks = (
self.parse_config())
if input_compute_name is not None:
compute_list = []
compute_list.append(input_compute_name)
if input_compute_uplink is not None:
compute_uplinks = []
compute_uplinks.append(input_compute_uplink)
if compute_user is not None:
if compute_list is None:
print ("The user did not specify compute list ,"
"will auto detect.\n")
compute_list = self.find_computes()
if compute_uplinks is None:
compute_uplinks = ['auto']
while (len(compute_uplinks) < len(compute_list)):
print("Will use the last uplink ports for the rest of "
"compute nodes")
compute_uplinks.append(compute_uplinks[-1])
print('Compute User: %s' % compute_user)
print('Compute nodes: %s' % compute_list)
print('Uplinks : %s' % compute_uplinks)
for compute_host, compute_uplink in zip(compute_list, compute_uplinks):
self.setup_compute_remote(compute_host, compute_uplink,
compute_user, compute_passwd)
@property
def stop_neutron_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'stop_neutron_server')))
@property
def start_neutron_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'start_neutron_server')))
@property
def stop_keystone(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'stop_keystone')))
@property
def start_keystone(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get(
'start_keystone')))
@property
def stop_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('stop_server')))
@property
def start_server(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('start_server')))
@property
def stop_agent(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('stop_agent')))
@property
def start_agent(self):
if startup_cmds[self.distr_name]:
return ''.join((self.root_helper,
startup_cmds[self.distr_name].get('start_agent')))
@property
def get_pty(self):
if startup_cmds[self.distr_name]:
return startup_cmds[self.distr_name].get('get_pty')
def restart_keystone_process(self):
self.run_cmd_line(self.stop_keystone, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_keystone, check_result=False)
def restart_fabric_enabler_server(self):
self.run_cmd_line(self.stop_server, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_server)
def restart_fabric_enabler_agent(self):
self.run_cmd_line(self.stop_agent, check_result=False)
time.sleep(5)
self.run_cmd_line(self.start_agent)
def set_http_proxy(self, http_proxy):
self.http_proxy = http_proxy
def set_https_proxy(self, https_proxy):
self.https_proxy = https_proxy
def set_vendor_os_release(self, vendor_os_release):
if vendor_os_release is None:
return
# Save value...
self.vendor_os_rel = vendor_os_release
# ...and modify commands run locally
o = " --vendor-os-release=%s" % (vendor_os_release)
self.run_dfa_prep_on_control += o
self.run_dfa_prep_on_hacontrol += o
self.run_dfa_prep_on_compute += o
def set_upgrade(self, upgrade):
# Save value...
self.upgrade = upgrade
# ...and modify commands run locally
o = ' --upgrade=%s' % ("True" if upgrade else "False")
self.run_dfa_prep_on_control += o
self.run_dfa_prep_on_hacontrol += o
self.run_dfa_prep_on_compute += o
def set_restart_on_upgrades(self, restart):
self.restart_on_upgrades = restart
def set_restart_lldpad_on_upgrades(self, restart):
self.restart_lldpad_on_upgrades = restart
def rhel_osp7_setup(self, hamode):
# If upgrading restart only Fabric Enabler Server and Agent resource only
if self.upgrade:
pcs_resources_restart = []
if self.restart_on_upgrades:
pcs_resources_restart.extend(['fabric-enabler-server',
'fabric-enabler-agent'])
if self.restart_lldpad_on_upgrades:
pcs_resources_restart.append('lldpad')
for resource in pcs_resources_restart:
cmd = "%s pcs resource restart %s" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
if rc != 0:
cmd = "%s pcs resource cleanup %s" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
return
if hamode:
return
# Restart keystone/neutron
print("Restarting keystone and neutron")
cmds = ["pcs resource restart openstack-keystone",
"pcs resource restart neutron-server"]
for c in cmds:
cmd = "%s %s" % (self.root_helper, c)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
# Setup Pacemaker/Start resources
pcs_resources = {
'fabric-enabler-server':
["pcs resource create fabric-enabler-server systemd:fabric-enabler-server",
"pcs resource meta fabric-enabler-server migration-threshold=1",
"pcs constraint order promote galera-master then start fabric-enabler-server",
"pcs constraint order start rabbitmq-clone then start fabric-enabler-server",
"pcs resource enable fabric-enabler-server"],
'fabric-enabler-agent':
["pcs resource create fabric-enabler-agent systemd:fabric-enabler-agent --clone interleave=true",
"pcs constraint order start rabbitmq-clone then start fabric-enabler-agent-clone",
"pcs constraint order start neutron-openvswitch-agent-clone then start fabric-enabler-agent-clone",
"pcs resource enable fabric-enabler-agent"],
'lldpad':
["pcs resource create lldpad systemd:lldpad --clone interleave=true",
"pcs resource enable lldpad"]
}
print("Setting up and starting Pacemaker resources")
for resource in pcs_resources:
cmd = "%s pcs resource show %s 2>/dev/null" % \
(self.root_helper, resource)
o, rc = self.run_cmd_line(cmd, check_result=False, shell=True)
if o is None:
for c in pcs_resources[resource]:
cmd = "%s %s" % (self.root_helper, c)
o, rc = self.run_cmd_line(cmd, check_result=False)
print(o)
else:
print(o)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--ha-mode", default=None,
help="Set this value to True, if installing ONLY on "
"a controller node in an HA setup.")
parser.add_argument("--compute-name", default=None,
help="Set this value to thecontrol name or ip to "
"install the Enabler on a remote compute node.")
parser.add_argument("--control-name", default=None,
help="Set this value to the control name or ip to "
"install the Enabler on a remote control node.")
parser.add_argument("--remote-user", default=None,
help="Remote user for ssh access.")
parser.add_argument("--remote-password", default=None,
help="Remote password for ssh access.")
parser.add_argument("--http-proxy", default=None,
help="HTTP proxy URL.")
parser.add_argument("--https-proxy", default=None,
help="HTTPS proxy URL.")
parser.add_argument("--compute-local", default=False,
help="Set this value to True, if installing ONLY on "
"a local compute node.")
parser.add_argument("--controller-only", default=False,
help="Set this value to True, if installing only "
"on the controller.")
parser.add_argument("--uplink", help="compute uplink to leaf switch")
parser.add_argument("--mysql-user",
help="MySQL user name (only for control node)")
parser.add_argument("--mysql-password",
help="MySQL passsword (only for control node)")
parser.add_argument("--mysql-host",
help="MySQL Host name or IP address "
"(only for control node)")
parser.add_argument("--vendor-os-release", default=None,
help="Vendor specific OS release, e.g. rhel-osp7.")
parser.add_argument("--upgrade", default=None,
help="Set to True if upgrading an existing installation")
parser.add_argument("--restart", default=None,
help="Set to True to restart Fabric Enabler Server/Agent on upgrades")
parser.add_argument("--restart-lldpad", default=None,
help="Set to True to restart LLDPAD on upgrades")
args = parser.parse_args()
input_compute_name = args.compute_name
input_uplink = args.uplink
hamode = True if args.ha_mode is not None \
and args.ha_mode.lower() == 'true' else False
local_compute = True if args.compute_local and \
args.compute_local.lower() == 'true' else False
controller_only = True if args.controller_only and \
args.controller_only.lower() == 'true' or \
args.control_name is not None else False
install_control = False if args.compute_local or \
args.compute_name is not None else True
control_node = "n/a" if not install_control else \
args.control_name if args.control_name is not None else \
"remote" if args.vendor_os_release == 'rhel-osp7' and \
not args.controller_only \
else "local"
upgrade = True if args.upgrade is not None \
and args.upgrade.lower() == 'true' else False
restart = True if args.restart is None \
or args.restart.lower() == 'true' else False
restart_lldpad = True if args.restart_lldpad is not None \
and args.restart_lldpad.lower() == 'true' else False
if args.vendor_os_release == 'rhel-osp7' and \
not local_compute and not controller_only \
and args.control_name is None \
and args.compute_name is None:
if args.ha_mode is not None:
print("!!! WARNING: --ha-mode will be ignored.")
print("!!! Installer will take care of proper HA config.")
control_ha_mode = "auto"
compute_nodes = "as per 'nova list' output"
else:
control_ha_mode = "n/a" if not install_control else args.ha_mode
compute_nodes = "n/a" if controller_only \
else "local" if args.compute_local \
else args.compute_name \
if args.compute_name is not None \
else "as per enabler_conf.ini"
op = "upgrade" if upgrade else "install"
print("This script will %s the Openstack Fabric Enabler as follows:" % (op))
print(" - %s on control node: %s" % \
(op, "yes" if install_control else "no"))
print(" - control node(s): %s" % (control_node))
print(" - control HA mode: %s" % (control_ha_mode))
print(" - %s on compute nodes: %s" %
(op, "no" if controller_only else "yes"))
print(" - compute node(s): %s" % (compute_nodes))
print(" - uplink: %s" % ("auto" if input_uplink is None else input_uplink))
if upgrade:
print(" - restart agent/server: %s" % ("yes" if restart else "no"))
print(" - restart LLDPAD: %s" % ("yes" if restart_lldpad else "no"))
print("\n!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print("Upgrade will overwrite /etc/saf/enabler_conf.ini")
print("Please make sure your local enabler_conf.ini is up to date")
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
try:
user_answer = raw_input("Would you like to continue(y/n)? ").lower()
if user_answer.startswith('n'):
sys.exit(1)
except KeyboardInterrupt:
print
sys.exit(1)
fabric_inst = NexusFabricEnablerInstaller(args.mysql_user,
args.mysql_password,
args.mysql_host)
fabric_inst.set_http_proxy(args.http_proxy)
fabric_inst.set_https_proxy(args.https_proxy)
fabric_inst.set_vendor_os_release(args.vendor_os_release)
fabric_inst.set_upgrade(upgrade)
fabric_inst.set_restart_on_upgrades(restart)
fabric_inst.set_restart_lldpad_on_upgrades(restart_lldpad)
# RHEL-OSP7 specific behavior
if args.vendor_os_release == 'rhel-osp7':
root_helper = '' if os.geteuid() == 0 else 'sudo '
if args.remote_user is None:
args.remote_user = 'heat-admin'
extra_rpms_dir = "./extra-rpms"
pkgs = ["lldpad.x86_64",
"libconfig.x86_64"]
if not upgrade:
if local_compute or (args.control_name is None and controller_only):
# Install RPMs in extra_rpms_dir
cmd = "%s rpm -ihv %s/*" % (root_helper, extra_rpms_dir)
o, rc = fabric_inst .run_cmd_line(cmd, shell=True,
check_result=False)
if o is not None:
print(o)
else:
# Get extra RPMs
try:
os.mkdir(extra_rpms_dir)
except OSError as e:
if e.errno != errno.EEXIST:
                        raise
os.chdir(extra_rpms_dir)
cmd = "%s yumdownloader %s" % (root_helper, " ".join(pkgs))
o, rc = fabric_inst.run_cmd_line(cmd, check_result=False)
if len(os.listdir(".")) < 1:
print("Could not download rpms and %s is empty!" % extra_rpms_dir)
sys.exit(1)
os.chdir("../")
if not local_compute and not controller_only \
and args.control_name is None \
and args.compute_name is None:
# Install Fabric Enabler on controllers and computes
os.chdir("../")
cmd = "nova list | grep ctlplane= "
o, rc = fabric_inst.run_cmd_line(cmd, shell=True,
check_result=False)
if o is None:
print 'NOTICE: the script could not retrieve overcloud information'
print ' This could be due to stackrc not being sourced'
print ' or overcloud not being deployed.'
print ' Please make sure overcloud is deployed and stackrc'
print ' is sourced before running this command. Thank you.'
sys.exit(1)
print(o)
nodes = { 'compute': [], 'controller': [] }
for l in o.splitlines():
node_ip = None
s = l.split('|')
node_ip = s[6].split('=')[1]
node_type = 'compute' if 'compute' in s[2] else \
'controller' if 'controller' in s[2] else None
if node_type == 'compute' or node_type == 'controller':
nodes[node_type].append(node_ip)
for node_ip in nodes['compute']:
print 'Installing Fabric Enabler on compute', node_ip
fabric_inst.setup_compute_remote(node_ip, input_uplink,
args.remote_user,
args.remote_password)
cn = len(nodes['controller'])
for node_ip in nodes['controller']:
print 'Installing Fabric Enabler on controller', node_ip
if cn == 1:
fabric_inst.set_restart_on_upgrades(restart)
fabric_inst.set_restart_lldpad_on_upgrades(restart_lldpad)
else:
fabric_inst.set_restart_on_upgrades(False)
fabric_inst.set_restart_lldpad_on_upgrades(False)
fabric_inst.setup_control_remote(node_ip,
args.remote_user,
args.remote_password,
cn != 1)
cn -= 1
# Done!
sys.exit(0)
elif args.vendor_os_release is not None:
print 'ERROR: Vendor OS release %s is not supported' % (args.vendor_os_release)
print ' Supported vendor OS releases are:'
print ' - rhel-osp7'
sys.exit(1)
os.chdir("../")
if local_compute:
# Compute-only enabler installation
fabric_inst.setup_compute_local(input_uplink)
sys.exit(0)
if input_compute_name is None:
# Enabler installation on control node
if args.control_name is None:
fabric_inst.setup_control(hamode)
else:
fabric_inst.setup_control_remote(args.control_name,
args.remote_user,
args.remote_password,
hamode)
# Setup compute node.
if not hamode and not controller_only:
if args.remote_user is not None:
fabric_inst.setup_compute_remote(input_compute_name,
input_uplink,
args.remote_user,
args.remote_password)
else:
fabric_inst.setup_compute(input_compute_name, input_uplink)
| apache-2.0 |
2ndQuadrant/ansible | lib/ansible/modules/commands/telnet.py | 38 | 2786 | # this is a virtual module that is entirely implemented server side
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: telnet
short_description: Executes a low-down and dirty telnet command
version_added: 2.4
description:
- Executes a low-down and dirty telnet command, not going through the module subsystem.
- This is mostly to be used for enabling ssh on devices that only have telnet enabled by default.
options:
command:
description:
- List of commands to be executed in the telnet session.
required: True
aliases: ['commands']
host:
description:
- The host/target on which to execute the command
required: False
default: remote_addr
user:
description:
- The user for login
required: False
default: remote_user
password:
description:
- The password for login
port:
description:
- Remote port to use
default: 23
timeout:
description:
- timeout for remote operations
default: 120
prompts:
description:
- List of prompts expected before sending next command
required: False
default: ['$']
login_prompt:
description:
- Login or username prompt to expect
required: False
default: 'login: '
password_prompt:
description:
- Login or username prompt to expect
required: False
default: 'Password: '
pause:
description:
- Seconds to pause between each command issued
required: False
default: 1
send_newline:
description:
- Sends a newline character upon successful connection to start the
terminal session.
required: False
default: False
type: bool
version_added: "2.7"
notes:
- The C(environment) keyword does not work with this task
author:
- Ansible Core Team
'''
EXAMPLES = '''
- name: send configuration commands to IOS
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>#]"
command:
- terminal length 0
- configure terminal
- hostname ios01
- name: run show commands
telnet:
user: cisco
password: cisco
login_prompt: "Username: "
prompts:
- "[>#]"
command:
- terminal length 0
- show version
'''
RETURN = '''
output:
description: output of each command is an element in this list
type: list
returned: always
sample: [ 'success', 'success', '', 'warning .. something' ]
'''
| gpl-3.0 |
pelson/pyggybank | testing/test_wizard.py | 1 | 3067 | """
These integration tests exist solely to test the interaction between pyggybank and GPG on the CLI.
Avoid extending these tests where possible; prefer unit tests of the functions
themselves (mocking out the GPG interactions where necessary).
TODO: It would be great to bring these tests into the pyggybank.test module, and mark them as
full-blown integration tests.
"""
import pexpect
import sys
import os
import shutil
from pathlib import Path
gpg_vn = 2
def test_gpg_new_key_prompt():
global gpg_vn
# Check that pyggybank drops us into the gpg keygen prompt if we don't have any keys
tmp = Path('tmp')
if tmp.exists():
shutil.rmtree(tmp)
tmp.mkdir()
child = pexpect.spawnu('pyggybank wizard --gpg-home={}'.format(tmp))
# child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
# We just want to check that we have initiated the gpg wizard correctly. The details aren't important.
newer_gpg = True
try:
child.expect('Your selection?', timeout=1)
child.sendline('1')
child.expect('What keysize do you want?', timeout=1)
child.sendline('2048')
newer_gpg = False
gpg_vn = 1
child.expect('key expires in n years', timeout=1)
child.sendline('0')
except pexpect.exceptions.TIMEOUT:
pass
if newer_gpg:
child.expect('Real name:')
child.sendline('Testing Real Me')
child.expect('Email address:')
child.sendline('[email protected]')
child.expect('\(O\)kay\/\(Q\)uit\?')
child.close()
# Let's get a newline afterwards.
assert True
print()
def test_gpg_no_agent():
# Check the pyggybank behaviour when the gpg key hasn't been unlocked
# (i.e. the gpg-agent is fresh)
gpghome = Path(__file__).parent/'gpg'
accounts_file = Path('accounts.encrypted.{}.yml'.format(gpg_vn))
if gpg_vn < 2:
        raise RuntimeError("Can't yet handle older gpg.")
if accounts_file.exists():
accounts_file.unlink()
child = pexpect.spawnu('pyggybank wizard --gpg-home={} --accounts-file={}'.format(gpghome, accounts_file))
# child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
child.expect('GPG identity would you like to encrypt with\?', timeout=5)
child.sendline('Testing Name <[email protected]>')
child.expect('Provider:')
child.sendline('Test provider')
child.expect('User ID')
child.sendline('abcdef')
child.expect('password')
child.sendline('123456')
child.expect('Wrote config')
# --------
child = pexpect.spawnu('pyggybank accounts --accounts-file={} --gpg-home={}'.format(accounts_file, gpghome))
#child.logfile = os.fdopen(sys.stdout.fileno(), 'w')
# Will only be called if gpg-agent isn't running.
child.expect('GPG passphrase\:')
child.sendline('Th15154T35t')
child.expect('Test provider')
# Let's get a newline afterwards.
assert True
print()
if __name__ == '__main__':
test_gpg_new_key_prompt()
test_gpg_no_agent()
| bsd-3-clause |
matheuscas/hackathon_paypal | modules/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
| mit |
UOWPhysSoc/Barnes-Hutt-Simulation--James-MattG- | barnesdist.py | 1 | 4826 | '''
Distribution function module for Barnes-Hutt n-body simulation developed by Matt Griffiths and James Archer.
Example distribution written by Matt Griffiths, list idea conceived by James Archer.
Special thanks to Matt Sanderson for ideas regarding distribution implementation.
Available for use under a GPL v3 licence.
'''
#Import dependent libraries
from random import *
#from visual import *
from vector import *
from math import *
class distributions():
def __init__(self, dist_name, G):
self.dist_name = dist_name
self.part = []
self.n = 0
self.index = 0
self.G = G
def call(self):
for i in self.dist_name:
try:
getattr(self, i)()
except:
pass
return self.part
def uniform_cube(self):
self.n += int(input('Number of bodies: '))
#x_range = float(input('X range: '))
#y_range = float(input('Y range: '))
#z_range = float(input('Z range: '))
x_range = 10
y_range = 10
z_range = 10
for i in range(self.n):
r = random_vect(x_range,y_range,z_range)
self.part.append({
'pos-1':r,
'pos':r,
'mass':1/self.n,
'vel':random_vect(x_range*0.1,y_range*0.1,z_range*0.1),
'acc':vector(0,0,0),
'num':self.index
})
self.index += 1
def ring_old(index,posd,veld,centralmass):
#Ring type distribution around a massive central body
        # NOT ADAPTED FOR THE NEW DISTRIBUTION SYSTEM
n = int(input('Number of bodies: '))
posd
#For first index, create central body
if index == 0:
return({
'pos-1':vector(0,0,0),
'pos':vector(0,0,0),
'mass':centralmass,
'vel':vector(0,0,0),
'acc':vector(0,0,0),
'num':0
})
#For further indexes, add smaller orbiting bodies to a ring
else:
zunit = vector(0,0,1)
tempvect = vector(0,0,0)
temptheta = uniform(0,2*pi)
rad = gauss(posd,posd/10)
tempvect.x = rad*math.cos(temptheta)
tempvect.y = rad*math.sin(temptheta)
tempvect.z = gauss(posd/10,posd/10)
tempvel = math.sqrt(centralmass/posd)*(tempvect/abs(tempvect)).cross(zunit)
tempm = 1
return ({
'pos-1':tempvect,
'pos':vector(0,0,0),
'mass':tempm,
'vel':tempvel,
'acc':vector(0,0,0),
'num':index
})
def kepler(self):
n_new = int(input('Number of bodies: '))
self.n += n_new
cent_mass = float(input('Central body mass: '))
other_mass = float(input('Other masses: '))
mean_r = float(input('Mean radius: '))
self.part.append({
'pos-1':vector(0,0,0),
'pos':vector(0,0,0),
'mass':cent_mass,
'vel':vector(0,0,0),
'acc':vector(0,0,0),
'num':self.index
})
self.index += 1
for i in range(0,n_new - 1):
r = vector(1,0,0) * expovariate(1./mean_r)
r = rotate(r, uniform(0, 2*pi), vector(0,0,1))
self.part.append({
'pos-1':r,
'pos':r,
'mass':other_mass,
'vel':cross(r/mag(r),vector(0,0,1))*pow(self.G*(cent_mass + n_new*other_mass*(1-exp(-mag(r)/mean_r)))/mag(r),0.5),
'acc':vector(0,0,0),
'num':self.index
})
self.index += 1
def two_body(self):
self.n += 2
mass1 = float(input('First body mass: '))
mass2 = float(input('Second body mass: '))
r = vector(1,0,0)*float(input('Separation distance: '))
mu = mass1 * mass2 /(mass1 + mass2)
self.part.append({
'pos-1':vector(0,0,0),
'pos':vector(0,0,0),
'mass':mass1,
'vel':sqrt(mass2**2/(mag(r)*(mass1 + mass2))) * vector(0,1,0),
'acc':vector(0,0,0),
'num':self.index
})
self.index += 1
self.part.append({
'pos-1':r,
'pos':r,
'mass':mass2,
'vel':sqrt(mass1**2/(mag(r)*(mass1 + mass2))) * vector(0,-1,0),
'acc':vector(0,0,0),
'num':self.index
})
self.index += 1
def random_vect(dx, dy, dz):
return vector(uniform(-dx/2, dx/2),uniform(-dy/2,dy/2),uniform(-dz/2,dz/2))
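# Usage sketch (illustrative comment, not part of the original module): the list
# passed to distributions() names methods on the class, and G is the gravitational
# constant used by kepler(); G=1.0 below is an assumed demo value.
# dist = distributions(['kepler'], 1.0)    # prompts for counts/masses on stdin
# particles = dist.call()                  # returns the list of particle dicts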
#james rules
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/django/apps/config.py | 51 | 8187 | import os
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# Reference to the Apps registry that holds this AppConfig. Set by the
# registry when it registers the AppConfig instance.
self.apps = None
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3's _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
else:
# For unknown reasons, sometimes the list returned by __path__
# contains duplicates that must be removed (#25246).
paths = list(set(paths))
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
try:
app_module = import_module(app_name)
except ImportError:
raise ImproperlyConfigured(
"Cannot import '%s'. Check that '%s.%s.name' is correct." % (
app_name, mod_path, cls_name,
)
)
# Entry is a path to an app config class.
return cls(app_name, app_module)
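    # Illustration (added comment, not in the original file): both styles of
    # INSTALLED_APPS entry end up in create() above -- a plain app module such as
    # 'django.contrib.admin' resolves through its default_app_config, while a
    # dotted path like 'myapp.apps.MyAppConfig' (a hypothetical example) is
    # imported directly and must subclass AppConfig and define a 'name'.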
def get_model(self, model_name, require_ready=True):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
if require_ready:
self.apps.check_models_ready()
else:
self.apps.check_apps_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.apps.check_models_ready()
for model in self.models.values():
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
self.models = self.apps.all_models[self.label]
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| gpl-3.0 |
40223123/finaltest2 | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/display.py | 603 | 25179 | #!/usr/bin/env python
'''Pygame module to control the display window and screen.
This module offers control over the pygame display. Pygame has a single display
Surface that is either contained in a window or runs full screen. Once you
create the display you treat it as a regular Surface. Changes are not
immediately visible onscreen, you must choose one of the two flipping functions
to update the actual display.
The pygame display can actually be initialized in one of several modes. By
default the display is a basic software driven framebuffer. You can request
special modules like hardware acceleration and OpenGL support. These are
controlled by flags passed to pygame.display.set_mode().
Pygame can only have a single display active at any time. Creating a new one
with pygame.display.set_mode() will close the previous display. If precise
control is needed over the pixel format or display resolutions, use the
functions pygame.display.mode_ok(), pygame.display.list_modes(), and
pygame.display.Info() to query information about the display.
Once the display Surface is created, the functions from this module
effect the single existing display. The Surface becomes invalid if the module
is uninitialized. If a new display mode is set, the existing Surface will
automatically switch to operate on the new display.
When the display mode is set, several events are placed on the pygame
event queue. pygame.QUIT is sent when the user has requested the program
to shutdown. The window will receive pygame.ACTIVEEVENT events as the
display gains and loses input focus. If the display is set with the
pygame.RESIZABLE flag, pygame.VIDEORESIZE events will be sent when the
user adjusts the window dimensions. Hardware displays that draw direct
to the screen will get pygame.VIDEOEXPOSE events when portions of the
window must be redrawn.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
from SDL import *
import pygame.base
import pygame.pkgdata
import pygame.surface
#brython
import pygame.constants
from browser import document, window
#from javascript import console
_display_surface = None
_icon_was_set = 0
_icon_defaultname = 'pygame_icon.bmp'
_init_video=False
def __PYGAMEinit__():
pygame.base.register_quit(_display_autoquit)
def _display_autoquit():
global _display_surface
_display_surface = None
def init():
'''Initialize the display module.
Initializes the pygame display module. The display module cannot do
anything until it is initialized. This is usually handled for you
automatically when you call the higher level `pygame.init`.
Pygame will select from one of several internal display backends when it
is initialized. The display mode will be chosen depending on the platform
and permissions of current user. Before the display module is initialized
the environment variable SDL_VIDEODRIVER can be set to control which
backend is used. The systems with multiple choices are listed here.
Windows
windib, directx
Unix
x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
On some platforms it is possible to embed the pygame display into an already
existing window. To do this, the environment variable SDL_WINDOWID must be
set to a string containing the window id or handle. The environment variable
is checked when the pygame display is initialized. Be aware that there can
be many strange side effects when running in an embedded display.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoinit()
__PYGAMEinit__()
def quit():
'''Uninitialize the display module.
This will shut down the entire display module. This means any active
displays will be closed. This will also be handled automatically when the
program exits.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoquit()
_display_autoquit()
def get_init():
'''Get status of display module initialization.
:rtype: bool
:return: True if SDL's video system is currently initialized.
'''
return SDL_WasInit(SDL_INIT_VIDEO) != 0
def set_mode(resolution, flags=0, depth=0):
'''Initialize a window or screen for display.
This function will create a display Surface. The arguments passed in are
requests for a display type. The actual created display will be the best
possible match supported by the system.
The `resolution` argument is a pair of numbers representing the width and
height. The `flags` argument is a collection of additional options.
The `depth` argument represents the number of bits to use for color.
The Surface that gets returned can be drawn to like a regular Surface but
changes will eventually be seen on the monitor.
It is usually best to not pass the depth argument. It will default to the
best and fastest color depth for the system. If your game requires a
specific color format you can control the depth with this argument. Pygame
will emulate an unavailable color depth which can be slow.
When requesting fullscreen display modes, sometimes an exact match for the
requested resolution cannot be made. In these situations pygame will select
    the closest compatible match. The returned surface will still always match
the requested resolution.
The flags argument controls which type of display you want. There are
several to choose from, and you can even combine multiple types using the
bitwise or operator, (the pipe "|" character). If you pass 0 or no flags
argument it will default to a software driven window. Here are the display
flags you will want to choose from:
pygame.FULLSCREEN
create a fullscreen display
pygame.DOUBLEBUF
recommended for HWSURFACE or OPENGL
pygame.HWSURFACE
        hardware accelerated, only in FULLSCREEN
pygame.OPENGL
create an opengl renderable display
pygame.RESIZABLE
display window should be sizeable
pygame.NOFRAME
display window will have no border or controls
:Parameters:
- `resolution`: int, int
- `flags`: int
- `depth`: int
:rtype: `Surface`
'''
global _display_surface
w, h = resolution
if w <= 0 or h <= 0:
raise pygame.base.error('Cannot set 0 sized display mode')
if not SDL_WasInit(SDL_INIT_VIDEO):
init()
if flags & SDL_OPENGL:
if flags & SDL_DOUBLEBUF:
flags &= ~SDL_DOUBLEBUF
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
else:
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 0)
if depth:
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth)
surf = SDL_SetVideoMode(w, h, depth, flags)
if SDL_GL_GetAttribute(SDL_GL_DOUBLEBUFFER):
surf.flags |= SDL_DOUBLEBUF
else:
if not depth:
flags |= SDL_ANYFORMAT
surf = SDL_SetVideoMode(w, h, depth, flags)
title, icontitle = SDL_WM_GetCaption()
if not title:
SDL_WM_SetCaption('pygame window', 'pygame')
SDL_PumpEvents()
if _display_surface:
_display_surface._surf = surf
else:
#_display_surface = pygame.surface.Surface(surf=surf)
_display_surface = pygame.surface.Surface(dim=(w,h))
document['pydiv'] <= _display_surface.canvas
if sys.platform != 'darwin':
if not _icon_was_set:
try:
file = pygame.pkgdata.getResource(_icon_defaultname)
iconsurf = pygame.image.load(file)
SDL_SetColorKey(iconsurf._surf, SDL_SRCCOLORKEY, 0)
set_icon(iconsurf)
except IOError:
# Not worth dying over.
pass
return _display_surface
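# Usage sketch (illustrative comment, not part of the original module; the size
# is arbitrary and the flag names come from pygame.constants):
# screen = set_mode((640, 480))                                  # windowed surface
# screen = set_mode((640, 480), pygame.constants.FULLSCREEN)     # fullscreen request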
def get_surface():
'''Get current display surface.
Returns a `Surface` object representing the current display. Will
return None if called before the display mode is set.
:rtype: `Surface`
'''
return _display_surface
def flip():
'''Update the full display surface to the screen.
This will update the contents of the entire display. If your display mode
is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this will wait
for a vertical retrace and swap the surfaces. If you are using a different
type of display mode, it will simply update the entire contents of the
surface.
When using an pygame.OPENGL display mode this will perform a gl buffer
swap.
'''
pass
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
SDL_GL_SwapBuffers()
else:
SDL_Flip(screen)
def _crop_rect(w, h, rect):
if rect.x >= w or rect.y >= h or \
rect.x + rect.w <= 0 or rect.y + rect.h <= 0:
return None
rect.x = max(rect.x, 0)
rect.y = max(rect.y, 0)
rect.w = min(rect.x + rect.w, w) - rect.x
rect.h = min(rect.y + rect.h, h) - rect.y
return rect
def update(*rectangle):
'''Update portions of the screen for software displays.
This function is like an optimized version of pygame.display.flip() for
    software displays. It allows only a portion of the screen to be updated,
instead of the entire area. If no argument is passed it updates the entire
Surface area like `flip`.
You can pass the function a single rectangle, or a sequence of rectangles.
It is more efficient to pass many rectangles at once than to call update
multiple times with single or a partial list of rectangles. If passing
a sequence of rectangles it is safe to include None values in the list,
which will be skipped.
This call cannot be used on pygame.OPENGL displays and will generate an
exception.
:Parameters:
`rectangle` : Rect or sequence of Rect
Area(s) to update
'''
# Undocumented: also allows argument tuple to represent one rect;
# e.g. update(0, 0, 10, 10) or update((0, 0), (10, 10))
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
raise pygame.base.error('Cannot update an OPENGL display')
if not rectangle:
SDL_UpdateRect(screen, 0, 0, 0, 0)
else:
w, h = screen.w, screen.h
w, h = screen.width, screen.height
try:
rect = pygame.rect._rect_from_object(rectangle)._r
rect = _crop_rect(w, h, rect)
if rect:
SDL_UpdateRect(screen, rect.x, rect.y, rect.w, rect.h)
except TypeError:
rectangle = rectangle[0]
rects = [_crop_rect(w, h, pygame.rect._rect_from_object(r)._r) \
for r in rectangle if r]
SDL_UpdateRects(screen, rects)
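# Usage sketch (illustrative comment; the rectangles are made-up values): passing
# a sequence of rects updates only those regions, and None entries are skipped.
# update()                                    # update the whole screen
# update([pygame.Rect(0, 0, 64, 64), None])   # update one 64x64 region only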
def get_driver():
'''Get name of the pygame display backend.
Pygame chooses one of many available display backends when it is
initialized. This returns the internal name used for the display backend.
This can be used to provide limited information about what display
capabilities might be accelerated.
:rtype: str
'''
_video_init_check()
return SDL_VideoDriverName()
def Info():
'''Create a video display information object.
Creates a simple object containing several attributes to describe the
current graphics environment. If this is called before
`set_mode` some platforms can provide information about the default
display mode. This can also be called after setting the display mode to
verify specific display options were satisfied.
:see: `VideoInfo`
:rtype: `VideoInfo`
'''
_video_init_check()
return VideoInfo()
class VideoInfo:
'''Video display information.
:Ivariables:
`hw` : bool
True if the display is hardware accelerated.
`wm` : bool
True if windowed display modes can be used.
`video_mem` : int
            The amount of video memory on the display, in megabytes. 0 if
unknown.
`bitsize` : int
Number of bits used to store each pixel.
`bytesize` : int
Number of bytes used to store each pixel.
`masks` : (int, int, int, int)
RGBA component mask.
`shifts` : (int, int, int, int)
RGBA component shift amounts.
`losses` : (int, int, int, int)
Number of bits lost from a 32 bit depth for each RGBA component.
`blit_hw` : bool
True if hardware Surface blitting is accelerated
`blit_hw_CC` : bool
True if hardware Surface colorkey blitting is accelerated
`blit_hw_A` : bool
True if hardware Surface pixel alpha blitting is accelerated
`blit_sw` : bool
True if software Surface blitting is accelerated
`blit_sw_CC` : bool
True if software Surface colorkey blitting is accelerated
`blit_sw_A` : bool
            True if software Surface pixel alpha blitting is accelerated
'''
def __init__(self):
#brython
#info = SDL_GetVideoInfo()
info=None
if not info:
raise pygame.base.error('Could not retrieve video info')
self.hw = info.hw_available
self.wm = info.wm_available
self.blit_hw = info.blit_hw
self.blit_hw_CC = info.blit_hw_CC
self.blit_hw_A = info.blit_hw_A
self.blit_sw = info.blit_sw
self.blit_sw_CC = info.blit_sw_CC
self.blit_sw_A = info.blit_sw_A
self.blit_fill = info.blit_fill
self.video_mem = info.video_mem
self.bitsize = info.vfmt.BitsPerPixel
self.bytesize = info.vfmt.BytesPerPixel
self.masks = (info.vfmt.Rmask, info.vfmt.Gmask,
info.vfmt.Bmask, info.vfmt.Amask)
self.shifts = (info.vfmt.Rshift, info.vfmt.Gshift,
info.vfmt.Bshift, info.vfmt.Ashift)
self.losses = (info.vfmt.Rloss, info.vfmt.Gloss,
info.vfmt.Bloss, info.vfmt.Aloss)
def __str__(self):
return ('<VideoInfo(hw = %d, wm = %d,video_mem = %d\n' + \
' blit_hw = %d, blit_hw_CC = %d, blit_hw_A = %d,\n'
' blit_sw = %d, blit_sw_CC = %d, blit_sw_A = %d,\n'
' bitsize = %d, bytesize = %d,\n'
' masks = (%d, %d, %d, %d),\n'
' shifts = (%d, %d, %d, %d),\n'
' losses = (%d, %d, %d, %d)>\n') % \
(self.hw, self.wm, self.video_mem,
self.blit_hw, self.blit_hw_CC, self.blit_hw_A,
self.blit_sw, self.blit_sw_CC, self.blit_sw_A,
self.bitsize, self.bytesize,
self.masks[0], self.masks[1], self.masks[2], self.masks[3],
self.shifts[0], self.shifts[1], self.shifts[2], self.shifts[3],
self.losses[0], self.losses[1], self.losses[2], self.losses[3])
def __repr__(self):
return str(self)
def get_wm_info():
'''Get settings from the system window manager.
:note: Currently unimplemented, returns an empty dict.
:rtype: dict
'''
_video_init_check()
return {}
def list_modes(depth=0, flags=pygame.constants.FULLSCREEN):
'''Get list of available fullscreen modes.
This function returns a list of possible dimensions for a specified color
depth. The return value will be an empty list if no display modes are
available with the given arguments. A return value of -1 means that any
requested resolution should work (this is likely the case for windowed
modes). Mode sizes are sorted from biggest to smallest.
If depth is 0, SDL will choose the current/best color depth for the
display. The flags defaults to pygame.FULLSCREEN, but you may need to add
additional flags for specific fullscreen modes.
:rtype: list of (int, int), or -1
:return: list of (width, height) pairs, or -1 if any mode is suitable.
'''
_video_init_check()
#brython
#format = SDL_PixelFormat()
#format.BitsPerPixel = depth
#brython
#if not format.BitsPerPixel:
# format.BitsPerPixel = SDL_GetVideoInfo().vfmt.BitsPerPixel
#brython
    #rects = SDL_ListModes(format, flags)
    rects = -1  # browser backend: treat any requested resolution as available
    if rects == -1:
return -1
return [(r.w, r.h) for r in rects]
def mode_ok(size, flags=0, depth=0):
'''Pick the best color depth for a display mode
This function uses the same arguments as pygame.display.set_mode(). It is
    used to determine if a requested display mode is available. It will return
0 if the display mode cannot be set. Otherwise it will return a pixel
depth that best matches the display asked for.
Usually the depth argument is not passed, but some platforms can support
multiple display depths. If passed it will hint to which depth is a better
match.
The most useful flags to pass will be pygame.HWSURFACE, pygame.DOUBLEBUF,
and maybe pygame.FULLSCREEN. The function will return 0 if these display
flags cannot be set.
:rtype: int
:return: depth, in bits per pixel, or 0 if the requested mode cannot be
set.
'''
_video_init_check()
if not depth:
depth = SDL_GetVideoInfo().vfmt.BitsPerPixel
return SDL_VideoModeOK(size[0], size[1], depth, flags)
def gl_set_attribute(flag, value):
'''Set special OpenGL attributes.
When calling `pygame.display.set_mode` with the OPENGL flag,
pygame automatically handles setting the OpenGL attributes like
color and doublebuffering. OpenGL offers several other attributes
you may want control over. Pass one of these attributes as the
flag, and its appropriate value.
This must be called before `pygame.display.set_mode`.
The OPENGL flags are: GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE,
GL_ACCUM_RED_SIZE, GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE,
GL_ACCUM_ALPHA_SIZE GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES,
GL_STEREO.
:Parameters:
- `flag`: int
- `value`: int
'''
_video_init_check()
SDL_GL_SetAttribute(flag, value)
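# Usage sketch (illustrative comment; the attribute value is an arbitrary example):
# gl_set_attribute(pygame.constants.GL_STENCIL_SIZE, 8) must be called before
# set_mode(..., OPENGL) for the request to take effect.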
def gl_get_attribute(flag):
'''Get special OpenGL attributes.
After calling `pygame.display.set_mode` with the OPENGL flag
you will likely want to check the value of any special OpenGL
attributes you requested. You will not always get what you
requested.
See `gl_set_attribute` for a list of flags.
:Parameters:
- `flag`: int
:rtype: int
'''
_video_init_check()
return SDL_GL_GetAttribute(flag)
def get_active():
'''Get state of display mode
    Returns True if the current display is active on the screen. This is
done with the call to ``pygame.display.set_mode()``. It is
potentially subject to the activity of a running window manager.
Calling `set_mode` will change all existing display surface
to reference the new display mode. The old display surface will
be lost after this call.
'''
brython
return SDL_GetAppState() & SDL_APPACTIVE != 0
def iconify():
'''Iconify the display surface.
Request the window for the display surface be iconified or hidden. Not all
systems and displays support an iconified display. The function will
    return True if successful.
When the display is iconified pygame.display.get_active() will return
False. The event queue should receive a pygame.APPACTIVE event when the
window has been iconified.
:rtype: bool
:return: True on success
'''
_video_init_check()
try:
SDL_WM_IconifyWindow()
return True
except SDL_Exception:
return False
def toggle_fullscreen():
'''Switch between fullscreen and windowed displays.
Switches the display window between windowed and fullscreen modes. This
function only works under the unix x11 video driver. For most situations
it is better to call pygame.display.set_mode() with new display flags.
:rtype: bool
'''
_video_init_check()
screen = SDL_GetVideoSurface()
try:
SDL_WM_ToggleFullScreen(screen)
return True
except SDL_Exception:
return False
return False
def set_gamma(red, green=None, blue=None):
'''Change the hardware gamma ramps.
Set the red, green, and blue gamma values on the display hardware. If the
green and blue arguments are not passed, they will both be the same as
red. Not all systems and hardware support gamma ramps, if the function
succeeds it will return True.
A gamma value of 1.0 creates a linear color table. Lower values will
darken the display and higher values will brighten.
:Parameters:
`red` : float
Red gamma value
`green` : float
Green gamma value
`blue` : float
Blue gamma value
:rtype: bool
'''
brython
_video_init_check()
if not green or not blue:
green = red
blue = red
try:
SDL_SetGamma(red, green, blue)
return True
except SDL_Exception:
return False
def set_gamma_ramp(red, green, blue):
'''Change the hardware gamma ramps with a custom lookup.
Set the red, green, and blue gamma ramps with an explicit lookup table.
Each argument should be sequence of 256 integers. The integers should
range between 0 and 0xffff. Not all systems and hardware support gamma
ramps, if the function succeeds it will return True.
:Parameters:
`red` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving red component
lookup.
`green` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving green component
lookup.
`blue` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving blue component
lookup.
:rtype: bool
'''
_video_init_check()
try:
SDL_SetGammaRamp(red, green, blue)
return True
except SDL_Exception:
return False
def set_icon(surface):
'''Change the system image for the display window.
Sets the runtime icon the system will use to represent the display window.
All windows default to a simple pygame logo for the window icon.
You can pass any surface, but most systems want a smaller image around
32x32. The image can have colorkey transparency which will be passed to
the system.
Some systems do not allow the window icon to change after it has been
shown. This function can be called before `set_mode` to
create the icon before the display mode is set.
:Parameters:
`surface` : `Surface`
Surface containing image to set.
'''
global _icon_was_set
pygame.base._video_autoinit()
SDL_WM_SetIcon(surface._surf, None)
_icon_was_set = 1
def set_caption(title, icontitle=None):
'''Set the current window caption.
If the display has a window title, this function will change the name on
the window. Some systems support an alternate shorter title to be used for
minimized displays.
:Parameters:
`title` : unicode
Window caption
`icontitle` : unicode
Icon caption, if supported
'''
if not icontitle:
icontitle = title
SDL_WM_SetCaption(title, icontitle)
def get_caption():
'''Get the current window caption.
Returns the title and icontitle for the display Surface. These will often
be the same value.
:rtype: (unicode, unicode)
:return: title, icontitle
'''
# XXX deviation from pygame, don't return () if title == None
#return SDL_WM_GetCaption()
return "", ""
def set_palette(palette=None):
'''Set the display color palette for indexed displays.
This will change the video display color palette for 8bit displays. This
does not change the palette for the actual display Surface, only the
palette that is used to display the Surface. If no palette argument is
passed, the system default palette will be restored. The palette is a
sequence of RGB triplets.
:Parameters:
`palette` : sequence of (int, int, int)
Sequence having at most 256 RGB triplets.
'''
_video_init_check()
surf = SDL_GetVideoSurface()
if not surf:
raise pygame.base.error('No display mode is set')
if surf.format.BytesPerPixel != 1 or not surf.format._palette:
raise pygame.base.error('Display mode is not colormapped')
if not palette:
        SDL_SetPalette(surf, SDL_PHYSPAL, surf.format.palette.colors, 0)
        return
length = min(surf.format.palette.ncolors, len(palette))
colors = [SDL_Color(r, g, b) for r, g, b in palette[:length]]
SDL_SetPalette(surf, SDL_PHYSPAL, colors, 0)
def _video_init_check():
if not SDL_WasInit(SDL_INIT_VIDEO):
raise pygame.base.error('video system not initialized')
| gpl-3.0 |
shadyueh/pyranking | env/lib/python2.7/site-packages/django/contrib/gis/db/models/functions.py | 59 | 15777 | from decimal import Decimal
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import AreaField
from django.contrib.gis.measure import (
Area as AreaMeasure, Distance as DistanceMeasure,
)
from django.core.exceptions import FieldError
from django.db.models import FloatField, IntegerField, TextField
from django.db.models.expressions import Func, Value
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
class GeoFunc(Func):
function = None
output_field_class = None
geom_param_pos = 0
def __init__(self, *expressions, **extra):
if 'output_field' not in extra and self.output_field_class:
extra['output_field'] = self.output_field_class()
super(GeoFunc, self).__init__(*expressions, **extra)
@property
def name(self):
return self.__class__.__name__
@property
def srid(self):
expr = self.source_expressions[self.geom_param_pos]
if hasattr(expr, 'srid'):
return expr.srid
try:
return expr.field.srid
except (AttributeError, FieldError):
return None
def as_sql(self, compiler, connection):
if self.function is None:
self.function = connection.ops.spatial_function_name(self.name)
return super(GeoFunc, self).as_sql(compiler, connection)
def resolve_expression(self, *args, **kwargs):
res = super(GeoFunc, self).resolve_expression(*args, **kwargs)
base_srid = res.srid
if not base_srid:
raise TypeError("Geometry functions can only operate on geometric content.")
for pos, expr in enumerate(res.source_expressions[1:], start=1):
if isinstance(expr, GeomValue) and expr.srid != base_srid:
# Automatic SRID conversion so objects are comparable
res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
return res
def _handle_param(self, value, param_name='', check_types=None):
if not hasattr(value, 'resolve_expression'):
if check_types and not isinstance(value, check_types):
raise TypeError(
"The %s parameter has the wrong type: should be %s." % (
param_name, str(check_types))
)
return value
class GeomValue(Value):
geography = False
@property
def srid(self):
return self.value.srid
def as_sql(self, compiler, connection):
if self.geography:
self.value = connection.ops.Adapter(self.value, geography=self.geography)
else:
self.value = connection.ops.Adapter(self.value)
return super(GeomValue, self).as_sql(compiler, connection)
def as_mysql(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_sqlite(self, compiler, connection):
return 'GeomFromText(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
def as_oracle(self, compiler, connection):
return 'SDO_GEOMETRY(%%s, %s)' % self.srid, [connection.ops.Adapter(self.value)]
class GeoFuncWithGeoParam(GeoFunc):
def __init__(self, expression, geom, *expressions, **extra):
if not hasattr(geom, 'srid') or not geom.srid:
raise ValueError("Please provide a geometry attribute with a defined SRID.")
super(GeoFuncWithGeoParam, self).__init__(expression, GeomValue(geom), *expressions, **extra)
class SQLiteDecimalToFloatMixin(object):
"""
By default, Decimal values are converted to str by the SQLite backend, which
is not acceptable by the GIS functions expecting numeric values.
"""
def as_sqlite(self, compiler, connection):
for expr in self.get_source_expressions():
if hasattr(expr, 'value') and isinstance(expr.value, Decimal):
expr.value = float(expr.value)
return super(SQLiteDecimalToFloatMixin, self).as_sql(compiler, connection)
class OracleToleranceMixin(object):
tolerance = 0.05
def as_oracle(self, compiler, connection):
tol = self.extra.get('tolerance', self.tolerance)
self.template = "%%(function)s(%%(expressions)s, %s)" % tol
return super(OracleToleranceMixin, self).as_sql(compiler, connection)
class Area(OracleToleranceMixin, GeoFunc):
def as_sql(self, compiler, connection):
if connection.ops.geography:
# Geography fields support area calculation, returns square meters.
self.output_field = AreaField('sq_m')
elif not self.output_field.geodetic(connection):
# Getting the area units of the geographic field.
units = self.output_field.units_name(connection)
if units:
self.output_field = AreaField(
AreaMeasure.unit_attname(self.output_field.units_name(connection))
)
else:
self.output_field = FloatField()
else:
# TODO: Do we want to support raw number areas for geodetic fields?
raise NotImplementedError('Area on geodetic coordinate systems not supported.')
return super(Area, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
self.output_field = AreaField('sq_m') # Oracle returns area in units of meters.
return super(Area, self).as_oracle(compiler, connection)
class AsGeoJSON(GeoFunc):
output_field_class = TextField
def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
options = 0
if crs and bbox:
options = 3
elif bbox:
options = 1
elif crs:
options = 2
if options:
expressions.append(options)
super(AsGeoJSON, self).__init__(*expressions, **extra)
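    # Illustration (added comment; 'geom' is a hypothetical geometry field):
    # options is a bitmask, with bbox contributing 1 and crs contributing 2, so
    # AsGeoJSON('geom', bbox=True, crs=True) passes 3 to the backend function.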
class AsGML(GeoFunc):
geom_param_pos = 1
output_field_class = TextField
def __init__(self, expression, version=2, precision=8, **extra):
expressions = [version, expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(AsGML, self).__init__(*expressions, **extra)
class AsKML(AsGML):
def as_sqlite(self, compiler, connection):
# No version parameter
self.source_expressions.pop(0)
return super(AsKML, self).as_sql(compiler, connection)
class AsSVG(GeoFunc):
output_field_class = TextField
def __init__(self, expression, relative=False, precision=8, **extra):
relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
expressions = [
expression,
relative,
self._handle_param(precision, 'precision', six.integer_types),
]
super(AsSVG, self).__init__(*expressions, **extra)
class BoundingCircle(GeoFunc):
def __init__(self, expression, num_seg=48, **extra):
super(BoundingCircle, self).__init__(*[expression, num_seg], **extra)
class Centroid(OracleToleranceMixin, GeoFunc):
pass
class Difference(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class DistanceResultMixin(object):
def convert_value(self, value, expression, connection, context):
if value is None:
return None
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection):
dist_att = 'm'
else:
units = geo_field.units_name(connection)
if units:
dist_att = DistanceMeasure.unit_attname(units)
else:
dist_att = None
if dist_att:
return DistanceMeasure(**{dist_att: value})
return value
class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFuncWithGeoParam):
output_field_class = FloatField
spheroid = None
def __init__(self, expr1, expr2, spheroid=None, **extra):
expressions = [expr1, expr2]
if spheroid is not None:
self.spheroid = spheroid
expressions += (self._handle_param(spheroid, 'spheroid', bool),)
super(Distance, self).__init__(*expressions, **extra)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
# Set parameters as geography if base field is geography
for pos, expr in enumerate(
self.source_expressions[self.geom_param_pos + 1:], start=self.geom_param_pos + 1):
if isinstance(expr, GeomValue):
expr.geography = True
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need special distance functions
if self.spheroid:
self.function = 'ST_Distance_Spheroid' # More accurate, resource intensive
# Replace boolean param by the real spheroid of the base field
self.source_expressions[2] = Value(geo_field._spheroid)
else:
self.function = 'ST_Distance_Sphere'
return super(Distance, self).as_sql(compiler, connection)
def as_oracle(self, compiler, connection):
if self.spheroid:
self.source_expressions.pop(2)
return super(Distance, self).as_oracle(compiler, connection)
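    # Usage sketch (added comment; City and pnt are hypothetical): annotating a
    # queryset as City.objects.annotate(d=Distance('point', pnt)) yields Distance
    # measures through DistanceResultMixin.convert_value, and spheroid=True picks
    # the slower but more accurate spheroid calculation on geodetic PostGIS fields.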
class Envelope(GeoFunc):
pass
class ForceRHR(GeoFunc):
pass
class GeoHash(GeoFunc):
output_field_class = TextField
def __init__(self, expression, precision=None, **extra):
expressions = [expression]
if precision is not None:
expressions.append(self._handle_param(precision, 'precision', six.integer_types))
super(GeoHash, self).__init__(*expressions, **extra)
class Intersection(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def __init__(self, expr1, spheroid=True, **extra):
self.spheroid = spheroid
super(Length, self).__init__(expr1, **extra)
def as_sql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
if geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
raise NotImplementedError("This backend doesn't support Length on geodetic fields")
return super(Length, self).as_sql(compiler, connection)
def as_postgresql(self, compiler, connection):
geo_field = GeometryField(srid=self.srid) # Fake field to get SRID info
src_field = self.get_source_fields()[0]
geography = src_field.geography and self.srid == 4326
if geography:
self.source_expressions.append(Value(self.spheroid))
elif geo_field.geodetic(connection):
# Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
self.function = 'ST_Length_Spheroid'
self.source_expressions.append(Value(geo_field._spheroid))
else:
dim = min(f.dim for f in self.get_source_fields() if f)
if dim > 2:
self.function = connection.ops.length3d
return super(Length, self).as_sql(compiler, connection)
def as_sqlite(self, compiler, connection):
geo_field = GeometryField(srid=self.srid)
if geo_field.geodetic(connection):
if self.spheroid:
self.function = 'GeodesicLength'
else:
self.function = 'GreatCircleLength'
return super(Length, self).as_sql(compiler, connection)
class MemSize(GeoFunc):
output_field_class = IntegerField
class NumGeometries(GeoFunc):
output_field_class = IntegerField
class NumPoints(GeoFunc):
output_field_class = IntegerField
def as_sqlite(self, compiler, connection):
if self.source_expressions[self.geom_param_pos].output_field.geom_type != 'LINESTRING':
raise TypeError("Spatialite NumPoints can only operate on LineString content")
return super(NumPoints, self).as_sql(compiler, connection)
class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
output_field_class = FloatField
def as_postgresql(self, compiler, connection):
dim = min(f.dim for f in self.get_source_fields())
if dim > 2:
self.function = connection.ops.perimeter3d
return super(Perimeter, self).as_sql(compiler, connection)
class PointOnSurface(OracleToleranceMixin, GeoFunc):
pass
class Reverse(GeoFunc):
pass
class Scale(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, x, y, z=0.0, **extra):
expressions = [
expression,
self._handle_param(x, 'x', NUMERIC_TYPES),
self._handle_param(y, 'y', NUMERIC_TYPES),
]
if z != 0.0:
expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
super(Scale, self).__init__(*expressions, **extra)
class SnapToGrid(SQLiteDecimalToFloatMixin, GeoFunc):
def __init__(self, expression, *args, **extra):
nargs = len(args)
expressions = [expression]
if nargs in (1, 2):
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
)
elif nargs == 4:
# Reverse origin and size param ordering
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]]
)
expressions.extend(
[self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]]
)
else:
raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
super(SnapToGrid, self).__init__(*expressions, **extra)
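    # Illustration (added comment; 'geom' is a hypothetical field name):
    # SnapToGrid('geom', 0.5) snaps to a 0.5-unit grid, and the four-argument form
    # SnapToGrid('geom', xsize, ysize, xorigin, yorigin) is reordered above so the
    # backend function receives the origin pair before the size pair.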
class SymDifference(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
class Transform(GeoFunc):
def __init__(self, expression, srid, **extra):
expressions = [
expression,
self._handle_param(srid, 'srid', six.integer_types),
]
super(Transform, self).__init__(*expressions, **extra)
@property
def srid(self):
# Make srid the resulting srid of the transformation
return self.source_expressions[self.geom_param_pos + 1].value
def convert_value(self, value, expression, connection, context):
value = super(Transform, self).convert_value(value, expression, connection, context)
if not connection.ops.postgis and not value.srid:
# Some backends do not set the srid on the returning geometry
value.srid = self.srid
return value
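    # Usage sketch (added comment; 'point' is a hypothetical geometry field):
    # annotate(p=Transform('point', 4326)) reprojects each geometry to SRID 4326,
    # and the srid property above then reports 4326 as the resulting SRID.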
class Translate(Scale):
def as_sqlite(self, compiler, connection):
func_name = connection.ops.spatial_function_name(self.name)
if func_name == 'ST_Translate' and len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate (Spatialite >= 3.1)
self.source_expressions.append(Value(0))
elif func_name == 'ShiftCoords' and len(self.source_expressions) > 3:
raise ValueError("This version of Spatialite doesn't support 3D")
return super(Translate, self).as_sqlite(compiler, connection)
class Union(OracleToleranceMixin, GeoFuncWithGeoParam):
pass
| mit |
popazerty/obh-sh4 | lib/python/Plugins/Extensions/MediaPlayer/settings.py | 28 | 5037 | from Screens.Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Components.FileList import FileList
from Components.Sources.StaticText import StaticText
from Components.MediaPlayer import PlayList
from Components.config import config, getConfigListEntry, ConfigSubsection, configfile, ConfigText, ConfigYesNo, ConfigDirectory
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
config.mediaplayer = ConfigSubsection()
config.mediaplayer.repeat = ConfigYesNo(default=False)
config.mediaplayer.savePlaylistOnExit = ConfigYesNo(default=True)
config.mediaplayer.saveDirOnExit = ConfigYesNo(default=False)
config.mediaplayer.defaultDir = ConfigDirectory()
config.mediaplayer.useAlternateUserAgent = ConfigYesNo(default=False)
config.mediaplayer.alternateUserAgent = ConfigText(default="")
config.mediaplayer.sortPlaylists = ConfigYesNo(default=False)
config.mediaplayer.alwaysHideInfoBar = ConfigYesNo(default=True)
config.mediaplayer.onMainMenu = ConfigYesNo(default=False)
class DirectoryBrowser(Screen, HelpableScreen):
def __init__(self, session, currDir):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerDirectoryBrowser, then FileBrowser, this allows individual skinning
self.skinName = ["MediaPlayerDirectoryBrowser", "FileBrowser" ]
HelpableScreen.__init__(self)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Use"))
self.filelist = FileList(currDir, matchingPattern="")
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.use,
"red": self.exit,
"ok": self.ok,
"cancel": self.exit
})
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(_("Directory browser"))
def ok(self):
if self.filelist.canDescent():
self.filelist.descent()
def use(self):
if self["filelist"].getCurrentDirectory() is not None:
if self.filelist.canDescent() and self["filelist"].getFilename() and len(self["filelist"].getFilename()) > len(self["filelist"].getCurrentDirectory()):
self.filelist.descent()
self.close(self["filelist"].getCurrentDirectory())
else:
self.close(self["filelist"].getFilename())
def exit(self):
self.close(False)
class MediaPlayerSettings(Screen,ConfigListScreen):
def __init__(self, session, parent):
Screen.__init__(self, session)
# for the skin: first try MediaPlayerSettings, then Setup, this allows individual skinning
self.skinName = ["MediaPlayerSettings", "Setup" ]
self.setup_title = _("Edit settings")
self.onChangedEntry = [ ]
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
ConfigListScreen.__init__(self, [], session = session, on_change = self.changedEntry)
self.parent = parent
self.initConfigList()
config.mediaplayer.saveDirOnExit.addNotifier(self.initConfigList)
self["setupActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.save,
"red": self.cancel,
"cancel": self.cancel,
"ok": self.ok,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setTitle(self.setup_title)
def initConfigList(self, element=None):
print "[initConfigList]", element
try:
self.list = []
self.list.append(getConfigListEntry(_("repeat playlist"), config.mediaplayer.repeat))
self.list.append(getConfigListEntry(_("save playlist on exit"), config.mediaplayer.savePlaylistOnExit))
self.list.append(getConfigListEntry(_("save last directory on exit"), config.mediaplayer.saveDirOnExit))
if not config.mediaplayer.saveDirOnExit.getValue():
self.list.append(getConfigListEntry(_("start directory"), config.mediaplayer.defaultDir))
self.list.append(getConfigListEntry(_("sorting of playlists"), config.mediaplayer.sortPlaylists))
self.list.append(getConfigListEntry(_("Always hide infobar"), config.mediaplayer.alwaysHideInfoBar))
self.list.append(getConfigListEntry(_("show mediaplayer on mainmenu"), config.mediaplayer.onMainMenu))
self["config"].setList(self.list)
except KeyError:
print "keyError"
def changedConfigList(self):
self.initConfigList()
def ok(self):
if self["config"].getCurrent()[1] == config.mediaplayer.defaultDir:
self.session.openWithCallback(self.DirectoryBrowserClosed, DirectoryBrowser, self.parent.filelist.getCurrentDirectory())
def DirectoryBrowserClosed(self, path):
print "PathBrowserClosed:" + str(path)
if path != False:
config.mediaplayer.defaultDir.setValue(path)
def save(self):
for x in self["config"].list:
x[1].save()
self.close()
def cancel(self):
self.close()
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent()[0]
def getCurrentValue(self):
return str(self["config"].getCurrent()[1].getText())
def createSummary(self):
from Screens.Setup import SetupSummary
return SetupSummary
| gpl-2.0 |
willhess/aima-python | submissions/Conklin/vaccuum.py | 18 | 6471 | import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
    return ag.Agent(p)
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
        bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vaccuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'submissions/Conklin/immahoers.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop() | mit |
jesusfcr/airflow | tests/contrib/sensors/test_hdfs_sensors.py | 28 | 9240 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import unittest
import re
from datetime import timedelta
from airflow.contrib.sensors.hdfs_sensors import HdfsSensorFolder, HdfsSensorRegex
from airflow.exceptions import AirflowSensorTimeout
class HdfsSensorFolderTests(unittest.TestCase):
def setUp(self):
if sys.version_info[0] == 3:
raise unittest.SkipTest('HdfsSensor won\'t work with python3. No need to test anything here')
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
def test_should_be_empty_directory(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory',
filepath='/datadirectory/empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_be_empty_directory_fail(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/not_empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_be_a_non_empty_directory(self):
"""
        test the non-empty directory behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_non_empty_directory',
filepath='/datadirectory/not_empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_be_non_empty_directory_fail(self):
"""
        test the non-empty directory behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
class HdfsSensorRegexTests(unittest.TestCase):
def setUp(self):
if sys.version_info[0] == 3:
raise unittest.SkipTest('HdfsSensor won\'t work with python3. No need to test anything here')
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
def test_should_match_regex(self):
"""
        test the regex matching behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_not_match_regex(self):
"""
        test the regex non-matching behaviour
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
compiled_regex = re.compile("^IDoNotExist")
task = HdfsSensorRegex(task_id='Should_not_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_and_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_and_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignore_copying=True,
ignored_ext=['_COPYING_', 'sftp'],
file_size=10,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_match_regex_but_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_but_copyingext(self):
"""
        test the ignored copying-extension behaviour with regex
:return:
"""
# Given
self.logger.debug('#' * 10)
self.logger.debug('Running %s', self._testMethodName)
self.logger.debug('#' * 10)
compiled_regex = re.compile("copying_file_\d+.txt")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignored_ext=['_COPYING_', 'sftp'],
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
| apache-2.0 |
bashrc/zeronet-debian | src/src/lib/pybitcointools/bitcoin/composite.py | 1 | 3833 | #!/usr/bin/python
from .main import *
from .transaction import *
from .bci import *
from .deterministic import *
from .blocks import *
# Takes privkey, address, value (satoshis), fee (satoshis)
def send(frm, to, value, fee=10000):
return sendmultitx(frm, to + ":" + str(value), fee)
# Takes privkey, "address1:value1,address2:value2" (satoshis), fee (satoshis)
def sendmultitx(frm, tovalues, fee=10000, **kwargs):
    tv, fee = tovalues.split(","), int(fee)
outs = []
outvalue = 0
for a in tv:
outs.append(a)
outvalue += int(a.split(":")[1])
u = unspent(privtoaddr(frm), **kwargs)
u2 = select(u, int(outvalue)+int(fee))
argz = u2 + outs + [frm, fee]
tx = mksend(*argz)
tx2 = signall(tx, frm)
return pushtx(tx2, **kwargs)
# Takes address, address, value (satoshis), fee(satoshis)
def preparetx(frm, to, value, fee=10000, **kwargs):
tovalues = to + ":" + str(value)
return preparemultitx(frm, tovalues, fee, **kwargs)
# Takes address, address:value, address:value ... (satoshis), fee(satoshis)
def preparemultitx(frm, *args, **kwargs):
tv, fee = args[:-1], int(args[-1])
outs = []
outvalue = 0
for a in tv:
outs.append(a)
outvalue += int(a.split(":")[1])
u = unspent(frm, **kwargs)
u2 = select(u, int(outvalue)+int(fee))
argz = u2 + outs + [frm, fee]
return mksend(*argz)
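# Illustrative usage sketch (not part of the library): the seed phrase and the
# destination address below are placeholders, and these helpers talk to a live
# blockchain API through unspent()/fetchtx()/pushtx() from bci.py.
#
#     priv = sha256('some long random seed phrase')
#     addr = privtoaddr(priv)
#     rawtx = preparetx(addr, '1BTCorgHwCg6u2YSAWKgS17qUad6kHmtQW', 50000)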
# BIP32 hierarchical deterministic multisig script
def bip32_hdm_script(*args):
if len(args) == 3:
keys, req, path = args
else:
i, keys, path = 0, [], []
while len(args[i]) > 40:
keys.append(args[i])
i += 1
req = int(args[i])
path = map(int, args[i+1:])
pubs = sorted(map(lambda x: bip32_descend(x, path), keys))
return mk_multisig_script(pubs, req)
# BIP32 hierarchical deterministic multisig address
def bip32_hdm_addr(*args):
return scriptaddr(bip32_hdm_script(*args))
# Setup a coinvault transaction
def setup_coinvault_tx(tx, script):
txobj = deserialize(tx)
N = deserialize_script(script)[-2]
for inp in txobj["ins"]:
inp["script"] = serialize_script([None] * (N+1) + [script])
return serialize(txobj)
# Sign a coinvault transaction
def sign_coinvault_tx(tx, priv):
pub = privtopub(priv)
txobj = deserialize(tx)
subscript = deserialize_script(txobj['ins'][0]['script'])
oscript = deserialize_script(subscript[-1])
k, pubs = oscript[0], oscript[1:-2]
for j in range(len(txobj['ins'])):
scr = deserialize_script(txobj['ins'][j]['script'])
for i, p in enumerate(pubs):
if p == pub:
scr[i+1] = multisign(tx, j, subscript[-1], priv)
if len(filter(lambda x: x, scr[1:-1])) >= k:
scr = [None] + filter(lambda x: x, scr[1:-1])[:k] + [scr[-1]]
txobj['ins'][j]['script'] = serialize_script(scr)
return serialize(txobj)
# Inspects a transaction
def inspect(tx, **kwargs):
d = deserialize(tx)
isum = 0
ins = {}
for _in in d['ins']:
h = _in['outpoint']['hash']
i = _in['outpoint']['index']
prevout = deserialize(fetchtx(h, **kwargs))['outs'][i]
isum += prevout['value']
a = script_to_address(prevout['script'])
ins[a] = ins.get(a, 0) + prevout['value']
outs = []
osum = 0
for _out in d['outs']:
outs.append({'address': script_to_address(_out['script']),
'value': _out['value']})
osum += _out['value']
return {
'fee': isum - osum,
'outs': outs,
'ins': ins
}
def merkle_prove(txhash):
blocknum = str(get_block_height(txhash))
header = get_block_header_data(blocknum)
hashes = get_txs_in_block(blocknum)
i = hashes.index(txhash)
return mk_merkle_proof(header, hashes, i)
| gpl-2.0 |
Yuego/dbfread | dbfread/struct_parser.py | 5 | 1169 | """
Parser that converts (C style) binary structs into named tuples.
The struct can be read from a file or a byte string.
"""
import struct
import collections
def _make_struct_class(name, names):
class Struct(object):
_names = names
def __init__(self, **kwargs):
vars(self).update(kwargs)
def __repr__(self):
fields = ', '.join('{}={!r}'.format(name, getattr(self, name))
for name in self._names)
return '{}({})'.format(self.__class__.__name__, fields)
Struct.__name__ = name
return Struct
class StructParser:
def __init__(self, name, format, names):
self.format = format
self.names = names
self.struct = struct.Struct(format)
self.Class = _make_struct_class(name, names)
def unpack(self, data):
"""Unpack struct from binary string and return a named tuple."""
items = zip(self.names, self.struct.unpack(data))
return self.Class(**dict(items))
def read(self, file):
"""Read struct from a file-like object (implenting read())."""
return self.unpack(file.read(self.struct.size))
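# Minimal usage sketch, kept behind a __main__ guard so importing the module
# is unaffected; the struct name, field names and format string below are
# invented for illustration and are not dbfread's real DBF header layout.
if __name__ == '__main__':
	example = StructParser('ExampleHeader', '<BBB', ['dbversion', 'year', 'month'])
	header = example.unpack(b'\x03\x5f\x07')
	print(header)  # -> ExampleHeader(dbversion=3, year=95, month=7)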
| mit |
revolunet/requests | requests/packages/chardet/mbcharsetprober.py | 215 | 3182 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants, sys
from constants import eStart, eError, eItsMe
from charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mDistributionAnalyzer = None
self._mCodingSM = None
self._mLastChar = ['\x00', '\x00']
def reset(self):
CharSetProber.reset(self)
if self._mCodingSM:
self._mCodingSM.reset()
if self._mDistributionAnalyzer:
self._mDistributionAnalyzer.reset()
self._mLastChar = ['\x00', '\x00']
def get_charset_name(self):
pass
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == eError:
if constants._debug:
sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if self._mDistributionAnalyzer.got_enough_data() and \
(self.get_confidence() > constants.SHORTCUT_THRESHOLD):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
return self._mDistributionAnalyzer.get_confidence()
| isc |
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Specials/_Lifers/Stabilizer/tests/test_05_stability_lif_I_ExampleCell.py | 2 | 1370 | #ImportModules
import ShareYourSystem as SYS
#ImportModules
import ShareYourSystem as SYS
LateralWeightVariablesList=[
[[-100.]]
]
#Check
for __LateralWeightVariable in LateralWeightVariablesList:
#Define
MyStabilizer=SYS.StabilizerClass(
).stationarize(
_MeanWeightVariable=__LateralWeightVariable,
_ConstantTimeVariable=[0.01],
_RateVariable=[15.],
_InteractionStr="Spike"
).stabilize(
_DelayTimeVariable=0.001,
#_DecayTimeVariable=0.005,
#_RiseTimeVariable=0.0005,
_ScanFrequencyVariable=[10.]
)
#Choose the parameters to print
KeyStrsList=[
'StationarizingMeanWeightVariable',
'StabilizingConstantTimeVariable', #in ms
'StabilizingDelayTimeVariable',
'StabilizedPerturbationComplex',
'StabilizedTotalPerturbationComplexesArray', #matrix M
'StabilizedDeterminantFloatsTuple', #If it has converged, then it has to be closed to (0,0)
'StabilizedBiggestLambdaFloatsTuple',
'StabilizedInstabilityLambdaFloatsTuple', # real part should be negative if stable, (from this particular initial condition)
'StabilizedInstabilityFrequencyFloat'
]
#print
SYS._print(SYS.collections.OrderedDict(zip(KeyStrsList,MyStabilizer.mapGet(KeyStrsList))))
| mit |
Narcolapser/Octo | logfile.py | 2 | 7834 | from tkinter import *
from tkinter import ttk
import connection
import paramiko
import threading
import sqlite3
import time
import os
import random
class LogFile(ttk.Frame):
def __init__(self,master,filterFrame,con,log,addr,tempdir):
ttk.Frame.__init__(self,master)
self.master = master
self.filterFrame = filterFrame
self.con = con
self.log = self.preProcess(log)
self.addr = addr
self.tempdir = tempdir.name
self.vis = False
self.lines = []
self.tail = ""
self.progress = 0.0
self.new_lines = []
self.numRows = 0
self.lastAdded = 0
self.updater = None
self.alive = True
self.has_new = False
self.command = "SELECT * FROM lines WHERE ID > {0}"
self.local_pointer = 0
self.__makeConnection()
self.__makeGUI()
self.__makeName()
self.__makeDB()
def __makeConnection(self):
## self.sftp = self.con.open_sftp()
## self.file = self.sftp.open(self.log)
self.file = self.con.openFile(self.log)
## self.lastEdit = self.sftp.stat(self.log)
## self.lastEdit = time.localtime(self.lastEdit.st_mtime)
## self.lastEdit = time.strftime("%Y-%m-%d",self.lastEdit)
self.lastEdit = self.file.lastEdit()
def __makeGUI(self):
self.disp_tree = ttk.Treeview(self.master)
self.s = ttk.Scrollbar(self.master,orient='vertical', command=self.disp_tree.yview)
self.disp_tree.configure(yscroll=self.s.set)
self.disp_tree.heading('#0', text=self.addr, anchor='w')
self.filters = ttk.Treeview(self.filterFrame)
self.scrollFilters = ttk.Scrollbar(self.filterFrame,orient='vertical',
command=self.filters.yview)
self.filters.configure(yscroll=self.scrollFilters.set)
		self.filters.heading('#0', text='Filters for: '+self.addr, anchor='w')
def __makeName(self):
self.name = self.log[self.log.rfind("/")+1:]
self.dbname = self.name[:self.name.find(".")]
self.dbname = self.dbname.replace("-","_")
self.dbname = self.addr[:self.addr.find("-")] + "_" + self.dbname + str(random.randint(0,1000))
def __makeDB(self):
self.logDB = self.getDBHandle()
cur = self.logDB.cursor()
com = "CREATE TABLE lines (ID integer, line text)"
cur.execute(com)
self.logDB.commit()
#self.update_cur = cur
self.update_cur = None
self.db_lock = threading.Lock()
def getDBHandle(self):
tf = self.tempdir + "\\" + self.addr + " - " + self.log[self.log.rfind("/")+1:]
print(tf)
con = sqlite3.connect(tf)
return con
def preProcess(self,val):
if "{date}" in val:
val = val.format(date=time.strftime("%Y-%m-%d",time.gmtime()))
return val
def update(self,update_num=1000):
i = update_num
if self.has_new:
if self.db_lock.acquire(timeout=0.01):
self.update_cur = self.logDB.cursor()
com = self.command.format(self.lastAdded)
self.update_cur.execute(com)
row = self.update_cur.fetchone()
while i > 0 and row:
self.lastAdded = row[0]
self.disp_tree.insert('','end',text=row[1])
i -= 1
row = self.update_cur.fetchone()
if not row:
self.has_new = False
self.update_cur.close()
self.db_lock.release()
def __populate(self):
values = str(self.file.read(65535),'utf-8')
self.local_pointer += len(values)
per = 0.1
upDB = self.getDBHandle()
cur = upDB.cursor()
while self.alive:
while len(values) > 100:
values = self.tail + values
lines = values.splitlines()
#deal with line fragments
self.tail = lines.pop()
self.append_lines_db(lines,upDB,cur)
values = str(self.file.read(65535),'utf-8')
self.local_pointer += len(values)
fstats = self.file.stat()
size = fstats.st_size
loc = self.file.tell()
self.progress = loc/(size*1.0)
##self.file.close()
##self.sftp.close()
self.tail += values
time.sleep(60)
##self.__makeConnection()
def append_lines(self,lines):
for line in lines:
if self.check_line(line):
self.lines.append(line)
self.new_lines.append(line)
def append_lines_db(self,lines,upDB,cur):
if len(lines):
for line in lines:
self.numRows += 1
inserts = 'INSERT INTO lines VALUES (?,?)'.format(self.dbname)
cur.execute(inserts,(self.numRows,line))
for i in range(10):
try:
self.db_lock.acquire(timeout=5)
upDB.commit()
self.has_new = True
self.db_lock.release()
break
except Exception as e:
print("failed to commit to {1} on try {0}: ".format(i,self.dbname),e)
print(len(lines))
def getName(self):
ret = self.addr + ":" + self.log
return ret
def __name__(self):
		return self.getName()
def setVisible(self,state=True):
if state:
if not self.updater:
self.updater = threading.Thread(name=self.getName()+"updater",target=self.__populate)
self.updater.start()
if not self.vis:
self.s.pack(side='right',fill=BOTH)
self.disp_tree.pack(side='top',fill=BOTH,expand=1)
self.filters.pack(side='top',expand=1)
self.vis=True
else:
if self.vis:
self.s.pack_forget()
self.disp_tree.pack_forget()
self.filters.pack_forget()
self.vis=False
def addFilter(self,fstring):
print("adding filter!")
self.filters.insert('','end',text=fstring)
#self.refilter()
def removeFilter(self):
print("removing filter!")
filters = self.filters.selection()
for fil in filters:
self.filters.delete(fil)
#self.filters.insert('','end',text=fstring)
#self.refilter()
def refilter(self):
filters = []
for f in self.filters.get_children():
filters.append(self.filters.item(f)['text'])
if len(filters):
wheres = " AND ".join(filters)
self.command = "SELECT * FROM lines WHERE ID > {0} AND " + wheres
else:
self.command = "SELECT * FROM lines WHERE ID > {0}"
for i in self.disp_tree.get_children():
self.disp_tree.delete(i)
self.db_lock.acquire()
self.lastAdded = 0
self.db_lock.release()
print(self.command)
self.update()
self.has_new = True
def download(self,path):
## sftp = self.con.open_sftp()
## sftp.get(self.log,path+self.name)
## sftp.close()
self.con.getFile(self.log,path+self.name)
def selected_values(self):
ret = []
for i in self.disp_tree.selection():
item = self.disp_tree.item(i)
ret.append(item['text'])
return "\n".join(ret)
def checkExists(self):
return self.file.checkExists()
| apache-2.0 |
scaphilo/koalixcrm | koalixcrm/accounting/rest/product_categorie_rest.py | 2 | 2866 | # -*- coding: utf-8 -*-
from koalixcrm.accounting.rest.account_rest import OptionAccountJSONSerializer
from rest_framework import serializers
from koalixcrm.accounting.accounting.product_category import ProductCategory
from koalixcrm.accounting.models import Account
class ProductCategoryMinimalJSONSerializer(serializers.HyperlinkedModelSerializer):
id = serializers.IntegerField(required=False)
title = serializers.CharField(read_only=True)
class Meta:
model = Account
fields = ('id',
'title')
class ProductCategoryJSONSerializer(serializers.HyperlinkedModelSerializer):
profitAccount = OptionAccountJSONSerializer(source='profit_account')
lossAccount = OptionAccountJSONSerializer(source='loss_account')
class Meta:
model = ProductCategory
fields = ('id',
'title',
'profitAccount',
'lossAccount')
depth = 1
def create(self, validated_data):
product_category = ProductCategory()
product_category.title = validated_data['title']
# Deserialize profit account
profit_account = validated_data.pop('profit_account')
if profit_account:
if profit_account.get('id', None):
product_category.profit_account = Account.objects.get(id=profit_account.get('id', None))
else:
product_category.profit_account = None
# Deserialize loss account
loss_account = validated_data.pop('loss_account')
if loss_account:
if loss_account.get('id', None):
product_category.loss_account = Account.objects.get(id=loss_account.get('id', None))
else:
product_category.loss_account = None
product_category.save()
return product_category
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
# Deserialize profit account
profit_account = validated_data.pop('profit_account')
if profit_account:
if profit_account.get('id', instance.profit_account_id):
instance.profit_account = Account.objects.get(id=profit_account.get('id', None))
else:
instance.profit_account = instance.profit_account_id
else:
instance.profit_account = None
# Deserialize loss account
loss_account = validated_data.pop('loss_account')
if loss_account:
if loss_account.get('id', instance.loss_account_id):
instance.loss_account = Account.objects.get(id=loss_account.get('id', None))
else:
instance.loss_account = instance.loss_account_id
else:
instance.loss_account = None
instance.save()
return instance
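# Illustrative example of the JSON payload shape this serializer handles; the
# title and account ids below are made-up values, not koalixcrm fixtures:
#
#     {"title": "Hardware",
#      "profitAccount": {"id": 1},
#      "lossAccount": {"id": 2}}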
| bsd-3-clause |
nickdex/cosmos | code/data_structures/src/tree/multiway_tree/union_find/union_find.py | 3 | 1628 | #!/usr/bin/env python
class UnionFind:
def __init__(self):
self.parent = {}
self.rank = {}
def root(self, a):
current_item = a
path = []
while self.parent[current_item] != current_item:
path.append(current_item)
current_item = self.parent[current_item]
for node in path:
self.parent[node] = current_item
return current_item
def connected(self, a, b):
return self.root(a) == self.root(b)
def find(self, a):
return self.root(a)
def create(self, a):
if a not in self.parent:
self.parent[a] = a
self.rank[a] = 1
def union(self, a, b):
self.create(a)
self.create(b)
a_root = self.root(a)
b_root = self.root(b)
if self.rank[a_root] > self.rank[b_root]:
self.parent[b_root] = a_root
self.rank[a_root] += self.rank[b_root]
else:
self.parent[a_root] = b_root
self.rank[b_root] += self.rank[a_root]
def count(self, a):
if a not in self.parent:
return 0
return self.rank[self.root(a)]
def main():
union_find = UnionFind()
union_find.union(1, 3)
union_find.union(1, 4)
union_find.union(2, 5)
union_find.union(5, 6)
union_find.union(7, 8)
union_find.union(7, 9)
union_find.union(3, 9)
for i in range(1, 10):
print(
"{} is in group {} with {} elements".format(
i, union_find.find(i), union_find.count(i)
)
)
if __name__ == "__main__":
main()
| gpl-3.0 |
Opentopic/ot-api | ot_api/base.py | 1 | 2508 | import json
from booby import Model
from ot_api.endpoints import GET_URL
from ot_api.exceptions import NoParamException
from .utils import build_endpoint_url
class OpentopicModel(Model):
"""
    Base Model class. Provides functionality needed in all opentopic objects returned by endpoints.
"""
parser = None
def decode(self, response):
return self.parser(json.loads(response.body))
class OpentopicCollectionMeta(type):
"""
Define some params for collection just after create a class, also check
if all required class params like `endpoint_name` and `parser` are setuped
"""
def __init__(cls, name, bases, nmspc):
super(OpentopicCollectionMeta, cls).__init__(name, bases, nmspc)
cls.collection_name = name.lower().replace('collection', '')
if not name == 'OpentopicCollection':
if cls.endpoint_name is None:
raise NoParamException('No endpoint_name in collection class defined')
if cls.parser is None:
raise NoParamException('No parser in collection class defined')
class OpentopicCollection(object, metaclass=OpentopicCollectionMeta):
"""
    Base Collection class. Provides functionality needed in all opentopic endpoints.
"""
endpoint_name = None
init_params = []
def __init__(self, account_name, *args, **kwargs):
self.account_name = account_name
missing_params = self.__class__.validate_init_params(kwargs.keys())
if missing_params:
raise NoParamException('This Api Endpoint required following params: {0}'.format(
','.join(missing_params)))
for param in self.__class__.init_params:
setattr(self, param, kwargs.pop(param))
kwargs = {}
super(OpentopicCollection, self).__init__(*args, **kwargs)
@classmethod
def parser(cls):
return cls.model.parser
@property
def url(self):
return build_endpoint_url(account_name=self.account_name, endpoint_name=self.endpoint_name)
def get_url(self, object_pk):
"""return url to to detail of object based on it `pk`"""
return GET_URL.format(all_url=self.url, object_pk=object_pk)
@classmethod
def validate_init_params(cls, params):
"""
:param params: list of params delivered to init method
:return: return list of params that are required and that were not delivered
"""
return list(set(cls.init_params) - set(params))
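# Illustrative sketch of a concrete collection subclass; the endpoint name,
# parser and init_params below are hypothetical, not part of the real ot-api
# surface:
#
#     class PostCollection(OpentopicCollection):
#         endpoint_name = 'posts'
#         parser = staticmethod(json.loads)
#         init_params = ['project']
#
#     posts = PostCollection('my-account', project='demo')
#     url = posts.get_url(42)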
| gpl-2.0 |
grigorisg9gr/menpo | menpo/image/test/boolean_image_constrain_test.py | 4 | 2020 | import numpy as np
from numpy.testing import assert_allclose
from menpo.image import BooleanImage
from menpo.shape import PointCloud
def test_boolean_image_constrain_landmarks():
mask = BooleanImage.init_blank((10, 10), fill=False)
mask.landmarks['test'] = PointCloud(
np.array([[1, 1], [8, 1], [8, 8], [1, 8]]))
new_mask = mask.constrain_to_landmarks('test')
assert_allclose(new_mask.pixels[1:-1, 1:-1], True)
assert new_mask.n_true() == 64
def test_boolean_image_constrain_pointcloud_pwa():
mask = BooleanImage.init_blank((10, 10), fill=False)
pc = PointCloud(np.array([[1, 1], [8, 1], [8, 8], [1, 8]]))
new_mask = mask.constrain_to_pointcloud(pc, point_in_pointcloud='pwa')
assert_allclose(new_mask.pixels[:, 1:-1, 1:-1], True)
assert new_mask.n_true() == 64
def test_boolean_image_constrain_pointcloud_convex_hull():
mask = BooleanImage.init_blank((10, 10), fill=False)
pc = PointCloud(np.array([[1, 1], [8, 1], [8, 8], [1, 8]]))
new_mask = mask.constrain_to_pointcloud(pc,
point_in_pointcloud='convex_hull')
assert_allclose(new_mask.pixels[:, 2:-1, 2:-1], True)
# Points on the boundary are OUTSIDE
assert new_mask.n_true() == 56
def test_boolean_image_init_from_pointcloud():
pc = PointCloud(np.array([[5, 5], [5, 20], [20, 20]]))
im = BooleanImage.init_from_pointcloud(pc, fill=False, constrain=False)
assert im.n_true() == 0
assert im.shape == (15, 15)
def test_boolean_image_init_from_pointcloud_constraint():
pc = PointCloud(np.array([[5, 5], [5, 20], [20, 20]]))
im = BooleanImage.init_from_pointcloud(pc, fill=False, constrain=True)
assert im.n_true() == 120
assert im.shape == (15, 15)
def test_boolean_image_init_from_pointcloud_constrain_all_true():
pc = PointCloud(np.array([[5, 5], [5, 20], [20, 20]]))
im = BooleanImage.init_from_pointcloud(pc, fill=True, constrain=True)
assert im.n_true() == 120
assert im.shape == (15, 15)
| bsd-3-clause |
cylc/cylc | tests/unit/batch_sys_handlers/test_lsf.py | 1 | 2520 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from cylc.flow.batch_sys_handlers.lsf import BATCH_SYS_HANDLER
@pytest.mark.parametrize(
'job_conf,lines',
[
( # basic
{
'batch_system_conf': {},
'directives': {},
'execution_time_limit': 180,
'job_file_path': '$HOME/cylc-run/chop/log/job/1/axe/01/job',
'suite_name': 'chop',
'task_id': 'axe.1',
},
[
'#BSUB -J axe.1.chop',
'#BSUB -o cylc-run/chop/log/job/1/axe/01/job.out',
'#BSUB -e cylc-run/chop/log/job/1/axe/01/job.err',
'#BSUB -W 3',
],
),
( # some useful directives
{
'batch_system_conf': {},
'directives': {
'-q': 'forever',
'-B': '',
'-ar': '',
},
'execution_time_limit': 200,
'job_file_path': '$HOME/cylc-run/chop/log/job/1/axe/01/job',
'suite_name': 'chop',
'task_id': 'axe.1',
},
[
'#BSUB -J axe.1.chop',
'#BSUB -o cylc-run/chop/log/job/1/axe/01/job.out',
'#BSUB -e cylc-run/chop/log/job/1/axe/01/job.err',
'#BSUB -W 4',
'#BSUB -q forever',
'#BSUB -B',
'#BSUB -ar',
],
),
],
)
def test_format_directives(job_conf: dict, lines: list):
assert BATCH_SYS_HANDLER.format_directives(job_conf) == lines
def test_get_submit_stdin():
outs = BATCH_SYS_HANDLER.get_submit_stdin(__file__, None)
assert outs[0].name == __file__
assert outs[1] is None
| gpl-3.0 |
Adai0808/BuildingMachineLearningSystemsWithPython | ch06/install.py | 23 | 9264 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# Sanders-Twitter Sentiment Corpus Install Script
# Version 0.1
#
# Pulls tweet data from Twitter because ToS prevents distributing it directly.
#
# - Niek Sanders
# [email protected]
# October 20, 2011
#
#
# In Sanders' original form, the code was using Twitter API 1.0.
# Now that Twitter moved to 1.1, we had to make a few changes.
# Cf. twitterauth.py for the details.
# Regarding rate limiting, please check
# https://dev.twitter.com/rest/public/rate-limiting
import sys
import csv
import json
import os
import time
try:
import twitter
except ImportError:
print("""\
You need to ...
pip install twitter
If pip is not found you might have to install it using easy_install.
If it does not work on your system, you might want to follow instructions
at https://github.com/sixohsix/twitter, most likely:
$ git clone https://github.com/sixohsix/twitter
$ cd twitter
$ sudo python setup.py install
""")
sys.exit(1)
from twitterauth import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET
api = twitter.Twitter(auth=twitter.OAuth(consumer_key=CONSUMER_KEY, consumer_secret=CONSUMER_SECRET,
token=ACCESS_TOKEN_KEY, token_secret=ACCESS_TOKEN_SECRET))
DATA_PATH = "data"
# For some reason tweets disappear. In this file we collect their IDs.
MISSING_ID_FILE = os.path.join(DATA_PATH, "missing.tsv")
NOT_AUTHORIZED_ID_FILE = os.path.join(DATA_PATH, "not_authorized.tsv")
def get_user_params(DATA_PATH):
user_params = {}
# get user input params
user_params['inList'] = os.path.join(DATA_PATH, 'corpus.csv')
user_params['outList'] = os.path.join(DATA_PATH, 'full-corpus.csv')
user_params['rawDir'] = os.path.join(DATA_PATH, 'rawdata/')
# apply defaults
if user_params['inList'] == '':
user_params['inList'] = './corpus.csv'
if user_params['outList'] == '':
user_params['outList'] = './full-corpus.csv'
if user_params['rawDir'] == '':
user_params['rawDir'] = './rawdata/'
return user_params
def dump_user_params(user_params):
# dump user params for confirmation
print('Input: ' + user_params['inList'])
print('Output: ' + user_params['outList'])
print('Raw data: ' + user_params['rawDir'])
def read_total_list(in_filename):
# read total fetch list csv
fp = open(in_filename, 'rt')
reader = csv.reader(fp, delimiter=',', quotechar='"')
if os.path.exists(MISSING_ID_FILE):
missing_ids = [line.strip()
for line in open(MISSING_ID_FILE, "r").readlines()]
else:
missing_ids = []
if os.path.exists(NOT_AUTHORIZED_ID_FILE):
not_authed_ids = [line.strip()
for line in open(NOT_AUTHORIZED_ID_FILE, "r").readlines()]
else:
not_authed_ids = []
print("We will skip %i tweets that are not available or visible any more on twitter" % (
len(missing_ids) + len(not_authed_ids)))
ignore_ids = set(missing_ids + not_authed_ids)
total_list = []
for row in reader:
if row[2] not in ignore_ids:
total_list.append(row)
return total_list
def purge_already_fetched(fetch_list, raw_dir):
# list of tweet ids that still need downloading
rem_list = []
count_done = 0
# check each tweet to see if we have it
for item in fetch_list:
# check if json file exists
tweet_file = os.path.join(raw_dir, item[2] + '.json')
if os.path.exists(tweet_file):
# attempt to parse json file
try:
parse_tweet_json(tweet_file)
count_done += 1
except RuntimeError:
print("Error parsing", item)
rem_list.append(item)
else:
rem_list.append(item)
print("We have already downloaded %i tweets." % count_done)
return rem_list
def download_tweets(fetch_list, raw_dir):
# ensure raw data directory exists
if not os.path.exists(raw_dir):
os.mkdir(raw_dir)
# download tweets
for idx in range(0, len(fetch_list)):
# current item
item = fetch_list[idx]
print(item)
print('--> downloading tweet #%s (%d of %d)' %
(item[2], idx + 1, len(fetch_list)))
try:
#import pdb;pdb.set_trace()
response = api.statuses.show(_id=item[2])
if response.rate_limit_remaining <= 0:
wait_seconds = response.rate_limit_reset - time.time()
print("Rate limiting requests us to wait %f seconds" %
wait_seconds)
time.sleep(wait_seconds+5)
except twitter.TwitterError as e:
fatal = True
print(e)
for m in json.loads(e.response_data.decode())['errors']:
if m['code'] == 34:
print("Tweet missing: ", item)
with open(MISSING_ID_FILE, "at") as f:
f.write(item[2] + "\n")
fatal = False
break
elif m['code'] == 63:
print("User of tweet '%s' has been suspended." % item)
with open(MISSING_ID_FILE, "at") as f:
f.write(item[2] + "\n")
fatal = False
break
elif m['code'] == 88:
print("Rate limit exceeded.")
fatal = True
break
elif m['code'] == 179:
print("Not authorized to view this tweet.")
with open(NOT_AUTHORIZED_ID_FILE, "at") as f:
f.write(item[2] + "\n")
fatal = False
break
if fatal:
raise
else:
continue
with open(raw_dir + item[2] + '.json', "wt") as f:
f.write(json.dumps(dict(response)) + "\n")
return
def parse_tweet_json(filename):
# read tweet
fp = open(filename, 'r')
# parse json
try:
tweet_json = json.load(fp)
except ValueError as e:
print(e)
raise RuntimeError('error parsing json')
# look for twitter api error msgs
if 'error' in tweet_json or 'errors' in tweet_json:
raise RuntimeError('error in downloaded tweet')
# extract creation date and tweet text
return [tweet_json['created_at'], tweet_json['text']]
def build_output_corpus(out_filename, raw_dir, total_list):
# open csv output file
fp = open(out_filename, 'wb')
writer = csv.writer(fp, delimiter=',', quotechar='"', escapechar='\\',
quoting=csv.QUOTE_ALL)
# write header row
writer.writerow(
['Topic', 'Sentiment', 'TweetId', 'TweetDate', 'TweetText'])
# parse all downloaded tweets
missing_count = 0
for item in total_list:
# ensure tweet exists
if os.path.exists(raw_dir + item[2] + '.json'):
try:
# parse tweet
parsed_tweet = parse_tweet_json(raw_dir + item[2] + '.json')
full_row = item + parsed_tweet
# character encoding for output
for i in range(0, len(full_row)):
full_row[i] = full_row[i].encode("utf-8")
# write csv row
writer.writerow(full_row)
except RuntimeError:
print('--> bad data in tweet #' + item[2])
missing_count += 1
else:
print('--> missing tweet #' + item[2])
missing_count += 1
# indicate success
if missing_count == 0:
print('\nSuccessfully downloaded corpus!')
print('Output in: ' + out_filename + '\n')
else:
print('\nMissing %d of %d tweets!' % (missing_count, len(total_list)))
print('Partial output in: ' + out_filename + '\n')
return
def main():
# get user parameters
user_params = get_user_params(DATA_PATH)
print(user_params)
dump_user_params(user_params)
# get fetch list
total_list = read_total_list(user_params['inList'])
# remove already fetched or missing tweets
fetch_list = purge_already_fetched(total_list, user_params['rawDir'])
print("Fetching %i tweets..." % len(fetch_list))
if fetch_list:
# start fetching data from twitter
download_tweets(fetch_list, user_params['rawDir'])
# second pass for any failed downloads
fetch_list = purge_already_fetched(total_list, user_params['rawDir'])
if fetch_list:
print('\nStarting second pass to retry %i failed downloads...' %
len(fetch_list))
download_tweets(fetch_list, user_params['rawDir'])
else:
print("Nothing to fetch any more.")
# build output corpus
build_output_corpus(user_params['outList'], user_params['rawDir'],
total_list)
if __name__ == '__main__':
main()
| mit |
polimediaupv/edx-platform | common/lib/xmodule/xmodule/raw_module.py | 146 | 2027 | from lxml import etree
from xmodule.editing_module import XMLEditingDescriptor
from xmodule.xml_module import XmlDescriptor
import logging
from xblock.fields import String, Scope
from exceptions import SerializationError
log = logging.getLogger(__name__)
class RawDescriptor(XmlDescriptor, XMLEditingDescriptor):
"""
Module that provides a raw editing view of its data and children. It
requires that the definition xml is valid.
"""
data = String(help="XML data for the module", default="", scope=Scope.content)
@classmethod
def definition_from_xml(cls, xml_object, system):
return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, []
def definition_to_xml(self, resource_fs):
try:
return etree.fromstring(self.data)
except etree.XMLSyntaxError as err:
# Can't recover here, so just add some info and
# re-raise
lines = self.data.split('\n')
line, offset = err.position
msg = (
u"Unable to create xml for module {loc}. "
u"Context: '{context}'"
).format(
context=lines[line - 1][offset - 40:offset + 40],
loc=self.location,
)
raise SerializationError(self.location, msg)
class EmptyDataRawDescriptor(XmlDescriptor, XMLEditingDescriptor):
"""
Version of RawDescriptor for modules which may have no XML data,
but use XMLEditingDescriptor for import/export handling.
"""
data = String(default='', scope=Scope.content)
@classmethod
def definition_from_xml(cls, xml_object, system):
if len(xml_object) == 0 and len(xml_object.items()) == 0:
return {'data': ''}, []
return {'data': etree.tostring(xml_object, pretty_print=True, encoding='unicode')}, []
def definition_to_xml(self, resource_fs):
if self.data:
return etree.fromstring(self.data)
return etree.Element(self.category)
| agpl-3.0 |
rssenar/PyToolkit | JoinDatasets.py | 1 | 2552 |
#!/usr/bin/env python3.4
# ---------------------------------------------------------------------------- #
import os, csv, glob, re
import pandas as pd
from Constants import ConvPercentage
from tqdm import tqdm
# ---------------------------------------------------------------------------- #
os.chdir('../../../../Desktop/')
# ---------------------------------------------------------------------------- #
File1 = 'a.csv'
File2 = 'b.csv'
ziproute = 0
Description = 1
Records = 2
total = 3
dfo = 4
Percentage = 5
RTotal = 6
AdjRec = 7
AdjRecPerc = 8
RecRTotal = 9
OutputHeaderRow = [
'ziproute',
'Description',
'Records',
'Total_Sat',
'Dist(m)',
'Sat%',
'R-TOTAL',
'ADJ_Rec',
'ADJ_Sat%',
'ADJ_R-TOTAL'
]
def Join():
ds1 = pd.read_csv(File1)
ds2 = pd.read_csv(File2)
merged = ds1.merge(ds2, how = 'inner')
merged['Percentage'] = ''
merged['RTotal'] = ''
merged['AdjRec'] = ''
merged['AdjRecPerc'] = ''
merged['AdjRecRTotal'] = ''
merged.to_csv('temp.csv', encoding = 'utf-8', index=False)
def ReformatOutputReport():
CSVFiles = glob.glob('temp.csv')
for file in tqdm(CSVFiles):
with open(file,'rU') as InputFile,\
open('DATA.csv','at') as OutputFile:
Input = csv.reader(InputFile)
Output = csv.writer(OutputFile)
Output.writerow(OutputHeaderRow)
RunningTotal = 0
AdjRecRTotal = 0
RowCounter = 2
next(InputFile)
for Row in tqdm(Input):
if int(Row[Records]) >= 135:
Row[dfo] = round(float(Row[dfo]),1)
Row[Percentage] = round(ConvPercentage(Row[Records],Row[total]),0)
Row[RTotal] = '=SUM($C$2:$C{})'.format(RowCounter)
if int(Row[Percentage]) >= 74:
Row[AdjRec] = round(float(Row[total]) * 0.73,0)
else:
Row[AdjRec] = Row[Records]
Row[AdjRecPerc] = round(ConvPercentage(Row[AdjRec],Row[total]),0)
Row[RecRTotal] = '=SUM($H$2:$H{})'.format(RowCounter)
Output.writerow(Row)
RowCounter += 1
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
print('=======================================')
print(' JOIN DATASETS ')
print('=======================================')
Join()
ReformatOutputReport()
Files = glob.glob('*.csv')
for Record in Files:
if bool(re.match(r'\btemp\b', Record, flags = re.I)):
os.remove(Record)
print('=======================================')
print(' COMPLETED ')
print()
| bsd-2-clause |
IoveSunny/DreamBox | package/mac80211/files/host_bin/b43-fwsquash.py | 30 | 3181 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <[email protected]>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
print("Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0])
print("")
print("PHYTYPES is a comma separated list of:")
print("A => A-PHY")
print("AG => Dual A-PHY G-PHY")
print("G => G-PHY")
print("LP => LP-PHY")
print("N => N-PHY")
print("")
print("COREREVS is a comma separated list of core revision numbers.")
if len(sys.argv) != 4:
usage()
sys.exit(1)
phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]
phytypes = phytypes.split(',')
try:
corerevs = map(lambda r: int(r), corerevs.split(','))
except ValueError:
print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
usage()
sys.exit(1)
fwfiles = os.listdir(fwpath)
fwfiles = filter(lambda str: str.endswith(".fw"), fwfiles)
if not fwfiles:
print("ERROR: No firmware files found in %s" % fwpath)
sys.exit(1)
required_fwfiles = []
def revs_match(revs_a, revs_b):
for rev in revs_a:
if rev in revs_b:
return True
return False
def phytypes_match(types_a, types_b):
for type in types_a:
type = type.strip().upper()
if type in types_b:
return True
return False
revmapping = {
"ucode2.fw" : (2,3,),
"ucode4.fw" : (4,),
"ucode5.fw" : (5,6,7,8,9,10,),
"ucode11.fw" : (11,12,),
"ucode13.fw" : (13,),
"ucode14.fw" : (14,),
"ucode15.fw" : (15,),
"ucode16_mimo.fw" : (16,),
"ucode24_mimo.fw" : (24,),
"ucode29_mimo.fw" : (29,),
"pcm4.fw" : (1,2,3,4,),
"pcm5.fw" : (5,6,7,8,9,10,),
}
initvalmapping = {
"a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ),
"b0g0initvals2.fw" : ( (2,4,), ("G",), ),
"b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"b0g0initvals13.fw" : ( (13,), ("G",), ),
"n0initvals11.fw" : ( (11,12,), ("N",), ),
"n0initvals16.fw" : ( (16,), ("N",), ),
"lp0initvals13.fw" : ( (13,), ("LP",), ),
"lp0initvals14.fw" : ( (14,), ("LP",), ),
"lp0initvals15.fw" : ( (15,), ("LP",), ),
"a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ),
"b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"n0bsinitvals11.fw" : ( (11,12,), ("N",), ),
"n0bsinitvals16.fw" : ( (16,), ("N",), ),
"lp0bsinitvals13.fw" : ( (13,), ("LP",), ),
"lp0bsinitvals14.fw" : ( (14,), ("LP",), ),
"lp0bsinitvals15.fw" : ( (15,), ("LP",), ),
"lcn0initvals24.fw" : ( (24,), ("LNC",), ),
"ht0initvals29.fw" : ( (29,), ("HT",), ),
}
for f in fwfiles:
if f in revmapping:
if revs_match(corerevs, revmapping[f]):
required_fwfiles += [f]
continue
if f in initvalmapping:
if revs_match(corerevs, initvalmapping[f][0]) and\
phytypes_match(phytypes, initvalmapping[f][1]):
required_fwfiles += [f]
continue
print("WARNING: Firmware file %s not found in the mapping lists" % f)
for f in fwfiles:
if f not in required_fwfiles:
print("Deleting %s" % f)
os.unlink(fwpath + '/' + f)
| gpl-2.0 |
mahendra-r/edx-platform | cms/djangoapps/contentstore/views/tests/test_library.py | 114 | 9392 | """
Unit tests for contentstore.views.library
More important high-level tests are in contentstore/tests/test_libraries.py
"""
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from contentstore.utils import reverse_course_url, reverse_library_url
from contentstore.views.component import get_component_templates
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import LibraryFactory
from mock import patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
import ddt
from student.roles import LibraryUserRole
LIBRARY_REST_URL = '/library/' # URL for GET/POST requests involving libraries
def make_url_for_lib(key):
""" Get the RESTful/studio URL for testing the given library """
if isinstance(key, LibraryLocator):
key = unicode(key)
return LIBRARY_REST_URL + key
@ddt.ddt
class UnitTestLibraries(ModuleStoreTestCase):
"""
Unit tests for library views
"""
def setUp(self):
user_password = super(UnitTestLibraries, self).setUp()
self.client = AjaxEnabledTestClient()
self.client.login(username=self.user.username, password=user_password)
######################################################
# Tests for /library/ - list and create libraries:
@patch("contentstore.views.library.LIBRARIES_ENABLED", False)
def test_with_libraries_disabled(self):
"""
The library URLs should return 404 if libraries are disabled.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 404)
def test_list_libraries(self):
"""
Test that we can GET /library/ to list all libraries visible to the current user.
"""
# Create some more libraries
libraries = [LibraryFactory.create() for _ in range(3)]
lib_dict = dict([(lib.location.library_key, lib) for lib in libraries])
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
lib_list = parse_json(response)
self.assertEqual(len(lib_list), len(libraries))
for entry in lib_list:
self.assertIn("library_key", entry)
self.assertIn("display_name", entry)
key = CourseKey.from_string(entry["library_key"])
self.assertIn(key, lib_dict)
self.assertEqual(entry["display_name"], lib_dict[key].display_name)
del lib_dict[key] # To ensure no duplicates are matched
@ddt.data("delete", "put")
def test_bad_http_verb(self, verb):
"""
We should get an error if we do weird requests to /library/
"""
response = getattr(self.client, verb)(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 405)
def test_create_library(self):
""" Create a library. """
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org',
'library': 'lib',
'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
# That's all we check. More detailed tests are in contentstore.tests.test_libraries...
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True})
def test_lib_create_permission(self):
"""
Users who are not given course creator roles should still be able to
create libraries.
"""
self.client.logout()
ns_user, password = self.create_non_staff_user()
self.client.login(username=ns_user.username, password=password)
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': 'org', 'library': 'lib', 'display_name': "New Library",
})
self.assertEqual(response.status_code, 200)
@ddt.data(
{},
{'org': 'org'},
{'library': 'lib'},
{'org': 'C++', 'library': 'lib', 'display_name': 'Lib with invalid characters in key'},
{'org': 'Org', 'library': 'Wh@t?', 'display_name': 'Lib with invalid characters in key'},
)
def test_create_library_invalid(self, data):
"""
Make sure we are prevented from creating libraries with invalid keys/data
"""
response = self.client.ajax_post(LIBRARY_REST_URL, data)
self.assertEqual(response.status_code, 400)
def test_no_duplicate_libraries(self):
"""
We should not be able to create multiple libraries with the same key
"""
lib = LibraryFactory.create()
lib_key = lib.location.library_key
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': lib_key.org,
'library': lib_key.library,
'display_name': "A Duplicate key, same as 'lib'",
})
self.assertIn('already a library defined', parse_json(response)['ErrMsg'])
self.assertEqual(response.status_code, 400)
######################################################
# Tests for /library/:lib_key/ - get a specific library as JSON or HTML editing view
def test_get_lib_info(self):
"""
Test that we can get data about a library (in JSON format) using /library/:key/
"""
# Create a library
lib_key = LibraryFactory.create().location.library_key
# Re-load the library from the modulestore, explicitly including version information:
lib = self.store.get_library(lib_key, remove_version=False, remove_branch=False)
version = lib.location.library_key.version_guid
self.assertNotEqual(version, None)
response = self.client.get_json(make_url_for_lib(lib_key))
self.assertEqual(response.status_code, 200)
info = parse_json(response)
self.assertEqual(info['display_name'], lib.display_name)
self.assertEqual(info['library_id'], unicode(lib_key))
self.assertEqual(info['previous_version'], None)
self.assertNotEqual(info['version'], None)
self.assertNotEqual(info['version'], '')
self.assertEqual(info['version'], unicode(version))
def test_get_lib_edit_html(self):
"""
Test that we can get the studio view for editing a library using /library/:key/
"""
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 200)
self.assertIn("<html", response.content)
self.assertIn(lib.display_name, response.content)
@ddt.data('library-v1:Nonexistent+library', 'course-v1:Org+Course', 'course-v1:Org+Course+Run', 'invalid')
def test_invalid_keys(self, key_str):
"""
Check that various Nonexistent/invalid keys give 404 errors
"""
response = self.client.get_json(make_url_for_lib(key_str))
self.assertEqual(response.status_code, 404)
def test_bad_http_verb_with_lib_key(self):
"""
We should get an error if we do weird requests to /library/
"""
lib = LibraryFactory.create()
for verb in ("post", "delete", "put"):
response = getattr(self.client, verb)(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 405)
def test_no_access(self):
user, password = self.create_non_staff_user()
self.client.login(username=user, password=password)
lib = LibraryFactory.create()
response = self.client.get(make_url_for_lib(lib.location.library_key))
self.assertEqual(response.status_code, 403)
def test_get_component_templates(self):
"""
Verify that templates for adding discussion and advanced components to
content libraries are not provided.
"""
lib = LibraryFactory.create()
lib.advanced_modules = ['lti']
lib.save()
templates = [template['type'] for template in get_component_templates(lib, library=True)]
self.assertIn('problem', templates)
self.assertNotIn('discussion', templates)
self.assertNotIn('advanced', templates)
def test_manage_library_users(self):
"""
Simple test that the Library "User Access" view works.
Also tests that we can use the REST API to assign a user to a library.
"""
library = LibraryFactory.create()
extra_user, _ = self.create_non_staff_user()
manage_users_url = reverse_library_url('manage_library_users', unicode(library.location.library_key))
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
# extra_user has not been assigned to the library so should not show up in the list:
self.assertNotIn(extra_user.username, response.content)
# Now add extra_user to the library:
user_details_url = reverse_course_url(
'course_team_handler',
library.location.library_key, kwargs={'email': extra_user.email}
)
edit_response = self.client.ajax_post(user_details_url, {"role": LibraryUserRole.ROLE})
self.assertIn(edit_response.status_code, (200, 204))
        # Now extra_user should appear in the list:
response = self.client.get(manage_users_url)
self.assertEqual(response.status_code, 200)
self.assertIn(extra_user.username, response.content)
| agpl-3.0 |
zeroSteiner/smoke-zephyr | smoke_zephyr/utilities.py | 1 | 27644 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# smoke_zephyr/utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import functools
import inspect
import ipaddress
import itertools
import logging
import os
import random
import re
import shutil
import string
import subprocess
import sys
import time
import unittest
import urllib.parse
import urllib.request
import weakref
EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,6}$', flags=re.IGNORECASE)
class AttributeDict(dict):
"""
This class allows dictionary keys to be accessed as attributes. For
example: ``ad = AttributeDict(test=1); ad['test'] == ad.test``
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class BruteforceGenerator(object):
"""
	This class allows iterating sequences for bruteforcing.
"""
	# requirements = itertools
def __init__(self, startlen, endlen=None, charset=None):
"""
:param int startlen: The minimum sequence size to generate.
:param int endlen: The maximum sequence size to generate.
:param charset: The characters to include in the resulting sequences.
"""
self.startlen = startlen
if endlen is None:
self.endlen = startlen
else:
self.endlen = endlen
if charset is None:
charset = list(map(chr, range(0, 256)))
elif isinstance(charset, str):
charset = list(charset)
elif isinstance(charset, bytes):
charset = list(map(chr, charset))
charset.sort()
self.charset = tuple(charset)
self.length = self.startlen
self._product = itertools.product(self.charset, repeat=self.length)
self._next = self.__next__
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
try:
value = next(self._product)
except StopIteration:
if self.length == self.endlen:
raise StopIteration
self.length += 1
self._product = itertools.product(self.charset, repeat=self.length)
value = next(self._product)
return ''.join(value)
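# Usage sketch (illustrative): iterate every candidate string of length one
# to two drawn from a two character charset.
#
#     for candidate in BruteforceGenerator(1, 2, 'ab'):
#         print(candidate)  # a, b, aa, ab, ba, bb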
_ArgSpec = collections.namedtuple('_ArgSpec', ('args', 'varargs', 'keywords', 'defaults'))
class Cache(object):
"""
This class provides a simple to use cache object which can be applied
as a decorator.
"""
def __init__(self, timeout):
"""
:param timeout: The amount of time in seconds that a cached
result will be considered valid for.
:type timeout: int, str
"""
if isinstance(timeout, str):
timeout = parse_timespan(timeout)
self.cache_timeout = timeout
self._target_function = None
self._target_function_arg_spec = None
self.__cache = {}
self.__obj = None
def __get__(self, instance, _):
self.__obj = instance
return self
def __call__(self, *args, **kwargs):
if not getattr(self, '_target_function', False):
target_function = args[0]
if not inspect.isfunction(target_function) and not inspect.ismethod(target_function):
raise RuntimeError('the cached object must be a function or method')
arg_spec = inspect.getfullargspec(target_function) # pylint: disable=W1505
arg_spec = _ArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, keywords=arg_spec.kwonlyargs, defaults=arg_spec.defaults)
if arg_spec.varargs or arg_spec.keywords:
raise RuntimeError('the cached function can not use dynamic args or kwargs')
self._target_function = target_function
self._target_function_arg_spec = arg_spec
return functools.wraps(target_function)(self)
self.cache_clean()
if self.__obj is not None:
args = (self.__obj,) + args
self.__obj = None
is_method = True
else:
is_method = False
args = self._flatten_args(args, kwargs)
if is_method:
inst = args.popleft()
args = tuple(args)
ref = weakref.ref(inst, functools.partial(self._ref_callback, args))
cache_args = (ref,) + args
args = (inst,) + args
else:
cache_args = tuple(args)
args = tuple(args)
result, expiration = self.__cache.get(cache_args, (None, 0))
if expiration > time.time():
return result
result = self._target_function(*args)
self.__cache[cache_args] = (result, time.time() + self.cache_timeout)
return result
def __repr__(self):
return "<cached function {0} at 0x{1:x}>".format(self._target_function.__name__, id(self._target_function))
def _flatten_args(self, args, kwargs):
flattened_args = collections.deque(args)
arg_spec = self._target_function_arg_spec
arg_spec_defaults = (arg_spec.defaults or [])
default_args = tuple(arg_spec.args[:-len(arg_spec_defaults)])
default_kwargs = dict(zip(arg_spec.args[-len(arg_spec_defaults):], arg_spec_defaults))
for arg_id in range(len(args), len(arg_spec.args)):
arg_name = arg_spec.args[arg_id]
if arg_name in default_args:
if not arg_name in kwargs:
raise TypeError("{0}() missing required argument '{1}'".format(self._target_function.__name__, arg_name))
flattened_args.append(kwargs.pop(arg_name))
else:
flattened_args.append(kwargs.pop(arg_name, default_kwargs[arg_name]))
if kwargs:
unexpected_kwargs = tuple("'{0}'".format(a) for a in kwargs.keys())
raise TypeError("{0}() got an unexpected keyword argument{1} {2}".format(self._target_function.__name__, ('' if len(unexpected_kwargs) == 1 else 's'), ', '.join(unexpected_kwargs)))
return flattened_args
def _ref_callback(self, args, ref):
args = (ref,) + args
self.__cache.pop(args, None)
def cache_clean(self):
"""
Remove expired items from the cache.
"""
now = time.time()
keys_for_removal = collections.deque()
for key, (_, expiration) in self.__cache.items():
if expiration < now:
keys_for_removal.append(key)
for key in keys_for_removal:
del self.__cache[key]
def cache_clear(self):
"""
Remove all items from the cache.
"""
self.__cache = {}
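# Usage sketch (illustrative): memoize an expensive call for five minutes.
# The string timeout is converted to seconds by parse_timespan(), defined
# later in this module; "5m" is assumed to be a valid timespan spec and
# expensive_lookup() is a hypothetical helper.
#
#     @Cache("5m")
#     def lookup(hostname):
#         return expensive_lookup(hostname)
#
# Repeated calls with the same arguments inside the timeout window return the
# memoized result instead of calling the function again.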
class FileWalker(object):
"""
This class is used to easily iterate over files and subdirectories of a
specified parent directory.
"""
def __init__(self, filespath, absolute_path=False, skip_files=False, skip_dirs=False, filter_func=None, follow_links=False, max_depth=None):
"""
.. versionchanged:: 1.4.0
Added the *follow_links* and *max_depth* parameters.
:param str filespath: A path to either a file or a directory. If
a file is passed then that will be the only file returned
during the iteration. If a directory is passed, all files and
subdirectories will be recursively returned during the iteration.
:param bool absolute_path: Whether or not the absolute path or a
relative path should be returned.
:param bool skip_files: Whether or not to skip files.
:param bool skip_dirs: Whether or not to skip directories.
:param function filter_func: If defined, the filter_func function will
be called for each path (with the path as the one and only argument)
and if the function returns false the path will be skipped.
:param bool follow_links: Whether or not to follow directories pointed
to by symlinks.
:param max_depth: A maximum depth to recurse into.
"""
if not (os.path.isfile(filespath) or os.path.isdir(filespath)):
            raise Exception(filespath + ' is neither a file nor a directory')
if absolute_path:
self.filespath = os.path.abspath(filespath)
else:
self.filespath = os.path.relpath(filespath)
self.skip_files = skip_files
self.skip_dirs = skip_dirs
self.filter_func = filter_func
self.follow_links = follow_links
self.max_depth = float('inf') if max_depth is None else max_depth
if os.path.isdir(self.filespath):
self._walk = None
self._next = self._next_dir
elif os.path.isfile(self.filespath):
self._next = self._next_file
def __iter__(self):
return self._next()
def _skip(self, cur_file):
if self.skip_files and os.path.isfile(cur_file):
return True
if self.skip_dirs and os.path.isdir(cur_file):
return True
if self.filter_func is not None:
if not self.filter_func(cur_file):
return True
return False
def _next_dir(self):
for root, dirs, files in os.walk(self.filespath, followlinks=self.follow_links):
if root == self.filespath:
depth = 0
else:
depth = os.path.relpath(root, start=self.filespath).count(os.path.sep) + 1
if depth >= self.max_depth:
continue
for entry in itertools.chain(dirs, files):
current_path = os.path.join(root, entry)
if not self._skip(current_path):
yield current_path
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
def _next_file(self):
if self.max_depth >= 0 and not self._skip(self.filespath):
yield self.filespath
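# Illustrative usage of FileWalker (not part of the original module); the
# 'docs' directory below is a hypothetical example path:
#
#     for path in FileWalker('docs', absolute_path=True, skip_dirs=True):
#         print(path)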
class SectionConfigParser(object):
"""
Proxy access to a section of a ConfigParser object.
"""
__version__ = '0.2'
def __init__(self, section_name, config_parser):
"""
:param str section_name: Name of the section to proxy access for.
:param config_parser: ConfigParser object to proxy access for.
        :type config_parser: :py:class:`ConfigParser.ConfigParser`
"""
self.section_name = section_name
self.config_parser = config_parser
def _get_raw(self, option, opt_type, default=None):
get_func = getattr(self.config_parser, 'get' + opt_type)
if default is None:
return get_func(self.section_name, option)
elif self.config_parser.has_option(self.section_name, option):
return get_func(self.section_name, option)
else:
return default
def get(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
"""
return self._get_raw(option, '', default)
def getint(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: int
"""
return self._get_raw(option, 'int', default)
def getfloat(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: float
"""
return self._get_raw(option, 'float', default)
def getboolean(self, option, default=None):
"""
Retrieve *option* from the config, returning *default* if it
is not present.
:param str option: The name of the value to return.
:param default: Default value to return if the option does not exist.
:rtype: bool
"""
return self._get_raw(option, 'boolean', default)
def has_option(self, option):
"""
Check that *option* exists in the configuration file.
:param str option: The name of the option to check.
:rtype: bool
"""
return self.config_parser.has_option(self.section_name, option)
def options(self):
"""
Get a list of all options that are present in the section of the
configuration.
:return: A list of all set options.
:rtype: list
"""
return self.config_parser.options(self.section_name)
def items(self):
"""
Return all options and their values in the form of a list of tuples.
:return: A list of all values and options.
:rtype: list
"""
return self.config_parser.items(self.section_name)
def set(self, option, value):
"""
Set an option to an arbitrary value.
:param str option: The name of the option to set.
:param value: The value to set the option to.
"""
self.config_parser.set(self.section_name, option, value)
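# Illustrative usage of SectionConfigParser (a sketch, not from the original
# source); the 'settings.ini' file and its 'server' section are assumptions:
#
#     config = configparser.ConfigParser()
#     config.read('settings.ini')
#     server_config = SectionConfigParser('server', config)
#     port = server_config.getint('port', 8080)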
class TestCase(unittest.TestCase):
"""
This class provides additional functionality over the built in
:py:class:`unittest.TestCase` object, including better compatibility for
methods across Python 2.x and Python 3.x.
"""
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
if not hasattr(self, 'assertRegex') and hasattr(self, 'assertRegexpMatches'):
self.assertRegex = self.assertRegexpMatches
if not hasattr(self, 'assertNotRegex') and hasattr(self, 'assertNotRegexpMatches'):
self.assertNotRegex = self.assertNotRegexpMatches
if not hasattr(self, 'assertRaisesRegex') and hasattr(self, 'assertRaisesRegexp'):
self.assertRaisesRegex = self.assertRaisesRegexp
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):
"""
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
"""
    level = level or logging.WARNING
    if isinstance(level, str):
        # keep the original name so the error message below stays meaningful
        level_name = level
        level = getattr(logging, level_name, None)
        if level is None:
            raise ValueError('invalid log level: ' + level_name)
root_logger = logging.getLogger('')
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
raise TypeError('formatter must be an instance of logging.Formatter')
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
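# Example call (illustrative only); 'myapp' is a hypothetical logger name:
#
#     configure_stream_logger(logger='myapp', level='DEBUG')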
def download(url, filename=None):
"""
Download a file from a url and save it to disk.
:param str url: The URL to fetch the file from.
:param str filename: The destination file to write the data to.
"""
# requirements os, shutil, urllib.parse, urllib.request
if not filename:
url_parts = urllib.parse.urlparse(url)
filename = os.path.basename(url_parts.path)
url_h = urllib.request.urlopen(url)
with open(filename, 'wb') as file_h:
shutil.copyfileobj(url_h, file_h)
url_h.close()
return
def escape_single_quote(unescaped):
"""
Escape a string containing single quotes and backslashes with backslashes.
This is useful when a string is evaluated in some way.
:param str unescaped: The string to escape.
:return: The escaped string.
:rtype: str
"""
# requirements = re
return re.sub(r'(\'|\\)', r'\\\1', unescaped)
def format_bytes_size(val):
"""
Take a number of bytes and convert it to a human readable number.
:param int val: The number of bytes to format.
:return: The size in a human readable format.
:rtype: str
"""
if not val:
return '0 bytes'
for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']:
if val < 1024.0:
return "{0:.2f} {1}".format(val, sz_name)
val /= 1024.0
raise OverflowError()
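# Illustrative examples (not part of the original source):
#
#     format_bytes_size(0)        # -> '0 bytes'
#     format_bytes_size(1536)     # -> '1.50 KB'
#     format_bytes_size(10 ** 9)  # -> '953.67 MB'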
def grep(expression, file, flags=0, invert=False):
"""
Search a file and return a list of all lines that match a regular expression.
:param str expression: The regex to search for.
:param file: The file to search in.
:type file: str, file
:param int flags: The regex flags to use when searching.
:param bool invert: Select non matching lines instead.
:return: All the matching lines.
:rtype: list
"""
# requirements = re
if isinstance(file, str):
file = open(file)
lines = []
for line in file:
if bool(re.search(expression, line, flags=flags)) ^ invert:
lines.append(line)
return lines
def is_valid_email_address(email_address):
"""
Check that the string specified appears to be a valid email address.
:param str email_address: The email address to validate.
:return: Whether the email address appears to be valid or not.
:rtype: bool
"""
# requirements = re
    return EMAIL_REGEX.match(email_address) is not None
def get_ip_list(ip_network, mask=None):
"""
Quickly convert an IPv4 or IPv6 network (CIDR or Subnet) to a list
of individual IPs in their string representation.
:param str ip_network:
:param int mask:
:return: list
"""
if mask and '/' not in ip_network:
net = ipaddress.ip_network("{0}/{1}".format(ip_network, mask))
elif '/' not in ip_network:
return [str(ipaddress.ip_address(ip_network))]
else:
net = ipaddress.ip_network(ip_network)
hosts = net.hosts()
if net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9):
# see: https://github.com/zeroSteiner/smoke-zephyr/issues/8
hosts = []
return [host.__str__() for host in hosts]
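# Illustrative example (not part of the original source):
#
#     get_ip_list('192.168.1.0', mask=30)  # -> ['192.168.1.1', '192.168.1.2']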
def sort_ipv4_list(ip_list, unique=True):
"""
    Sort a provided list of IPv4 addresses and optionally remove duplicate values.
    Supports IPv4 addresses with ports included (e.g. ['10.11.12.13:80', '10.11.12.13:8080']).
:param ip_list: (list) iterable of IPv4 Addresses
:param unique: (bool) removes duplicate values if true
:return: sorted list of IP addresses
"""
if unique:
ip_list = list(set(ip_list))
ipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: (
int(ip.split(".")[0]),
int(ip.split(".")[1]),
int(ip.split(".")[2]),
int(ip.split(".")[3].split(':')[0]),
int(ip.split(":")[1]) if ":" in ip else 0
))
return ipv4_list
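# Illustrative example (not part of the original source):
#
#     sort_ipv4_list(['10.0.0.2', '10.0.0.10', '10.0.0.2'])
#     # -> ['10.0.0.2', '10.0.0.10']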
def open_uri(uri):
"""
Open a URI in a platform intelligent way. On Windows this will use
'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
depending on which is available. If no suitable application can be
found to open the URI, a RuntimeError will be raised.
.. versionadded:: 1.3.0
:param str uri: The URI to open.
"""
close_fds = True
startupinfo = None
proc_args = []
if sys.platform.startswith('win'):
proc_args.append(which('cmd.exe'))
proc_args.append('/c')
proc_args.append('start')
uri = uri.replace('&', '^&')
close_fds = False
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
elif which('gvfs-open'):
proc_args.append(which('gvfs-open'))
elif which('xdg-open'):
proc_args.append(which('xdg-open'))
else:
raise RuntimeError('could not find suitable application to open uri')
proc_args.append(uri)
proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo)
return proc_h.wait() == 0
def parse_case_camel_to_snake(camel):
"""
Convert a string from CamelCase to snake_case.
:param str camel: The CamelCase string to convert.
:return: The snake_case version of string.
:rtype: str
"""
# requirements = re
return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', camel).lower()
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:])
def parse_server(server, default_port):
"""
Convert a server string to a tuple suitable for passing to connect, for
example converting 'www.google.com:443' to ('www.google.com', 443).
:param str server: The server string to convert.
:param int default_port: The port to use in case one is not specified
in the server string.
:return: The parsed server information.
:rtype: tuple
"""
server = server.rsplit(':', 1)
host = server[0]
if host.startswith('[') and host.endswith(']'):
host = host[1:-1]
if len(server) == 1:
return (host, default_port)
port = server[1]
if not port:
port = default_port
else:
port = int(port)
return (host, port)
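# Illustrative examples (not part of the original source):
#
#     parse_server('www.google.com:443', 80)  # -> ('www.google.com', 443)
#     parse_server('10.0.0.1', 443)           # -> ('10.0.0.1', 443)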
def parse_timespan(timedef):
"""
Convert a string timespan definition to seconds, for example converting
'1m30s' to 90. If *timedef* is already an int, the value will be returned
unmodified.
:param timedef: The timespan definition to convert to seconds.
:type timedef: int, str
:return: The converted value in seconds.
:rtype: int
"""
if isinstance(timedef, int):
return timedef
converter_order = ('w', 'd', 'h', 'm', 's')
converters = {
'w': 604800,
'd': 86400,
'h': 3600,
'm': 60,
's': 1
}
timedef = timedef.lower()
if timedef.isdigit():
return int(timedef)
elif len(timedef) == 0:
return 0
seconds = -1
for spec in converter_order:
timedef = timedef.split(spec)
if len(timedef) == 1:
timedef = timedef[0]
continue
elif len(timedef) > 2 or not timedef[0].isdigit():
seconds = -1
break
adjustment = converters[spec]
seconds = max(seconds, 0)
seconds += (int(timedef[0]) * adjustment)
timedef = timedef[1]
if not len(timedef):
break
if seconds < 0:
raise ValueError('invalid time format')
return seconds
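# Illustrative examples (not part of the original source):
#
#     parse_timespan('1m30s')  # -> 90
#     parse_timespan('2h')     # -> 7200
#     parse_timespan(45)       # -> 45 (ints are returned unmodified)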
def parse_to_slug(words, maxlen=24):
"""
Parse a string into a slug format suitable for use in URLs and other
character restricted applications. Only utf-8 strings are supported at this
time.
:param str words: The words to parse.
:param int maxlen: The maximum length of the slug.
:return: The parsed words as a slug.
:rtype: str
"""
slug = ''
maxlen = min(maxlen, len(words))
for c in words:
if len(slug) == maxlen:
break
c = ord(c)
if c == 0x27:
continue
elif c >= 0x30 and c <= 0x39:
slug += chr(c)
elif c >= 0x41 and c <= 0x5a:
slug += chr(c + 0x20)
elif c >= 0x61 and c <= 0x7a:
slug += chr(c)
elif len(slug) and slug[-1] != '-':
slug += '-'
if len(slug) and slug[-1] == '-':
slug = slug[:-1]
return slug
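# Illustrative example (not part of the original source):
#
#     parse_to_slug("Hello, World! It's a Test")  # -> 'hello-world-its-a-test'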
def random_string_alphanumeric(size):
"""
Generate a random string of *size* length consisting of mixed case letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))
def random_string_lower_numeric(size):
"""
Generate a random string of *size* length consisting of lowercase letters
and numbers. This function is not meant for cryptographic purposes.
:param int size: The length of the string to return.
:return: A string consisting of random characters.
:rtype: str
"""
# requirements = random, string
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))
def selection_collision(selections, poolsize):
"""
Calculate the probability that two random values selected from an arbitrary
sized pool of unique values will be equal. This is commonly known as the
"Birthday Problem".
:param int selections: The number of random selections.
:param int poolsize: The number of unique random values in the pool to choose from.
:rtype: float
:return: The chance that a collision will occur as a percentage.
"""
    # requirements = sys
probability = 100.0
poolsize = float(poolsize)
for i in range(selections):
probability = probability * (poolsize - i) / poolsize
probability = (100.0 - probability)
return probability
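# Illustrative example (not part of the original source): the classic birthday
# problem, 23 people drawing from 365 possible birthdays.
#
#     selection_collision(23, 365)  # -> ~50.7 (percent chance of a collision)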
def unescape_single_quote(escaped):
"""
Unescape a string which uses backslashes to escape single quotes.
:param str escaped: The string to unescape.
:return: The unescaped string.
:rtype: str
"""
escaped = escaped.replace('\\\\', '\\')
escaped = escaped.replace('\\\'', '\'')
return escaped
def unique(seq, key=None):
"""
Create a unique list or tuple from a provided list or tuple and preserve the
order.
:param seq: The list or tuple to preserve unique items from.
:type seq: list, tuple
:param key: If key is provided it will be called during the
comparison process.
:type key: function, None
"""
if key is None:
key = lambda x: x
preserved_type = type(seq)
if preserved_type not in (list, tuple):
raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__))
seen = []
result = []
for item in seq:
marker = key(item)
if marker in seen:
continue
seen.append(marker)
result.append(item)
return preserved_type(result)
def weighted_choice(choices, weight):
"""
    Make a random selection from the specified choices. Apply the *weight*
    function to each choice to obtain a positive integer representing the shares
    of the selection pool that choice should receive. The *weight* function is passed a
single argument of the choice from the *choices* iterable.
:param choices: The choices to select from.
:type choices: list, tuple
    :param weight: The function used to gather weight information for each choice.
:type weight: function
:return: A randomly selected choice from the provided *choices*.
"""
# requirements = random
weights = []
# get weight values for each of the choices
for choice in choices:
choice_weight = weight(choice)
if not (isinstance(choice_weight, int) and choice_weight > 0):
raise TypeError('weight results must be positive integers')
weights.append(choice_weight)
# make a selection within the acceptable range
selection = random.randint(0, sum(weights) - 1)
# find and return the corresponding choice
for idx, choice in enumerate(choices):
if selection < sum(weights[:idx + 1]):
return choice
raise RuntimeError('no selection could be made')
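# Illustrative usage (not part of the original source); the choices and weights
# below are arbitrary example values:
#
#     options = [{'name': 'a', 'shares': 1}, {'name': 'b', 'shares': 3}]
#     weighted_choice(options, lambda c: c['shares'])
#     # picks the 'b' entry roughly 75% of the time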
def which(program):
"""
Locate an executable binary's full path by its name.
:param str program: The executables name.
:return: The full path to the executable.
:rtype: str
"""
# requirements = os
is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
if is_exe(program):
return os.path.abspath(program)
return None
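# Illustrative example (not part of the original source); assumes a POSIX system
# with ls on the PATH:
#
#     which('ls')  # -> '/bin/ls' (or wherever ls is found first on the PATH)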
def xfrange(start, stop=None, step=1):
"""
Iterate through an arithmetic progression.
:param start: Starting number.
:type start: float, int, long
:param stop: Stopping number.
:type stop: float, int, long
:param step: Stepping size.
:type step: float, int, long
"""
if stop is None:
stop = start
start = 0.0
start = float(start)
while start < stop:
yield start
start += step
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/optimize/tests/test_zeros.py | 10 | 3685 | from __future__ import division, print_function, absolute_import
from math import sqrt, exp, sin, cos
from numpy.testing import (assert_warns, assert_,
assert_allclose,
assert_equal)
from numpy import finfo
from scipy.optimize import zeros as cc
from scipy.optimize import zeros
# Import testing parameters
from scipy.optimize._tstutils import functions, fstrings
class TestBasic(object):
def run_check(self, method, name):
a = .5
b = sqrt(3)
xtol = 4*finfo(float).eps
rtol = 4*finfo(float).eps
for function, fname in zip(functions, fstrings):
zero, r = method(function, a, b, xtol=xtol, rtol=rtol,
full_output=True)
assert_(r.converged)
assert_allclose(zero, 1.0, atol=xtol, rtol=rtol,
err_msg='method %s, function %s' % (name, fname))
def test_bisect(self):
self.run_check(cc.bisect, 'bisect')
def test_ridder(self):
self.run_check(cc.ridder, 'ridder')
def test_brentq(self):
self.run_check(cc.brentq, 'brentq')
def test_brenth(self):
self.run_check(cc.brenth, 'brenth')
def test_newton(self):
f1 = lambda x: x**2 - 2*x - 1
f1_1 = lambda x: 2*x - 2
f1_2 = lambda x: 2.0 + 0*x
f2 = lambda x: exp(x) - cos(x)
f2_1 = lambda x: exp(x) + sin(x)
f2_2 = lambda x: exp(x) + cos(x)
for f, f_1, f_2 in [(f1, f1_1, f1_2), (f2, f2_1, f2_2)]:
x = zeros.newton(f, 3, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
x = zeros.newton(f, 3, fprime=f_1, fprime2=f_2, tol=1e-6)
assert_allclose(f(x), 0, atol=1e-6)
def test_deriv_zero_warning(self):
func = lambda x: x**2
dfunc = lambda x: 2*x
assert_warns(RuntimeWarning, cc.newton, func, 0.0, dfunc)
def test_gh_5555():
root = 0.1
def f(x):
return x - root
methods = [cc.bisect, cc.ridder]
xtol = 4*finfo(float).eps
rtol = 4*finfo(float).eps
for method in methods:
res = method(f, -1e8, 1e7, xtol=xtol, rtol=rtol)
assert_allclose(root, res, atol=xtol, rtol=rtol,
err_msg='method %s' % method.__name__)
def test_gh_5557():
# Show that without the changes in 5557 brentq and brenth might
# only achieve a tolerance of 2*(xtol + rtol*|res|).
# f linearly interpolates (0, -0.1), (0.5, -0.1), and (1,
# 0.4). The important parts are that |f(0)| < |f(1)| (so that
# brent takes 0 as the initial guess), |f(0)| < atol (so that
# brent accepts 0 as the root), and that the exact root of f lies
# more than atol away from 0 (so that brent doesn't achieve the
# desired tolerance).
def f(x):
if x < 0.5:
return -0.1
else:
return x - 0.6
atol = 0.51
rtol = 4*finfo(float).eps
methods = [cc.brentq, cc.brenth]
for method in methods:
res = method(f, 0, 1, xtol=atol, rtol=rtol)
assert_allclose(0.6, res, atol=atol, rtol=rtol)
class TestRootResults:
def test_repr(self):
r = zeros.RootResults(root=1.0,
iterations=44,
function_calls=46,
flag=0)
expected_repr = (" converged: True\n flag: 'converged'"
"\n function_calls: 46\n iterations: 44\n"
" root: 1.0")
assert_equal(repr(r), expected_repr)
| mit |
Glutanimate/FrozenFields | src/frozen_fields/main.py | 1 | 6350 | # -*- coding: utf-8 -*-
"""
This file is part of the Frozen Fields add-on for Anki.
Main Module, hooks add-on methods into Anki.
Copyright: (c) 2012-2015 Tiago Barroso <https://github.com/tmbb>
(c) 2015-2018 Glutanimate <https://glutanimate.com/>
License: GNU AGPLv3 <https://www.gnu.org/licenses/agpl.html>
"""
import os
from anki.hooks import addHook, runHook, wrap
from anki.utils import json
from aqt.addcards import AddCards
from aqt.editor import Editor
from aqt.qt import *
from .config import local_conf
from .consts import *
icon_path = os.path.join(addon_path, "icons")
icon_path_frozen = os.path.join(icon_path, "frozen.png")
icon_path_unfrozen = os.path.join(icon_path, "unfrozen.png")
icon_frozen = QUrl.fromLocalFile(icon_path_frozen).toString()
icon_unfrozen = QUrl.fromLocalFile(icon_path_unfrozen).toString()
hotkey_toggle_field = local_conf["hotkeyOne"]
hotkey_toggle_all = local_conf["hotkeyAll"]
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, "js20.js"), "r") as f:
js_code_20 = f.read() % (hotkey_toggle_field, icon_frozen, hotkey_toggle_field, icon_unfrozen)
with open(os.path.join(__location__, "js21.js"), "r") as f:
js_code_21 = f.read()
def loadNote20(self):
"""Modified loadNote(), adds buttons to Editor"""
if not self.note:
return
if self.stealFocus:
field = self.currentField
else:
field = -1
if not self._loaded:
# will be loaded when page is ready
return
data = []
for fld, val in self.note.items():
data.append((fld, self.mw.col.media.escapeImages(val)))
###### ↓modified #########
if isinstance(self.parentWindow, AddCards): # only modify AddCards Editor
flds = self.note.model()["flds"]
sticky = [fld["sticky"] for fld in flds]
self.web.eval(js_code_20)
self.web.eval("setFrozenFields(%s, %s, %d);" % (
json.dumps(data), json.dumps(sticky), field))
else:
self.web.eval("setFields(%s, %d);" % (
json.dumps(data), field))
###########################
self.web.eval("setFonts(%s);" % (
json.dumps(self.fonts())))
self.checkValid()
self.widget.show()
if self.stealFocus:
self.web.setFocus()
# self.stealFocus = False
def loadNote21(self, focusTo=None):
if not self.note:
return
data = []
for fld, val in list(self.note.items()):
data.append((fld, self.mw.col.media.escapeImages(val)))
self.widget.show()
self.updateTags()
def oncallback(arg):
if not self.note:
return
self.setupForegroundButton()
self.checkValid()
if focusTo is not None:
self.web.setFocus()
try:
from aqt import gui_hooks
gui_hooks.editor_did_load_note(self)
except:
runHook("loadNote", self)
# only modify AddCards Editor
if not isinstance(self.parentWindow, AddCards):
self.web.evalWithCallback("setFields(%s); setFonts(%s); focusField(%s); setNoteId(%s)" % (
json.dumps(data),
json.dumps(self.fonts()), json.dumps(focusTo),
json.dumps(self.note.id)),
oncallback)
else:
iconstr_frozen = self.resourceToData(icon_path_frozen)
iconstr_unfrozen = self.resourceToData(icon_path_unfrozen)
flds = self.note.model()["flds"]
sticky = [fld["sticky"] for fld in flds]
eval_definitions = js_code_21 % (hotkey_toggle_field, iconstr_frozen,
iconstr_unfrozen)
eval_calls = "setFrozenFields(%s, %s); setFonts(%s); focusField(%s); setNoteId(%s)" % (
json.dumps(data), json.dumps(sticky),
json.dumps(self.fonts()),
json.dumps(focusTo),
json.dumps(self.note.id))
self.web.eval(eval_definitions)
self.web.evalWithCallback(eval_calls, oncallback)
def onBridge(self, str, _old):
"""Extends the js<->py bridge with our pycmd handler"""
if not str.startswith("frozen"):
if anki21 and str.startswith("blur"):
self.lastField = self.currentField # save old focus
return _old(self, str)
if not self.note or not runHook:
# shutdown
return
(cmd, txt) = str.split(":", 1)
cur = int(txt)
flds = self.note.model()['flds']
flds[cur]['sticky'] = not flds[cur]['sticky']
if not anki21:
self.loadNote()
def frozenToggle(self, batch=False):
"""Toggle state of current field"""
flds = self.note.model()['flds']
cur = self.currentField
if cur is None:
cur = 0
is_sticky = flds[cur]["sticky"]
if not batch:
flds[cur]["sticky"] = not is_sticky
else:
for n in range(len(self.note.fields)):
try:
flds[n]['sticky'] = not is_sticky
except IndexError:
break
if anki21:
self.loadNoteKeepingFocus()
else:
self.web.eval("saveField('key');")
self.loadNote()
def onFrozenToggle21(self, batch=False):
self.web.evalWithCallback(
"saveField('key');", lambda _: self.frozenToggle(batch=batch))
def onSetupButtons20(self):
"""Set up hotkeys"""
if not isinstance(self.parentWindow, AddCards): # only modify AddCards Editor
return
QShortcut(QKeySequence(hotkey_toggle_field), self.parentWindow,
activated=self.frozenToggle)
QShortcut(QKeySequence(hotkey_toggle_all), self.parentWindow,
activated=lambda: self.frozenToggle(batch=True))
def onSetupShortcuts21(cuts, self):
cuts += [(hotkey_toggle_field, self.onFrozenToggle),
(hotkey_toggle_all, lambda: self.onFrozenToggle(batch=True), True)]
# third value: enable shortcut even when no field selected
# Add-on hooks, etc.
if anki21:
addHook("setupEditorShortcuts", onSetupShortcuts21)
Editor.onBridgeCmd = wrap(Editor.onBridgeCmd, onBridge, "around")
Editor.loadNote = loadNote21
Editor.onFrozenToggle = onFrozenToggle21
else:
addHook("setupEditorButtons", onSetupButtons20)
Editor.bridge = wrap(Editor.bridge, onBridge, 'around')
Editor.loadNote = loadNote20
Editor.frozenToggle = frozenToggle
| bsd-2-clause |
danakj/chromium | third_party/closure_linter/closure_linter/not_strict_test.py | 129 | 2318 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| bsd-3-clause |
pylada/pylada-light | src/pylada/ipython/launch/interactive.py | 1 | 4693 | ###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.
# It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
""" Launch interactive function.
This launch strategy will interactively compute each pylada job. This will
block the interpreter.
"""
__docformat__ = "restructuredtext en"
def launch(self, event, jobfolders):
""" Launch jobs interactively.
This call will block until each job is finished in turn.
"""
from os.path import join, dirname
from copy import deepcopy
from .. import get_shell
from ... import default_comm
try:
kwargs = get_shell(self).ev(event.kwargs)
except:
print("Could not process keyword arguments.")
print(event.kwargs)
return
if event.nbprocs != 0:
comm = deepcopy(default_comm)
comm['n'] = event.nbprocs
comm["ppn"] = event.ppn
kwargs['comm'] = comm
for current, path in jobfolders:
# start computations.
for job in current.values():
if job.is_tagged:
continue
name = str(job.name)
if name[0] == '/':
name = name[1:]
if hasattr(job.functional, 'Extract') and not event.force:
p = join(dirname(path), name)
extract = job.functional.Extract(p)
if extract.success:
print("Job {0} completed successfully. It will not be relaunched.".format(name))
continue
print("Working on {0} in {1}.".format(name, path))
kwargs["outdir"] = join(dirname(path), name)
if event.force:
kwargs['overwrite'] = True
job.compute(**kwargs)
def completer(self, event, data):
""" Completer for scattered launcher. """
from .. import jobfolder_file_completer
if data[-1] == "--kwargs":
return [u for u in self.user_ns if u[0] != '_' and isinstance(self.user_ns[u], dict)]
elif data[-1] == "--nbprocs":
return ['']
elif data[-1] == "--ppn":
return ['']
result = ['--force', '--kwargs', '--help', '--nbprocs', '--ppn']
result.extend(jobfolder_file_completer([event.symbol]))
result = list(set(result) - set(data))
return result
def parser(self, subparsers, opalls):
""" Adds subparser for interactive. """
from ... import default_comm
result = subparsers.add_parser('interactive',
description="Launches calculations interactively.\n"
"Each job will launched one after the other. "
"This call is *blocking*.",
parents=[opalls])
result.add_argument('--kwargs', type=str, default="{}", dest="kwargs",
help="Dictionary which contains arguments for the functionals. "
"\"outdir\" and \"comm\" are added automatically. "
"The functional must accept these arguments.")
result.add_argument('--nbprocs', type=int, default=default_comm.get('n', 0),
nargs='?', help="Number of processes over which to launch external calculations. "
"Defaults to {0}. Do 0 for serial.".format(default_comm.get('n', 1)))
result.add_argument('--ppn', dest="ppn", default=default_comm.get('ppn', 1), type=int,
help="Number of processes per node with which to launch external calculations. "
"Defaults to {0}.".format(default_comm.get('ppn', 1)))
result.set_defaults(func=launch)
return result
| gpl-3.0 |
mhvk/numpy | numpy/core/_internal.py | 3 | 27343 | """
A place for internal code
Some things are more easily handled in Python.
"""
import ast
import re
import sys
import platform
import warnings
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
IS_PYPY = platform.python_implementation() == 'PyPy'
if sys.byteorder == 'little':
_nbo = '<'
else:
_nbo = '>'
def _makenames_list(adict, align):
allfields = []
for fname, obj in adict.items():
n = len(obj)
if not isinstance(obj, tuple) or n not in (2, 3):
raise ValueError("entry not a 2- or 3- tuple")
if n > 2 and obj[2] == fname:
continue
num = int(obj[1])
if num < 0:
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if n > 2:
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
# sort by offsets
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
try:
names = adict[-1]
except KeyError:
names = None
if names is None:
names, formats, offsets, titles = _makenames_list(adict, align)
else:
formats = []
offsets = []
titles = []
for name in names:
res = adict[name]
formats.append(res[0])
offsets.append(res[1])
if len(res) > 2:
titles.append(res[2])
else:
titles.append(None)
return dtype({"names": names,
"formats": formats,
"offsets": offsets,
"titles": titles}, align)
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring
def _array_descr(descriptor):
fields = descriptor.fields
if fields is None:
subdtype = descriptor.subdtype
if subdtype is None:
if descriptor.metadata is None:
return descriptor.str
else:
new = descriptor.metadata.copy()
if new:
return (descriptor.str, new)
else:
return descriptor.str
else:
return (_array_descr(subdtype[0]), subdtype[1])
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
offset = 0
for field in ordered_fields:
if field[1] > offset:
num = field[1] - offset
result.append(('', f'|V{num}'))
offset += num
elif field[1] < offset:
raise ValueError(
"dtype.descr is not defined for types with overlapping or "
"out-of-order fields")
if len(field) > 3:
name = (field[2], field[3])
else:
name = field[2]
if field[0].subdtype:
tup = (name, _array_descr(field[0].subdtype[0]),
field[0].subdtype[1])
else:
tup = (name, _array_descr(field[0]))
offset += field[0].itemsize
result.append(tup)
if descriptor.itemsize > offset:
num = descriptor.itemsize - offset
result.append(('', f'|V{num}'))
return result
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(r'(?P<order1>[<>|=]?)'
r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
r'(?P<order2>[<>|=]?)'
r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')
# astr is a string (perhaps comma separated)
_convorder = {'=': _nbo}
def _commastring(astr):
startindex = 0
result = []
while startindex < len(astr):
mo = format_re.match(astr, pos=startindex)
try:
(order1, repeats, order2, dtype) = mo.groups()
except (TypeError, AttributeError):
raise ValueError(
f'format number {len(result)+1} of "{astr}" is not recognized'
) from None
startindex = mo.end()
# Separator or ending padding
if startindex < len(astr):
if space_re.match(astr, pos=startindex):
startindex = len(astr)
else:
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
'format number %d of "%s" is not recognized' %
(len(result)+1, astr))
startindex = mo.end()
if order2 == '':
order = order1
elif order1 == '':
order = order2
else:
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
raise ValueError(
'inconsistent byte-order specification %s and %s' %
(order1, order2))
order = order1
if order in ('|', '=', _nbo):
order = ''
dtype = order + dtype
if (repeats == ''):
newitem = dtype
else:
newitem = (dtype, ast.literal_eval(repeats))
result.append(newitem)
return result
class dummy_ctype:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
def __ne__(self, other):
return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('p').char
if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
class _missing_ctypes:
def cast(self, num, obj):
return num.value
class c_void_p:
def __init__(self, ptr):
self.value = ptr
class _ctypes:
def __init__(self, array, ptr=None):
self._arr = array
if ctypes:
self._ctypes = ctypes
self._data = self._ctypes.c_void_p(ptr)
else:
# fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
self._data = self._ctypes.c_void_p(ptr)
self._data._objects = array
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
"""
Return the data pointer cast to a particular c-types object.
For example, calling ``self._as_parameter_`` is equivalent to
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
The returned pointer will keep a reference to the array.
"""
# _ctypes.cast function causes a circular reference of self._data in
# self._data._objects. Attributes of self._data cannot be released
# until gc.collect is called. Make a copy of the pointer first then let
# it hold the array reference. This is a workaround to circumvent the
# CPython bug https://bugs.python.org/issue12836
ptr = self._ctypes.cast(self._data, obj)
ptr._arr = self._arr
return ptr
def shape_as(self, obj):
"""
Return the shape tuple as an array of some other c-types
type. For example: ``self.shape_as(ctypes.c_short)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
"""
Return the strides tuple as an array of some other
c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
"""
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
@property
def data(self):
"""
A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
Note that unlike ``data_as``, a reference will not be kept to the array:
code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
pointer to a deallocated array, and should be spelt
``(a + b).ctypes.data_as(ctypes.c_void_p)``
"""
return self._data.value
@property
def shape(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
`ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
the platform. The ctypes array contains the shape of
the underlying array.
"""
return self.shape_as(_getintp_ctype())
@property
def strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
"""
return self.strides_as(_getintp_ctype())
@property
def _as_parameter_(self):
"""
Overrides the ctypes semi-magic method
Enables `c_func(some_array.ctypes)`
"""
return self.data_as(ctypes.c_void_p)
# Numpy 1.21.0, 2021-05-18
def get_data(self):
"""Deprecated getter for the `_ctypes.data` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_data" is deprecated. Use "data" instead',
DeprecationWarning, stacklevel=2)
return self.data
def get_shape(self):
"""Deprecated getter for the `_ctypes.shape` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_shape" is deprecated. Use "shape" instead',
DeprecationWarning, stacklevel=2)
return self.shape
def get_strides(self):
"""Deprecated getter for the `_ctypes.strides` property.
.. deprecated:: 1.21
"""
warnings.warn('"get_strides" is deprecated. Use "strides" instead',
DeprecationWarning, stacklevel=2)
return self.strides
def get_as_parameter(self):
"""Deprecated getter for the `_ctypes._as_parameter_` property.
.. deprecated:: 1.21
"""
warnings.warn(
'"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
DeprecationWarning, stacklevel=2,
)
return self._as_parameter_
def _newnames(datatype, order):
"""
Given a datatype and an order object, return a new names tuple, with the
order indicated
"""
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError(f"duplicate field name: {name}") from None
else:
raise ValueError(f"unknown field name: {name}") from None
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError(f"unsupported order value: {order}")
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
# if the types are equivalent, there is no problem.
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for object array.")
return
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
_pep3118_native_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'h',
'H': 'H',
'i': 'i',
'I': 'I',
'l': 'l',
'L': 'L',
'q': 'q',
'Q': 'Q',
'e': 'e',
'f': 'f',
'd': 'd',
'g': 'g',
'Zf': 'F',
'Zd': 'D',
'Zg': 'G',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
_pep3118_standard_map = {
'?': '?',
'c': 'S1',
'b': 'b',
'B': 'B',
'h': 'i2',
'H': 'u2',
'i': 'i4',
'I': 'u4',
'l': 'i4',
'L': 'u4',
'q': 'i8',
'Q': 'u8',
'e': 'f2',
'f': 'f',
'd': 'd',
'Zf': 'F',
'Zd': 'D',
's': 'S',
'w': 'U',
'O': 'O',
'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
_pep3118_unsupported_map = {
'u': 'UCS-2 strings',
'&': 'pointers',
't': 'bitfields',
'X': 'function pointers',
}
class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
def _dtype_from_pep3118(spec):
stream = _Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
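# Illustrative examples (a sketch, not part of the original source):
#
#     _dtype_from_pep3118('<i')   # -> dtype('int32')
#     _dtype_from_pep3118('T{<l:x:<d:y:}')
#     # -> a structured dtype with fields 'x' (little-endian int32) and
#     #    'y' (little-endian float64)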
def __dtype_from_pep3118(stream, is_subdtype):
field_spec = dict(
names=[],
formats=[],
offsets=[],
itemsize=0
)
offset = 0
common_alignment = 1
is_padding = False
# Parse spec
while stream:
value = None
# End of structure, bail out to upper level
if stream.consume('}'):
break
# Sub-arrays (1)
shape = None
if stream.consume('('):
shape = stream.consume_until(')')
shape = tuple(map(int, shape.split(',')))
# Byte order
if stream.next in ('@', '=', '<', '>', '^', '!'):
byteorder = stream.advance(1)
if byteorder == '!':
byteorder = '>'
stream.byteorder = byteorder
# Byte order characters also control native vs. standard type sizes
if stream.byteorder in ('@', '^'):
type_map = _pep3118_native_map
type_map_chars = _pep3118_native_typechars
else:
type_map = _pep3118_standard_map
type_map_chars = _pep3118_standard_typechars
# Item sizes
itemsize_str = stream.consume_until(lambda c: not c.isdigit())
if itemsize_str:
itemsize = int(itemsize_str)
else:
itemsize = 1
# Data types
is_padding = False
if stream.consume('T{'):
value, align = __dtype_from_pep3118(
stream, is_subdtype=True)
elif stream.next in type_map_chars:
if stream.next == 'Z':
typechar = stream.advance(2)
else:
typechar = stream.advance(1)
is_padding = (typechar == 'x')
dtypechar = type_map[typechar]
if dtypechar in 'USV':
dtypechar += '%d' % itemsize
itemsize = 1
numpy_byteorder = {'@': '=', '^': '='}.get(
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
elif stream.next in _pep3118_unsupported_map:
desc = _pep3118_unsupported_map[stream.next]
raise NotImplementedError(
"Unrepresentable PEP 3118 data type {!r} ({})"
.format(stream.next, desc))
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
#
# Native alignment may require padding
#
# Here we assume that the presence of a '@' character implicitly implies
# that the start of the array is *already* aligned.
#
extra_offset = 0
if stream.byteorder == '@':
start_padding = (-offset) % align
intra_padding = (-value.itemsize) % align
offset += start_padding
if intra_padding != 0:
if itemsize > 1 or (shape is not None and _prod(shape) > 1):
# Inject internal padding to the end of the sub-item
value = _add_trailing_padding(value, intra_padding)
else:
# We can postpone the injection of internal padding,
# as the item appears at most once
extra_offset += intra_padding
# Update common alignment
common_alignment = _lcm(align, common_alignment)
# Convert itemsize to sub-array
if itemsize != 1:
value = dtype((value, (itemsize,)))
# Sub-arrays (2)
if shape is not None:
value = dtype((value, shape))
# Field name
if stream.consume(':'):
name = stream.consume_until(':')
else:
name = None
if not (is_padding and name is None):
if name is not None and name in field_spec['names']:
raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
field_spec['names'].append(name)
field_spec['formats'].append(value)
field_spec['offsets'].append(offset)
offset += value.itemsize
offset += extra_offset
field_spec['itemsize'] = offset
# extra final padding for aligned types
if stream.byteorder == '@':
field_spec['itemsize'] += (-offset) % common_alignment
# Check if this was a simple 1-item type, and unwrap it
if (field_spec['names'] == [None]
and field_spec['offsets'][0] == 0
and field_spec['itemsize'] == field_spec['formats'][0].itemsize
and not is_subdtype):
ret = field_spec['formats'][0]
else:
_fix_names(field_spec)
ret = dtype(field_spec)
# Finished
return ret, common_alignment
def _fix_names(field_spec):
""" Replace names which are None with the next unused f%d name """
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = f'f{j}'
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
"""Inject the specified number of padding bytes at the end of a dtype"""
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
['{}={!r}'.format(k, v)
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
return ('operand type(s) all returned NotImplemented from '
'__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
def array_function_errmsg_formatter(public_api, types):
""" Format the error message for when __array_ufunc__ gives up. """
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
return ("no implementation found for '{}' on types that implement "
'__array_function__: {}'.format(func_name, list(types)))
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
This is used to construct the first line of the docstring
"""
# input arguments are simple
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
# output arguments are both keyword or positional
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
# keyword only args depend on whether this is a gufunc
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
)
# NOTE: gufuncs may or may not support the `axis` parameter
if ufunc.signature is None:
kwargs = f", where=True{kwargs}[, signature, extobj]"
else:
kwargs += "[, signature, extobj, axes, axis]"
# join all the parts together
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
def npy_ctypes_check(cls):
# determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
# ctypes class are new-style, so have an __mro__. This probably fails
# for ctypes classes with multiple inheritance.
if IS_PYPY:
# (..., _ctypes.basics._CData, Bufferable, object)
ctype_base = cls.__mro__[-3]
else:
# # (..., _ctypes._CData, object)
ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return '_ctypes' in ctype_base.__module__
except Exception:
return False
class recursive:
'''
A decorator class for recursive nested functions.
Naive recursive nested functions hold a reference to themselves:
def outer(*args):
def stringify_leaky(arg0, *arg1):
if len(arg1) > 0:
return stringify_leaky(*arg1) # <- HERE
return str(arg0)
stringify_leaky(*args)
This design pattern creates a reference cycle that is difficult for a
garbage collector to resolve. The decorator class prevents the
cycle by passing the nested function in as an argument `self`:
def outer(*args):
@recursive
def stringify(self, arg0, *arg1):
if len(arg1) > 0:
return self(*arg1)
return str(arg0)
stringify(*args)
'''
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(self, *args, **kwargs)
| bsd-3-clause |
wolfier/incubator-airflow | airflow/operators/dagrun_operator.py | 14 | 3160 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
from airflow.api.common.experimental.trigger_dag import trigger_dag
import json
class DagRunOrder(object):
def __init__(self, run_id=None, payload=None):
self.run_id = run_id
self.payload = payload
class TriggerDagRunOperator(BaseOperator):
"""
Triggers a DAG run for a specified ``dag_id``
:param trigger_dag_id: the dag_id to trigger
:type trigger_dag_id: str
:param python_callable: a reference to a python function that will be
called while passing it the ``context`` object and a placeholder
object ``obj`` for your callable to fill and return if you want
a DagRun created. This ``obj`` object contains a ``run_id`` and
``payload`` attribute that you can modify in your function.
The ``run_id`` should be a unique identifier for that DAG run, and
the payload has to be a picklable object that will be made available
to your tasks while executing that DAG run. Your function header
should look like ``def foo(context, dag_run_obj):``
:type python_callable: python callable
:param execution_date: Execution date for the dag
:type execution_date: datetime.datetime
"""
template_fields = tuple()
template_ext = tuple()
ui_color = '#ffefeb'
@apply_defaults
def __init__(
self,
trigger_dag_id,
python_callable=None,
execution_date=None,
*args, **kwargs):
super(TriggerDagRunOperator, self).__init__(*args, **kwargs)
self.python_callable = python_callable
self.trigger_dag_id = trigger_dag_id
self.execution_date = execution_date
def execute(self, context):
dro = DagRunOrder(run_id='trig__' + timezone.utcnow().isoformat())
if self.python_callable is not None:
dro = self.python_callable(context, dro)
if dro:
trigger_dag(dag_id=self.trigger_dag_id,
run_id=dro.run_id,
conf=json.dumps(dro.payload),
execution_date=self.execution_date,
replace_microseconds=False)
else:
self.log.info("Criteria not met, moving on")
| apache-2.0 |
skoppisetty/idigbio-appliance | lib/sqlalchemy/log.py | 39 | 6820 | # sqlalchemy/log.py
# Copyright (C) 2006-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# Includes alterations by Vinay Sajip [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Logging control and utilities.
Control of logging for SA can be performed from the regular python logging
module. The regular dotted module namespace is used, starting at
'sqlalchemy'. For class-level logging, the class name is appended.
The "echo" keyword parameter, available on SQLA :class:`.Engine`
and :class:`.Pool` objects, corresponds to a logger specific to that
instance only.
"""
import logging
import sys
from sqlalchemy import util
# set initial level to WARN. This is so that
# log statements don't occur in the absence of explicit
# logging being enabled for 'sqlalchemy'.
rootlogger = logging.getLogger('sqlalchemy')
if rootlogger.level == logging.NOTSET:
rootlogger.setLevel(logging.WARN)
def _add_default_handler(logger):
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
logger.addHandler(handler)
_logged_classes = set()
def class_logger(cls, enable=False):
logger = logging.getLogger(cls.__module__ + "." + cls.__name__)
if enable == 'debug':
logger.setLevel(logging.DEBUG)
elif enable == 'info':
logger.setLevel(logging.INFO)
cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG)
cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO)
cls.logger = logger
_logged_classes.add(cls)
class Identified(object):
logging_name = None
def _should_log_debug(self):
return self.logger.isEnabledFor(logging.DEBUG)
def _should_log_info(self):
return self.logger.isEnabledFor(logging.INFO)
class InstanceLogger(object):
"""A logger adapter (wrapper) for :class:`.Identified` subclasses.
This allows multiple instances (e.g. Engine or Pool instances)
to share a logger, but have its verbosity controlled on a
per-instance basis.
The basic functionality is to return a logging level
which is based on an instance's echo setting.
Default implementation is:
'debug' -> logging.DEBUG
True -> logging.INFO
False -> Effective level of underlying logger
(logging.WARNING by default)
None -> same as False
"""
# Map echo settings to logger levels
_echo_map = {
None: logging.NOTSET,
False: logging.NOTSET,
True: logging.INFO,
'debug': logging.DEBUG,
}
def __init__(self, echo, name):
self.echo = echo
self.logger = logging.getLogger(name)
# if echo flag is enabled and no handlers,
# add a handler to the list
if self._echo_map[echo] <= logging.INFO \
and not self.logger.handlers:
_add_default_handler(self.logger)
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""Delegate a debug call to the underlying logger."""
self.log(logging.DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""Delegate an info call to the underlying logger."""
self.log(logging.INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""Delegate a warning call to the underlying logger."""
self.log(logging.WARNING, msg, *args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(logging.ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""Delegate an exception call to the underlying logger."""
kwargs["exc_info"] = 1
self.log(logging.ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""Delegate a critical call to the underlying logger."""
self.log(logging.CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""Delegate a log call to the underlying logger.
The level here is determined by the echo
flag as well as that of the underlying logger, and
logger._log() is called directly.
"""
# inline the logic from isEnabledFor(),
# getEffectiveLevel(), to avoid overhead.
if self.logger.manager.disable >= level:
return
selected_level = self._echo_map[self.echo]
if selected_level == logging.NOTSET:
selected_level = self.logger.getEffectiveLevel()
if level >= selected_level:
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""Is this logger enabled for level 'level'?"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getEffectiveLevel(self):
"""What's the effective level for this logger?"""
level = self._echo_map[self.echo]
if level == logging.NOTSET:
level = self.logger.getEffectiveLevel()
return level
def instance_logger(instance, echoflag=None):
"""create a logger for an instance that implements :class:`.Identified`."""
if instance.logging_name:
name = "%s.%s.%s" % (instance.__class__.__module__,
instance.__class__.__name__, instance.logging_name)
else:
name = "%s.%s" % (instance.__class__.__module__,
instance.__class__.__name__)
instance._echo = echoflag
if echoflag in (False, None):
# if no echo setting or False, return a Logger directly,
# avoiding overhead of filtering
logger = logging.getLogger(name)
else:
# if a specified echo flag, return an EchoLogger,
# which checks the flag, overrides normal log
# levels by calling logger._log()
logger = InstanceLogger(echoflag, name)
instance.logger = logger
class echo_property(object):
__doc__ = """\
When ``True``, enable log output for this element.
This has the effect of setting the Python logging level for the namespace
of this element's class and object reference. A value of boolean ``True``
indicates that the loglevel ``logging.INFO`` will be set for the logger,
whereas the string value ``debug`` will set the loglevel to
``logging.DEBUG``.
"""
def __get__(self, instance, owner):
if instance is None:
return self
else:
return instance._echo
def __set__(self, instance, value):
instance_logger(instance, echoflag=value)
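# Hedged usage sketch (names are illustrative, not part of SQLAlchemy): a
# hand-rolled component opting into this machinery would look roughly like
#
#     class MyPool(Identified):
#         def __init__(self, echo=False):
#             self.logging_name = 'mypool01'
#             instance_logger(self, echoflag=echo)
#
#     class_logger(MyPool)
#     pool = MyPool(echo='debug')      # per-instance DEBUG via InstanceLogger
#     pool.logger.info("checked out a connection")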
| gpl-3.0 |
lovetox/gajim | src/common/jingle_rtp.py | 1 | 18136 | ##
## Copyright (C) 2006 Gajim Team
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
"""
Handles Jingle RTP sessions (XEP 0167)
"""
import socket
import nbxmpp
import gi
from gi.repository import Farstream
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gi.repository import GLib
from common import gajim
from common.jingle_transport import JingleTransportICEUDP
from common.jingle_content import contents, JingleContent, JingleContentSetupException
from common.connection_handlers_events import InformationEvent
from common.jingle_session import FailedApplication
from collections import deque
import logging
log = logging.getLogger('gajim.c.jingle_rtp')
class JingleRTPContent(JingleContent):
def __init__(self, session, media, transport=None):
if transport is None:
transport = JingleTransportICEUDP(None)
JingleContent.__init__(self, session, transport)
self.media = media
self._dtmf_running = False
self.farstream_media = {'audio': Farstream.MediaType.AUDIO,
'video': Farstream.MediaType.VIDEO}[media]
self.pipeline = None
self.src_bin = None
self.stream_failed_once = False
self.candidates_ready = False # True when local candidates are prepared
self.callbacks['session-initiate'] += [self.__on_remote_codecs]
self.callbacks['content-add'] += [self.__on_remote_codecs]
self.callbacks['description-info'] += [self.__on_remote_codecs]
self.callbacks['content-accept'] += [self.__on_remote_codecs]
self.callbacks['session-accept'] += [self.__on_remote_codecs]
self.callbacks['session-terminate'] += [self.__stop]
self.callbacks['session-terminate-sent'] += [self.__stop]
def setup_stream(self, on_src_pad_added):
# pipeline and bus
self.pipeline = Gst.Pipeline()
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self._on_gst_message)
# conference
self.conference = Gst.ElementFactory.make('fsrtpconference', None)
self.pipeline.add(self.conference)
self.funnel = None
self.p2psession = self.conference.new_session(self.farstream_media)
participant = self.conference.new_participant()
# FIXME: Consider a workaround, here...
# pidgin and telepathy-gabble don't follow the XEP, and it won't work
# due to bad controlling-mode
params = {'controlling-mode': self.session.weinitiate, 'debug': False}
if gajim.config.get('use_stun_server'):
stun_server = gajim.config.get('stun_server')
if not stun_server and self.session.connection._stun_servers:
stun_server = self.session.connection._stun_servers[0]['host']
if stun_server:
try:
ip = socket.getaddrinfo(stun_server, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM)[0][4][0]
except socket.gaierror as e:
log.warning('Lookup of stun ip failed: %s' % str(e))
else:
params['stun-ip'] = ip
self.p2pstream = self.p2psession.new_stream(participant,
Farstream.StreamDirection.BOTH)
self.p2pstream.connect('src-pad-added', on_src_pad_added)
self.p2pstream.set_transmitter_ht('nice', params)
def is_ready(self):
return (JingleContent.is_ready(self) and self.candidates_ready)
def make_bin_from_config(self, config_key, pipeline, text):
pipeline = pipeline % gajim.config.get(config_key)
try:
bin = Gst.parse_bin_from_description(pipeline, True)
return bin
except GLib.GError as e:
gajim.nec.push_incoming_event(InformationEvent(None,
conn=self.session.connection, level='error',
pri_txt=_('%s configuration error') % text.capitalize(),
sec_txt=_("Couldn't setup %s. Check your configuration.\n\n"
"Pipeline was:\n%s\n\nError was:\n%s") % (text, pipeline,
str(e))))
raise JingleContentSetupException
def add_remote_candidates(self, candidates):
JingleContent.add_remote_candidates(self, candidates)
        # FIXME: connectivity should not be established yet
        # Instead, it should be established after session-accept!
if self.sent:
self.p2pstream.add_remote_candidates(candidates)
def batch_dtmf(self, events):
"""
Send several DTMF tones
"""
if self._dtmf_running:
raise Exception("There is a DTMF batch already running")
events = deque(events)
self._dtmf_running = True
self._start_dtmf(events.popleft())
GLib.timeout_add(500, self._next_dtmf, events)
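    # Hedged usage sketch (caller names are illustrative): UI code holding an
    # audio content could send the tone sequence "1#23" with
    #
    #     audio_content.batch_dtmf(['1', '#', '2', '3'])
    #
    # each tone being started, held for ~500 ms, then stopped before the next.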
def _next_dtmf(self, events):
self._stop_dtmf()
if events:
self._start_dtmf(events.popleft())
GLib.timeout_add(500, self._next_dtmf, events)
else:
self._dtmf_running = False
def _start_dtmf(self, event):
if event in ('*', '#'):
event = {'*': Farstream.DTMFEvent.STAR,
'#': Farstream.DTMFEvent.POUND}[event]
else:
event = int(event)
self.p2psession.start_telephony_event(event, 2)
def _stop_dtmf(self):
self.p2psession.stop_telephony_event()
def _fill_content(self, content):
content.addChild(nbxmpp.NS_JINGLE_RTP + ' description',
attrs={'media': self.media}, payload=list(self.iter_codecs()))
def _setup_funnel(self):
self.funnel = Gst.ElementFactory.make('funnel', None)
self.pipeline.add(self.funnel)
self.funnel.link(self.sink)
self.sink.set_state(Gst.State.PLAYING)
self.funnel.set_state(Gst.State.PLAYING)
def _on_src_pad_added(self, stream, pad, codec):
if not self.funnel:
self._setup_funnel()
pad.link(self.funnel.get_request_pad('sink_%u'))
def _on_gst_message(self, bus, message):
if message.type == Gst.MessageType.ELEMENT:
name = message.get_structure().get_name()
log.debug('gst element message: %s: %s' % (name, message))
if name == 'farstream-new-active-candidate-pair':
pass
elif name == 'farstream-recv-codecs-changed':
pass
elif name == 'farstream-codecs-changed':
if self.sent and self.p2psession.props.codecs_without_config:
self.send_description_info()
if self.transport.remote_candidates:
# those lines MUST be done after we get info on our
# codecs
self.p2pstream.add_remote_candidates(
self.transport.remote_candidates)
self.transport.remote_candidates = []
self.p2pstream.set_property('direction',
Farstream.StreamDirection.BOTH)
elif name == 'farstream-local-candidates-prepared':
self.candidates_ready = True
if self.is_ready():
self.session.on_session_state_changed(self)
elif name == 'farstream-new-local-candidate':
candidate = self.p2pstream.parse_new_local_candidate(message)[1]
self.transport.candidates.append(candidate)
if self.sent:
# FIXME: Is this case even possible?
self.send_candidate(candidate)
elif name == 'farstream-component-state-changed':
state = message.get_structure().get_value('state')
if state == Farstream.StreamState.FAILED:
reason = nbxmpp.Node('reason')
reason.setTag('failed-transport')
self.session.remove_content(self.creator, self.name, reason)
elif name == 'farstream-error':
log.error('Farstream error #%d!\nMessage: %s' % (
message.get_structure().get_value('error-no'),
message.get_structure().get_value('error-msg')))
elif message.type == Gst.MessageType.ERROR:
            # TODO: Fix it to fall back to videotestsrc anytime an error occurs,
# or raise an error, Jingle way
# or maybe one-sided stream?
if not self.stream_failed_once:
gajim.nec.push_incoming_event(InformationEvent(None,
conn=self.session.connection, level='error',
pri_txt=_('GStreamer error'), sec_txt=_('Error: %s\nDebug: '
'%s' % (message.get_structure().get_value('gerror'),
message.get_structure().get_value('debug')))))
sink_pad = self.p2psession.get_property('sink-pad')
# Remove old source
self.src_bin.get_static_pad('src').unlink(sink_pad)
self.src_bin.set_state(Gst.State.NULL)
self.pipeline.remove(self.src_bin)
if not self.stream_failed_once:
# Add fallback source
self.src_bin = self.get_fallback_src()
self.pipeline.add(self.src_bin)
self.src_bin.link(sink_pad)
self.stream_failed_once = True
else:
reason = nbxmpp.Node('reason')
reason.setTag('failed-application')
self.session.remove_content(self.creator, self.name, reason)
# Start playing again
self.pipeline.set_state(Gst.State.PLAYING)
def get_fallback_src(self):
return Gst.ElementFactory.make('fakesrc', None)
def on_negotiated(self):
if self.accepted:
if self.p2psession.get_property('codecs'):
# those lines MUST be done after we get info on our codecs
if self.transport.remote_candidates:
self.p2pstream.add_remote_candidates(
self.transport.remote_candidates)
self.transport.remote_candidates = []
# TODO: Farstream.StreamDirection.BOTH only if senders='both'
# self.p2pstream.set_property('direction',
# Farstream.StreamDirection.BOTH)
JingleContent.on_negotiated(self)
def __on_remote_codecs(self, stanza, content, error, action):
"""
Get peer codecs from what we get from peer
"""
codecs = []
for codec in content.getTag('description').iterTags('payload-type'):
if not codec['id'] or not codec['name'] or not codec['clockrate']:
# ignore invalid payload-types
continue
c = Farstream.Codec.new(int(codec['id']), codec['name'],
self.farstream_media, int(codec['clockrate']))
if 'channels' in codec:
c.channels = int(codec['channels'])
else:
c.channels = 1
for p in codec.iterTags('parameter'):
c.add_optional_parameter(p['name'], str(p['value']))
codecs.append(c)
if codecs:
try:
self.p2pstream.set_remote_codecs(codecs)
except GLib.Error:
raise FailedApplication
def iter_codecs(self):
codecs = self.p2psession.props.codecs_without_config
for codec in codecs:
attrs = {'name': codec.encoding_name,
'id': codec.id,
'channels': codec.channels}
if codec.clock_rate:
attrs['clockrate'] = codec.clock_rate
if codec.optional_params:
payload = list(nbxmpp.Node('parameter', {'name': p.name,
'value': p.value}) for p in codec.optional_params)
else:
payload = []
yield nbxmpp.Node('payload-type', attrs, payload)
def __stop(self, *things):
self.pipeline.set_state(Gst.State.NULL)
def __del__(self):
self.__stop()
def destroy(self):
JingleContent.destroy(self)
self.p2pstream.disconnect_by_func(self._on_src_pad_added)
self.pipeline.get_bus().disconnect_by_func(self._on_gst_message)
class JingleAudio(JingleRTPContent):
"""
Jingle VoIP sessions consist of audio content transported over an ICE UDP
protocol
"""
def __init__(self, session, transport=None):
JingleRTPContent.__init__(self, session, 'audio', transport)
self.setup_stream()
def set_mic_volume(self, vol):
"""
        vol must be between 0 and 1
"""
self.mic_volume.set_property('volume', vol)
def set_out_volume(self, vol):
"""
        vol must be between 0 and 1
"""
self.out_volume.set_property('volume', vol)
def setup_stream(self):
JingleRTPContent.setup_stream(self, self._on_src_pad_added)
# Configure SPEEX
# Workaround for psi (not needed since rev
# 147aedcea39b43402fe64c533d1866a25449888a):
        # place 16kHz before 8kHz, as buggy psi versions will take into
        # account only the first codec
codecs = [Farstream.Codec.new(Farstream.CODEC_ID_ANY, 'SPEEX',
Farstream.MediaType.AUDIO, 16000),
Farstream.Codec.new(Farstream.CODEC_ID_ANY, 'SPEEX',
Farstream.MediaType.AUDIO, 8000)]
self.p2psession.set_codec_preferences(codecs)
# the local parts
# TODO: Add queues?
self.src_bin = self.make_bin_from_config('audio_input_device',
'%s ! audioconvert', _("audio input"))
self.sink = self.make_bin_from_config('audio_output_device',
'audioconvert ! volume name=gajim_out_vol ! %s', _("audio output"))
self.mic_volume = self.src_bin.get_by_name('gajim_vol')
self.out_volume = self.sink.get_by_name('gajim_out_vol')
# link gst elements
self.pipeline.add(self.sink)
self.pipeline.add(self.src_bin)
self.src_bin.get_static_pad('src').link(self.p2psession.get_property(
'sink-pad'))
# The following is needed for farstream to process ICE requests:
self.pipeline.set_state(Gst.State.PLAYING)
class JingleVideo(JingleRTPContent):
def __init__(self, session, transport=None, in_xid=0, out_xid=0):
JingleRTPContent.__init__(self, session, 'video', transport)
self.in_xid = in_xid
self.out_xid = out_xid
self.out_xid_set = False
self.setup_stream()
def setup_stream(self):
        # TODO: Not everything is working properly yet:
# sometimes, one window won't show up,
# sometimes it'll freeze...
JingleRTPContent.setup_stream(self, self._on_src_pad_added)
bus = self.pipeline.get_bus()
bus.enable_sync_message_emission()
bus.connect('sync-message::element', self._on_sync_message)
# the local parts
if gajim.config.get('video_framerate'):
framerate = 'videorate ! video/x-raw,framerate=%s ! ' % \
gajim.config.get('video_framerate')
else:
framerate = ''
try:
w, h = gajim.config.get('video_size').split('x')
except:
w = h = None
if w and h:
video_size = 'video/x-raw,width=%s,height=%s ! ' % (w, h)
else:
video_size = ''
if gajim.config.get('video_see_self'):
tee = '! tee name=t ! queue ! videoscale ! ' + \
'video/x-raw,width=160,height=120 ! videoconvert ! ' + \
'%s t. ! queue ' % gajim.config.get(
'video_output_device')
else:
tee = ''
self.src_bin = self.make_bin_from_config('video_input_device',
'%%s %s! %svideoscale ! %svideoconvert' % (tee, framerate,
video_size), _("video input"))
self.pipeline.add(self.src_bin)
self.pipeline.set_state(Gst.State.PLAYING)
self.sink = self.make_bin_from_config('video_output_device',
'videoscale ! videoconvert ! %s',
_("video output"))
self.pipeline.add(self.sink)
self.src_bin.get_static_pad('src').link(self.p2psession.get_property(
'sink-pad'))
# The following is needed for farstream to process ICE requests:
self.pipeline.set_state(Gst.State.PLAYING)
def _on_sync_message(self, bus, message):
if message.get_structure() is None:
return False
if message.get_structure().get_name() == 'prepare-window-handle':
message.src.set_property('force-aspect-ratio', True)
imagesink = message.src
if gajim.config.get('video_see_self') and not self.out_xid_set:
imagesink.set_window_handle(self.out_xid)
self.out_xid_set = True
else:
imagesink.set_window_handle(self.in_xid)
def get_fallback_src(self):
# TODO: Use avatar?
pipeline = 'videotestsrc is-live=true ! video/x-raw,framerate=10/1 ! videoconvert'
return Gst.parse_bin_from_description(pipeline, True)
def destroy(self):
JingleRTPContent.destroy(self)
self.pipeline.get_bus().disconnect_by_func(self._on_sync_message)
def get_content(desc):
if desc['media'] == 'audio':
return JingleAudio
elif desc['media'] == 'video':
return JingleVideo
contents[nbxmpp.NS_JINGLE_RTP] = get_content
| gpl-3.0 |
alkemics/graphalchemy | graphalchemy/ogm/mapper.py | 1 | 1095 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# IMPORTS
# ==============================================================================
class Mapper(object):
"""
"""
_new_mappers = False
def __init__(self):
pass
def __call__(self, *args, **kwargs):
return self.register(*args, **kwargs)
def register(self, class_, model, adjacencies={}):
# Instrument class attributes
# Instrument class adjacencies
# Update the metadata to register the class
model.register_class(class_)
# Update the metadata to register the adjacencies
if len(adjacencies) and not model.is_node():
raise Exception('Adjacencies can only be registered on nodes.')
for name, adjacency in adjacencies.iteritems():
node = model
node.add_adjacency(adjacency, name)
relationship = adjacency.relationship
relationship.add_adjacency(adjacency, name)
| apache-2.0 |
crazy-canux/django | tests/template_tests/syntax_tests/test_with.py | 391 | 2245 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class WithTagTests(SimpleTestCase):
@setup({'with01': '{% with key=dict.key %}{{ key }}{% endwith %}'})
def test_with01(self):
output = self.engine.render_to_string('with01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'legacywith01': '{% with dict.key as key %}{{ key }}{% endwith %}'})
def test_legacywith01(self):
output = self.engine.render_to_string('legacywith01', {'dict': {'key': 50}})
self.assertEqual(output, '50')
@setup({'with02': '{{ key }}{% with key=dict.key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_with02(self):
output = self.engine.render_to_string('with02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'legacywith02': '{{ key }}{% with dict.key as key %}'
'{{ key }}-{{ dict.key }}-{{ key }}'
'{% endwith %}{{ key }}'})
def test_legacywith02(self):
output = self.engine.render_to_string('legacywith02', {'dict': {'key': 50}})
if self.engine.string_if_invalid:
self.assertEqual(output, 'INVALID50-50-50INVALID')
else:
self.assertEqual(output, '50-50-50')
@setup({'with03': '{% with a=alpha b=beta %}{{ a }}{{ b }}{% endwith %}'})
def test_with03(self):
output = self.engine.render_to_string('with03', {'alpha': 'A', 'beta': 'B'})
self.assertEqual(output, 'AB')
@setup({'with-error01': '{% with dict.key xx key %}{{ key }}{% endwith %}'})
def test_with_error01(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error01', {'dict': {'key': 50}})
@setup({'with-error02': '{% with dict.key as %}{{ key }}{% endwith %}'})
def test_with_error02(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('with-error02', {'dict': {'key': 50}})
| bsd-3-clause |
MyAOSP/external_skia | tools/generate_fir_coeff.py | 198 | 4546 | #!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
import math
import pprint
def withinStdDev(n):
"""Returns the percent of samples within n std deviations of the normal."""
return math.erf(n / math.sqrt(2))
def withinStdDevRange(a, b):
"""Returns the percent of samples within the std deviation range a, b"""
if b < a:
return 0;
if a < 0:
if b < 0:
return (withinStdDev(-a) - withinStdDev(-b)) / 2;
else:
return (withinStdDev(-a) + withinStdDev(b)) / 2;
else:
return (withinStdDev(b) - withinStdDev(a)) / 2;
#We have a bunch of smudged samples which represent the average coverage of a range.
#We have a 'center' which may not line up with those samples.
#From the 'center' we want to make a normal approximation where '5' sample width out we're at '3' std deviations.
#The first and last samples may not be fully covered.
#This is the sub-sample shift for each set of FIR coefficients (the centers of the lcds in the samples)
#Each subpxl takes up 1/3 of a pixel, so they are centered at x=(i/n+1/2n), or 1/6, 3/6, 5/6 of a pixel.
#Each sample takes up 1/4 of a pixel, so the results fall at (x*4)%1, or 2/3, 0, 1/3 of a sample.
samples_per_pixel = 4
subpxls_per_pixel = 3
#sample_offsets is (frac, int) in sample units.
sample_offsets = [math.modf((float(subpxl_index)/subpxls_per_pixel + 1.0/(2.0*subpxls_per_pixel))*samples_per_pixel) for subpxl_index in range(subpxls_per_pixel)]
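#Worked example (illustrative, derived from the comment above): with
#samples_per_pixel=4 and subpxls_per_pixel=3 the subpixel centers sit at
#1/6, 3/6 and 5/6 of a pixel, i.e. 2/3, 2 and 10/3 samples, so sample_offsets
#comes out as roughly [(0.667, 0.0), (0.0, 2.0), (0.333, 3.0)]
#(fractional shift, whole-sample alignment) as returned by math.modf.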
#How many samples to consider to the left and right of the subpxl center.
sample_units_width = 5
#The std deviation at sample_units_width.
std_dev_max = 3
#The target sum is in some fixed point representation.
#Values larger the 1 in fixed point simulate ink spread.
target_sum = 0x110
for sample_offset, sample_align in sample_offsets:
coeffs = []
coeffs_rounded = []
#We start at sample_offset - sample_units_width
current_sample_left = sample_offset - sample_units_width
current_std_dev_left = -std_dev_max
done = False
while not done:
current_sample_right = math.floor(current_sample_left + 1)
if current_sample_right > sample_offset + sample_units_width:
done = True
current_sample_right = sample_offset + sample_units_width
current_std_dev_right = current_std_dev_left + ((current_sample_right - current_sample_left) / sample_units_width) * std_dev_max
coverage = withinStdDevRange(current_std_dev_left, current_std_dev_right)
coeffs.append(coverage * target_sum)
coeffs_rounded.append(int(round(coverage * target_sum)))
current_sample_left = current_sample_right
current_std_dev_left = current_std_dev_right
# Now we have the numbers we want, but our rounding needs to add up to target_sum.
delta = 0
coeffs_rounded_sum = sum(coeffs_rounded)
if coeffs_rounded_sum > target_sum:
# The coeffs add up to too much. Subtract 1 from the ones which were rounded up the most.
delta = -1
if coeffs_rounded_sum < target_sum:
# The coeffs add up to too little. Add 1 to the ones which were rounded down the most.
delta = 1
if delta:
print "Initial sum is 0x%0.2X, adjusting." % (coeffs_rounded_sum,)
coeff_diff = [(coeff_rounded - coeff) * delta
for coeff, coeff_rounded in zip(coeffs, coeffs_rounded)]
class IndexTracker:
def __init__(self, index, item):
self.index = index
self.item = item
def __lt__(self, other):
return self.item < other.item
def __repr__(self):
return "arr[%d] == %s" % (self.index, repr(self.item))
coeff_pkg = [IndexTracker(i, diff) for i, diff in enumerate(coeff_diff)]
coeff_pkg.sort()
        # num_elements_to_force_round had better be < (2 * sample_units_width + 1) or
        # * our math was wildly wrong
        # * an awful lot of the curve is outside our sample
        # either is pretty bad, and probably means the results will not be useful.
num_elements_to_force_round = abs(coeffs_rounded_sum - target_sum)
for i in xrange(num_elements_to_force_round):
print "Adding %d to index %d to force round %f." % (delta, coeff_pkg[i].index, coeffs[coeff_pkg[i].index])
coeffs_rounded[coeff_pkg[i].index] += delta
print "Prepending %d 0x00 for allignment." % (sample_align,)
coeffs_rounded_aligned = ([0] * int(sample_align)) + coeffs_rounded
print ', '.join(["0x%0.2X" % coeff_rounded for coeff_rounded in coeffs_rounded_aligned])
print sum(coeffs), hex(sum(coeffs_rounded))
print
| bsd-3-clause |
atul-bhouraskar/django | django/contrib/contenttypes/migrations/0001_initial.py | 585 | 1227 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.contenttypes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together=set([('app_label', 'model')]),
),
]
| bsd-3-clause |
krux/kibana-pkg | kibana-4.0.2-linux-x64/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
    converted = '&lt;test&gt;\'&quot;&#13;&amp;&#10;foo'
converted_apos = converted.replace("'", ''')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
CameronLonsdale/jam | docs/highlighting.py | 2 | 3364 | from pygments.lexer import RegexLexer, bygroups, include, combined, words
from pygments.token import *
import sphinx
class JamLexer(RegexLexer):
name = "Jam"
aliases = ["jam"]
filenames = ["*.jm"]
INTEGER_REGEX = r"[0-9]([0-9_]*[0-9])?"
tokens = {
'root': [
(r"#.*?$", Comment),
include('constants'),
include('keywords'),
include('builtins'),
(INTEGER_REGEX, Literal.Number),
(r"{0}\.({0})?".format(INTEGER_REGEX), Literal.Number),
(r"({0})?\.{0}".format(INTEGER_REGEX), Literal.Number),
(r"\"(.*)?\"", Literal.String),
(r"`(.*)?`", Literal.String),
(r"\(", Text, '#push'),
(r"\)", Text, '#pop'),
(" ", Text.Whitespace),
include('operators'),
(r"([a-zA-Z_][a-zA-Z_0-9]*)", Name),
# Mark everything unrecognised as normal
# Catch-all for bad lexers
(r".", Name),
],
'keywords': [
(words(
(
"self",
"const",
"end",
"def",
"class",
"template",
"if",
"elif",
"else",
"while",
"for",
"in",
"as",
"import",
"pragma",
"trait",
), suffix = r'\b'),
Keyword)
],
'constants': [
(words(
(
"true",
"false",
"null",
"inf",
"nan",
), suffix = r'\b'),
Name.Builtin)
],
'builtins': [
(words(
(
"puts",
"Int",
"Int8",
"Int16",
"Int32",
"Int64",
"Int128",
"UInt",
"UInt8",
"UInt16",
"UInt32",
"UInt64",
"UInt128",
"Float",
"Float16",
"Float32",
"Float64",
"UFloat",
"UFloat16",
"UFloat32",
"UFloat64",
"Bool",
"String",
), suffix = r'\b'),
Name.Builtin)
],
'operators': [(symbol, Operator) for symbol in (
r"~",
r"!",
r"%",
r"\^",
r"&",
r"&&",
r"\*",
r"\*\*",
r"-",
r"-=",
r"\+",
r"\+=",
r"=",
r"==",
r"!=",
r"\|",
r"\|\|",
r":",
r"\?",
r"<",
r"<=",
r">",
r">=",
r"\.",
r"/",
r"//",
)
]
}
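# Hedged usage sketch (not part of this module; the Jam snippet syntax is only
# approximate): rendering source with the lexer above via the standard
# Pygments API.
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     code = 'def add(a, b)\n    a + b\nend'
#     print(highlight(code, JamLexer(), HtmlFormatter()))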
| mit |
htwenhe/DJOA | env/Lib/site-packages/django/db/models/options.py | 149 | 36502 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
PROXY_PARENTS = object()
EMPTY_RELATION_TREE = tuple()
IMMUTABLE_WARNING = (
"The return type of '%s' should never be mutated. If you want to manipulate this list "
"for your own use, make a copy first."
)
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'apps', 'default_permissions',
'select_on_save', 'default_related_name',
'required_db_features', 'required_db_vendor')
class raise_deprecation(object):
def __init__(self, suggested_alternative):
self.suggested_alternative = suggested_alternative
def __call__(self, fn):
def wrapper(*args, **kwargs):
warnings.warn(
"'%s is an unofficial API that has been deprecated. "
"You may be able to replace it with '%s'" % (
fn.__name__,
self.suggested_alternative,
),
RemovedInDjango110Warning, stacklevel=2
)
return fn(*args, **kwargs)
return wrapper
def normalize_together(option_together):
"""
option_together can be either a tuple of tuples, or a single
tuple of two strings. Normalize it to a tuple of tuples, so that
calling code can uniformly expect that.
"""
try:
if not option_together:
return ()
if not isinstance(option_together, (tuple, list)):
raise TypeError
first_element = next(iter(option_together))
if not isinstance(first_element, (tuple, list)):
option_together = (option_together,)
# Normalize everything to tuples
return tuple(tuple(ot) for ot in option_together)
except TypeError:
# If the value of option_together isn't valid, return it
# verbatim; this will be picked up by the check framework later.
return option_together
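# Hedged illustration (comments below are not part of Django's source): both
# accepted spellings normalize to the same nested-tuple form, e.g.
#
#     normalize_together(('foo', 'bar'))             -> (('foo', 'bar'),)
#     normalize_together([['foo', 'bar'], ['baz']])  -> (('foo', 'bar'), ('baz',))
#
# and anything invalid is returned unchanged for the check framework to report.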
def make_immutable_fields_list(name, data):
return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
def __init__(self, meta, app_label=None):
self._get_fields_cache = {}
self.proxied_children = []
self.local_fields = []
self.local_many_to_many = []
self.virtual_fields = []
self.model_name = None
self.verbose_name = None
self.verbose_name_plural = None
self.db_table = ''
self.ordering = []
self._ordering_clash = False
self.unique_together = []
self.index_together = []
self.select_on_save = False
self.default_permissions = ('add', 'change', 'delete')
self.permissions = []
self.object_name = None
self.app_label = app_label
self.get_latest_by = None
self.order_with_respect_to = None
self.db_tablespace = settings.DEFAULT_TABLESPACE
self.required_db_features = []
self.required_db_vendor = None
self.meta = meta
self.pk = None
self.has_auto_field = False
self.auto_field = None
self.abstract = False
self.managed = True
self.proxy = False
# For any class that is a proxy (including automatically created
# classes for deferred object loading), proxy_for_model tells us
# which class this model is proxying. Note that proxy_for_model
# can create a chain of proxy models. For non-proxy models, the
# variable is always None.
self.proxy_for_model = None
# For any non-abstract class, the concrete class is the model
# in the end of the proxy_for_model chain. In particular, for
# concrete models, the concrete_model is always the class itself.
self.concrete_model = None
self.swappable = None
self.parents = OrderedDict()
self.auto_created = False
# To handle various inheritance situations, we need to track where
# managers came from (concrete or abstract base classes). `managers`
# keeps a list of 3-tuples of the form:
# (creation_counter, instance, abstract(=True))
self.managers = []
# List of all lookups defined in ForeignKey 'limit_choices_to' options
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom app registry to use, if you're making a separate model set.
self.apps = apps
self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
@property
def app_config(self):
# Don't go through get_app_config to avoid triggering imports.
return self.apps.app_configs.get(self.app_label)
@property
def installed(self):
return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.utils import truncate_name
cls._meta = self
self.model = cls
# First, construct the default values for these options.
self.object_name = cls.__name__
self.model_name = self.object_name.lower()
self.verbose_name = camel_case_to_spaces(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
for name in self.meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
self.unique_together = normalize_together(self.unique_together)
self.index_together = normalize_together(self.index_together)
# verbose_name_plural is a special case because it uses a 's'
# by default.
if self.verbose_name_plural is None:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
else:
self.verbose_name_plural = string_concat(self.verbose_name, 's')
del self.meta
# If the db_table wasn't provided, use the app_label + model_name.
if not self.db_table:
self.db_table = "%s_%s" % (self.app_label, self.model_name)
self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
def _prepare(self, model):
if self.order_with_respect_to:
# The app registry will not be ready at this point, so we cannot
# use get_field().
query = self.order_with_respect_to
try:
self.order_with_respect_to = next(
f for f in self._get_fields(reverse=False)
if f.name == query or f.attname == query
)
except StopIteration:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
self.ordering = ('_order',)
if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
model.add_to_class('_order', OrderWrt())
else:
self.order_with_respect_to = None
if self.pk is None:
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = next(six.itervalues(self.parents))
# Look for a local field with the same name as the
# first parent link. If a local field has already been
# created, use it instead of promoting the parent
already_created = [fld for fld in self.local_fields if fld.name == field.name]
if already_created:
field = already_created[0]
field.primary_key = True
self.setup_pk(field)
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
def add_field(self, field, virtual=False):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if virtual:
self.virtual_fields.append(field)
elif field.is_relation and field.many_to_many:
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
else:
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
# If the field being added is a relation to another known field,
# expire the cache on this field and the forward cache on the field
# being referenced, because there will be new relationships in the
# cache. Otherwise, expire the cache of references *to* this field.
# The mechanism for getting at the related model is slightly odd -
# ideally, we'd just ask for field.related_model. However, related_model
# is a cached property, and all the models haven't been loaded yet, so
# we need to make sure we don't cache a string reference.
if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
try:
field.remote_field.model._meta._expire_cache(forward=False)
except AttributeError:
pass
self._expire_cache()
else:
self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, six.string_types):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
with override(None):
return force_text(self.verbose_name)
@property
def swapped(self):
"""
Has this model been swapped out for another? If so, return the model
name of the replacement; otherwise, return None.
For historical reasons, model name lookups using get_model() are
case insensitive, so we make sure we are case insensitive here.
"""
if self.swappable:
swapped_for = getattr(settings, self.swappable, None)
if swapped_for:
try:
swapped_label, swapped_object = swapped_for.split('.')
except ValueError:
# setting not in the format app_label.model_name
# raising ImproperlyConfigured here causes problems with
# test cleanup code - instead it is raised in get_user_model
# or as part of validation.
return swapped_for
if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
return swapped_for
return None
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
# For legacy reasons, the fields property should only contain forward
# fields that are not virtual or with a m2m cardinality. Therefore we
# pass these three filters as filters to the generator.
# The third lambda is a longwinded way of checking f.related_model - we don't
# use that property directly because related_model is a cached property,
# and all the models may not have been loaded yet; we don't want to cache
# the string reference to the related_model.
is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
is_not_a_generic_foreign_key = lambda f: not (
f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
)
return make_immutable_fields_list(
"fields",
(f for f in self._get_fields(reverse=False) if
is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
and is_not_a_generic_foreign_key(f))
)
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"concrete_fields", (f for f in self.fields if f.concrete)
)
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
return make_immutable_fields_list(
"local_concrete_fields", (f for f in self.local_fields if f.concrete)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_fields_with_model(self):
return [self._map_model(f) for f in self.get_fields()]
@raise_deprecation(suggested_alternative="get_fields()")
def get_concrete_fields_with_model(self):
return [self._map_model(f) for f in self.concrete_fields]
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this list.
"""
return make_immutable_fields_list(
"many_to_many",
(f for f in self._get_fields(reverse=False)
if f.is_relation and f.many_to_many)
)
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
obtaining this field list.
"""
all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return make_immutable_fields_list(
"related_objects",
(obj for obj in all_related_fields
if not obj.hidden or obj.field.many_to_many)
)
@raise_deprecation(suggested_alternative="get_fields()")
def get_m2m_with_model(self):
return [self._map_model(f) for f in self.many_to_many]
@cached_property
def _forward_fields_map(self):
res = {}
fields = self._get_fields(reverse=False)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
@cached_property
def fields_map(self):
res = {}
fields = self._get_fields(forward=False, include_hidden=True)
for field in fields:
res[field.name] = field
# Due to the way Django's internals work, get_field() should also
# be able to fetch a field by attname. In the case of a concrete
# field with relation, includes the *_id name too
try:
res[field.attname] = field
except AttributeError:
pass
return res
def get_field(self, field_name, many_to_many=None):
"""
Returns a field instance given a field name. The field can be either a
forward or reverse field, unless many_to_many is specified; if it is,
only forward fields will be returned.
The many_to_many argument exists for backwards compatibility reasons;
it has been deprecated and will be removed in Django 1.10.
"""
m2m_in_kwargs = many_to_many is not None
if m2m_in_kwargs:
# Always throw a warning if many_to_many is used regardless of
# whether it alters the return type or not.
warnings.warn(
"The 'many_to_many' argument on get_field() is deprecated; "
"use a filter on field.many_to_many instead.",
RemovedInDjango110Warning
)
try:
# In order to avoid premature loading of the relation tree
# (expensive) we prefer checking if the field is a forward field.
field = self._forward_fields_map[field_name]
if many_to_many is False and field.many_to_many:
raise FieldDoesNotExist(
'%s has no field named %r' % (self.object_name, field_name)
)
return field
except KeyError:
# If the app registry is not ready, reverse fields are
# unavailable, therefore we throw a FieldDoesNotExist exception.
if not self.apps.models_ready:
raise FieldDoesNotExist(
"%s has no field named %r. The app cache isn't ready yet, "
"so if this is an auto-created related field, it won't "
"be available yet." % (self.object_name, field_name)
)
try:
if m2m_in_kwargs:
# Previous API does not allow searching reverse fields.
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
# Retrieve field instance by name from cached or just-computed
# field map.
return self.fields_map[field_name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
@raise_deprecation(suggested_alternative="get_field()")
def get_field_by_name(self, name):
return self._map_model_details(self.get_field(name))
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_field_names(self):
names = set()
fields = self.get_fields()
for field in fields:
# For backwards compatibility GenericForeignKey should not be
# included in the results.
if field.is_relation and field.many_to_one and field.related_model is None:
continue
# Relations to child proxy models should not be included.
if (field.model != self.model and
field.model._meta.concrete_model == self.concrete_model):
continue
names.add(field.name)
if hasattr(field, 'attname'):
names.add(field.attname)
return list(names)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
include_parents = True if local_only is False else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents,
include_hidden=include_hidden,
)
fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
if include_proxy_eq:
children = chain.from_iterable(c._relation_tree
for c in self.concrete_model._meta.proxied_children
if c is not self)
relations = (f.remote_field for f in children
if include_hidden or not f.remote_field.field.remote_field.is_hidden())
fields = chain(fields, relations)
return list(fields)
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [
self._map_model(f) for f in self.get_all_related_objects(
local_only=local_only,
include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq,
)
]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_many_to_many_objects(self, local_only=False):
include_parents = True if local_only is not True else PROXY_PARENTS
fields = self._get_fields(
forward=False, reverse=True,
include_parents=include_parents, include_hidden=True
)
return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
@raise_deprecation(suggested_alternative="get_fields()")
def get_all_related_m2m_objects_with_model(self):
fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
for parent in self.parents:
for ancestor in parent._meta.get_parent_list():
result.add(ancestor)
return list(result)
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
for parent in self.parents:
# Tries to get a link field from the immediate parent
parent_link = parent._meta.get_ancestor_link(ancestor)
if parent_link:
# In case of a proxied model, the first link in the chain
# to the ancestor is the parent link itself.
return self.parents[parent] or parent_link
def _populate_directed_relation_graph(self):
"""
This method is used by each model to find its reverse objects. As this
method is very expensive and is accessed frequently (it looks up every
field in a model, in every app), it is computed on first access and then
is set as a property on every model.
"""
related_objects_graph = defaultdict(list)
all_models = self.apps.get_models(include_auto_created=True)
for model in all_models:
# Abstract model's fields are copied to child models, hence we will
# see the fields from the child models.
if model._meta.abstract:
continue
fields_with_relations = (
f for f in model._meta._get_fields(reverse=False, include_parents=False)
if f.is_relation and f.related_model is not None
)
for f in fields_with_relations:
if not isinstance(f.remote_field.model, six.string_types):
related_objects_graph[f.remote_field.model._meta].append(f)
for model in all_models:
# Set the relation_tree using the internal __dict__. In this way
# we avoid calling the cached property. In attribute lookup,
# __dict__ takes precedence over a data descriptor (such as
# @cached_property). This means that the _meta._relation_tree is
# only called if related_objects is not in __dict__.
related_objects = related_objects_graph[model._meta]
model._meta.__dict__['_relation_tree'] = related_objects
# It seems it is possible that self is not in all_models, so guard
# against that with default for get().
return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
"""
Returns a list of fields associated with the model. By default, includes
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:
- include_parents: include fields derived from inheritance
- include_hidden: include fields that have a related_name that
starts with a "+"
"""
if include_parents is False:
include_parents = PROXY_PARENTS
return self._get_fields(include_parents=include_parents, include_hidden=include_hidden)
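# Descriptive note (added for clarity): get_fields() results are memoized per
# argument combination in self._get_fields_cache (populated in _get_fields()
# below) and are invalidated by _expire_cache() whenever the field set changes.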
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
seen_models=None):
"""
Internal helper function to return fields of the model.
* If forward=True, then fields defined on this model are returned.
* If reverse=True, then relations pointing to this model are returned.
* If include_hidden=True, then fields with is_hidden=True are returned.
* The include_parents argument toggles if fields from parent models
should be included. It has three values: True, False, and
PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
fields defined for the current model or any of its parents in the
parent chain to the model's concrete model.
"""
if include_parents not in (True, False, PROXY_PARENTS):
raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
# This helper function is used to allow recursion in ``get_fields()``
# implementation and to provide a fast way for Django's internals to
# access specific subsets of fields.
# We must keep track of which models we have already seen. Otherwise we
# could include the same field multiple times from different models.
topmost_call = False
if seen_models is None:
seen_models = set()
topmost_call = True
seen_models.add(self.model)
# Creates a cache key composed of all arguments
cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
try:
# To avoid list manipulation, always return a shallow copy
# of the results.
return self._get_fields_cache[cache_key]
except KeyError:
pass
fields = []
# Recursively call _get_fields() on each parent, with the same
# options provided in this call.
if include_parents is not False:
for parent in self.parents:
# In diamond inheritance it is possible that we see the same
# model from two different routes. In that case, avoid adding
# fields from the same parent again.
if parent in seen_models:
continue
if (parent._meta.concrete_model != self.concrete_model and
include_parents == PROXY_PARENTS):
continue
for obj in parent._meta._get_fields(
forward=forward, reverse=reverse, include_parents=include_parents,
include_hidden=include_hidden, seen_models=seen_models):
if hasattr(obj, 'parent_link') and obj.parent_link:
continue
fields.append(obj)
if reverse:
# Tree is computed once and cached until the app cache is expired.
# It is composed of a list of fields pointing to the current model
# from other models.
all_fields = self._relation_tree
for field in all_fields:
# If hidden fields should be included or the relation is not
# intentionally hidden, add to the fields dict.
if include_hidden or not field.remote_field.hidden:
fields.append(field.remote_field)
if forward:
fields.extend(
field for field in chain(self.local_fields, self.local_many_to_many)
)
# Virtual fields are recopied to each child model, and they get a
# different model as field.model in each child. Hence we have to
# add the virtual fields separately from the topmost call. If we
# did this recursively similar to local_fields, we would get field
# instances with field.model != self.model.
if topmost_call:
fields.extend(
f for f in self.virtual_fields
)
# To avoid list manipulation, always
# return a shallow copy of the results.
fields = make_immutable_fields_list("get_fields()", fields)
# Store result into cache for later access
self._get_fields_cache[cache_key] = fields
return fields
| mit |
low-sky/pyspeckit | pyspeckit/spectrum/models/n2hp.py | 4 | 11414 | """
===========
N2H+ fitter
===========
Reference for line params:
Dore (Private Communication), improving on the determinations from
L. Pagani, F. Daniel, and M. L. Dubernet A&A 494, 719-727 (2009)
DOI: 10.1051/0004-6361:200810570
http://www.strw.leidenuniv.nl/~moldata/N2H+.html
http://adsabs.harvard.edu/abs/2005MNRAS.363.1083D
"""
from __future__ import print_function
import numpy as np
import matplotlib.cbook as mpcb
import copy
try:
from astropy.io import fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
from . import hyperfine
import astropy.units as u
freq_dict_cen ={
'J1-0': 93173.7637e6,
'J2-1': 186344.8420e6,
'J3-2': 279511.8325e6,
}
voff_lines_dict={
####### J 1-0
'J1-0_01': -7.9930,
'J1-0_02': -7.9930,
'J1-0_03': -7.9930,
'J1-0_04': -0.6112,
'J1-0_05': -0.6112,
'J1-0_06': -0.6112,
'J1-0_07': 0.0000,
'J1-0_08': 0.9533,
'J1-0_09': 0.9533,
'J1-0_10': 5.5371,
'J1-0_11': 5.5371,
'J1-0_12': 5.5371,
'J1-0_13': 5.9704,
'J1-0_14': 5.9704,
'J1-0_15': 6.9238,
####### J 2-1
'J2-1_01': -4.6258,
'J2-1_02': -4.5741,
'J2-1_03': -4.4376,
'J2-1_04': -4.2209,
'J2-1_05': -4.0976,
'J2-1_06': -3.8808,
'J2-1_07': -3.1619,
'J2-1_08': -2.9453,
'J2-1_09': -2.3469,
'J2-1_10': -1.9290,
'J2-1_11': -1.5888,
'J2-1_12': -1.5516,
'J2-1_13': -1.4523,
'J2-1_14': -1.1465,
'J2-1_15': -0.8065,
'J2-1_16': -0.6532,
'J2-1_17': -0.4694,
'J2-1_18': -0.1767,
'J2-1_19': 0.0000,
'J2-1_20': 0.0071,
'J2-1_21': 0.1137,
'J2-1_22': 0.1291,
'J2-1_23': 0.1617,
'J2-1_24': 0.2239,
'J2-1_25': 0.5237,
'J2-1_26': 0.6384,
'J2-1_27': 0.7405,
'J2-1_28': 2.1394,
'J2-1_29': 2.5158,
'J2-1_30': 2.5444,
'J2-1_31': 2.6225,
'J2-1_32': 2.8844,
'J2-1_33': 3.0325,
'J2-1_34': 3.0990,
'J2-1_35': 3.2981,
'J2-1_36': 3.5091,
'J2-1_37': 3.8148,
'J2-1_38': 3.8201,
'J2-1_39': 6.9891,
'J2-1_40': 7.5057,
####### J 3-2
'J3-2_01': -3.0666,
'J3-2_02': -2.9296,
'J3-2_03': -2.7221,
'J3-2_04': -2.6563,
'J3-2_05': -2.5270,
'J3-2_06': -2.4010,
'J3-2_07': -2.2535,
'J3-2_08': -2.1825,
'J3-2_09': -2.1277,
'J3-2_10': -1.5862,
'J3-2_11': -1.0158,
'J3-2_12': -0.6131,
'J3-2_13': -0.6093,
'J3-2_14': -0.5902,
'J3-2_15': -0.4872,
'J3-2_16': -0.4725,
'J3-2_17': -0.2757,
'J3-2_18': -0.0697,
'J3-2_19': -0.0616,
'J3-2_20': -0.0022,
'J3-2_21': 0.0000,
'J3-2_22': 0.0143,
'J3-2_23': 0.0542,
'J3-2_24': 0.0561,
'J3-2_25': 0.0575,
'J3-2_26': 0.0687,
'J3-2_27': 0.1887,
'J3-2_28': 0.2411,
'J3-2_29': 0.3781,
'J3-2_30': 0.4620,
'J3-2_31': 0.4798,
'J3-2_32': 0.5110,
'J3-2_33': 0.5540,
'J3-2_34': 0.7808,
'J3-2_35': 0.9066,
'J3-2_36': 1.6382,
'J3-2_37': 1.6980,
'J3-2_38': 2.1025,
'J3-2_39': 2.1236,
'J3-2_40': 2.1815,
'J3-2_41': 2.5281,
'J3-2_42': 2.6458,
'J3-2_43': 2.8052,
'J3-2_44': 3.0320,
'J3-2_45': 3.4963,
}
line_strength_dict = {
####### J 1-0
'J1-0_01': 0.025957,
'J1-0_02': 0.065372,
'J1-0_03': 0.019779,
'J1-0_04': 0.004376,
'J1-0_05': 0.034890,
'J1-0_06': 0.071844,
'J1-0_07': 0.259259,
'J1-0_08': 0.156480,
'J1-0_09': 0.028705,
'J1-0_10': 0.041361,
'J1-0_11': 0.013309,
'J1-0_12': 0.056442,
'J1-0_13': 0.156482,
'J1-0_14': 0.028705,
'J1-0_15': 0.037038,
####### J 2-1
'J2-1_01': 0.008272,
'J2-1_02': 0.005898,
'J2-1_03': 0.031247,
'J2-1_04': 0.013863,
'J2-1_05': 0.013357,
'J2-1_06': 0.010419,
'J2-1_07': 0.000218,
'J2-1_08': 0.000682,
'J2-1_09': 0.000152,
'J2-1_10': 0.001229,
'J2-1_11': 0.000950,
'J2-1_12': 0.000875,
'J2-1_13': 0.002527,
'J2-1_14': 0.000365,
'J2-1_15': 0.000164,
'J2-1_16': 0.021264,
'J2-1_17': 0.031139,
'J2-1_18': 0.000576,
'J2-1_19': 0.200000,
'J2-1_20': 0.001013,
'J2-1_21': 0.111589,
'J2-1_22': 0.088126,
'J2-1_23': 0.142604,
'J2-1_24': 0.011520,
'J2-1_25': 0.027608,
'J2-1_26': 0.012800,
'J2-1_27': 0.066354,
'J2-1_28': 0.013075,
'J2-1_29': 0.003198,
'J2-1_30': 0.061880,
'J2-1_31': 0.004914,
'J2-1_32': 0.035879,
'J2-1_33': 0.011026,
'J2-1_34': 0.039052,
'J2-1_35': 0.019767,
'J2-1_36': 0.004305,
'J2-1_37': 0.001814,
'J2-1_38': 0.000245,
'J2-1_39': 0.000029,
'J2-1_40': 0.000004,
####### J 3-2
'J3-2_01': 0.001845,
'J3-2_02': 0.001818,
'J3-2_03': 0.003539,
'J3-2_04': 0.014062,
'J3-2_05': 0.011432,
'J3-2_06': 0.000089,
'J3-2_07': 0.002204,
'J3-2_08': 0.002161,
'J3-2_09': 0.000061,
'J3-2_10': 0.000059,
'J3-2_11': 0.000212,
'J3-2_12': 0.000255,
'J3-2_13': 0.000247,
'J3-2_14': 0.000436,
'J3-2_15': 0.010208,
'J3-2_16': 0.000073,
'J3-2_17': 0.007447,
'J3-2_18': 0.000000,
'J3-2_19': 0.000155,
'J3-2_20': 0.000274,
'J3-2_21': 0.174603,
'J3-2_22': 0.018683,
'J3-2_23': 0.135607,
'J3-2_24': 0.100527,
'J3-2_25': 0.124866,
'J3-2_26': 0.060966,
'J3-2_27': 0.088480,
'J3-2_28': 0.001083,
'J3-2_29': 0.094510,
'J3-2_30': 0.014029,
'J3-2_31': 0.007191,
'J3-2_32': 0.022222,
'J3-2_33': 0.047915,
'J3-2_34': 0.015398,
'J3-2_35': 0.000071,
'J3-2_36': 0.000794,
'J3-2_37': 0.001372,
'J3-2_38': 0.007107,
'J3-2_39': 0.016618,
'J3-2_40': 0.009776,
'J3-2_41': 0.000997,
'J3-2_42': 0.000487,
'J3-2_43': 0.000069,
'J3-2_44': 0.000039,
'J3-2_45': 0.000010,
}
# Get frequency dictionary in Hz based on the offset velocity and rest frequency
conv_J10=u.doppler_radio(freq_dict_cen['J1-0']*u.Hz)
conv_J21=u.doppler_radio(freq_dict_cen['J2-1']*u.Hz)
conv_J32=u.doppler_radio(freq_dict_cen['J3-2']*u.Hz)
freq_dict = {
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J10).value) for name in voff_lines_dict.keys() if "J1-0" in name
}
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J21).value) for name in voff_lines_dict.keys() if "J2-1" in name
})
freq_dict.update({
name: ((voff_lines_dict[name]*u.km/u.s).to(u.Hz, equivalencies=conv_J32).value) for name in voff_lines_dict.keys() if "J3-2" in name
})
# relative_strength_total_degeneracy is not used in the CLASS implementation
# of the hfs fit. It is the sum of the degeneracy values for all hyperfines
# for a given line; it gives the relative weights between lines.
# Hyperfine weights are treated as normalized within one rotational transition.
w10 = sum(val for name,val in line_strength_dict.items() if 'J1-0' in name)
w21 = sum(val for name,val in line_strength_dict.items() if 'J2-1' in name)
w32 = sum(val for name,val in line_strength_dict.items() if 'J3-2' in name)
relative_strength_total_degeneracy = {
name : w10 for name in line_strength_dict.keys() if "J1-0" in name
}
relative_strength_total_degeneracy.update({
name : w21 for name in line_strength_dict.keys() if "J2-1" in name
})
relative_strength_total_degeneracy.update({
name : w32 for name in line_strength_dict.keys() if "J3-2" in name
})
# Get the list of line names from the previous lists
line_names = [name for name in voff_lines_dict.keys()]
n2hp_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict, freq_dict,
line_strength_dict,
relative_strength_total_degeneracy)
n2hp_vtau_fitter = n2hp_vtau.fitter
n2hp_vtau_vheight_fitter = n2hp_vtau.vheight_fitter
n2hp_vtau_tbg_fitter = n2hp_vtau.background_fitter
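# Quick usage sketch (hypothetical parameter values): the hyperfine model can be
# evaluated directly on a SpectroscopicAxis `xarr`, e.g.
# spectrum = n2hp_vtau(xarr, Tex=5.0, tau=1.0, xoff_v=0.0, width=0.5)
# which is the same call signature n2hp_radex() uses below to build its model.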
# RADEX part from old file
def n2hp_radex(xarr,
density=4,
column=13,
xoff_v=0.0,
width=1.0,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = copy.copy(xarr)
xarr.convert_to_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
tau = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[temperature_gridnumber,:,:],np.array([[gridval2],[gridval1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))
if debug:
import pdb; pdb.set_trace()
return n2hp_vtau(xarr,Tex=tex,tau=tau,xoff_v=xoff_v,width=width,**kwargs)
| mit |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/site-packages/numpy/distutils/errors.py | 152 | 3577 | """distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
class DistutilsError (Exception):
"""The root of all Distutils evil."""
pass
class DistutilsModuleError (DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
pass
class DistutilsClassError (DistutilsError):
"""Some command class (or possibly distribution class, if anyone
feels a need to subclass Distribution) is found not to be holding
up its end of the bargain, ie. implementing some part of the
"command "interface."""
pass
class DistutilsGetoptError (DistutilsError):
"""The option table provided to 'fancy_getopt()' is bogus."""
pass
class DistutilsArgError (DistutilsError):
"""Raised by fancy_getopt in response to getopt.error -- ie. an
error in the command line usage."""
pass
class DistutilsFileError (DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before OSError
could be raised."""
pass
class DistutilsOptionError (DistutilsError):
"""Syntactic/semantic errors in command options, such as use of
mutually conflicting options, or inconsistent options,
badly-spelled values, etc. No distinction is made between option
values originating in the setup script, the command line, config
files, or what-have-you -- but if we *know* something originated in
the setup script, we'll raise DistutilsSetupError instead."""
pass
class DistutilsSetupError (DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
pass
class DistutilsPlatformError (DistutilsError):
"""We don't know how to do something on the current platform (but
we do know how to do it on some platform) -- eg. trying to compile
C files on a platform not supported by a CCompiler subclass."""
pass
class DistutilsExecError (DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
pass
class DistutilsInternalError (DistutilsError):
"""Internal inconsistencies or impossibilities (obviously, this
should never be seen if the code is working!)."""
pass
class DistutilsTemplateError (DistutilsError):
"""Syntax error in a file list template."""
class DistutilsByteCompileError(DistutilsError):
"""Byte compile error."""
# Exception classes used by the CCompiler implementation classes
class CCompilerError (Exception):
"""Some compile/link operation failed."""
class PreprocessError (CCompilerError):
"""Failure to preprocess one or more C/C++ files."""
class CompileError (CCompilerError):
"""Failure to compile one or more C/C++ source files."""
class LibError (CCompilerError):
"""Failure to create a static library from one or more C/C++ object
files."""
class LinkError (CCompilerError):
"""Failure to link one or more C/C++ object files into an executable
or shared library file."""
class UnknownFileError (CCompilerError):
"""Attempt to process an unknown file type."""
| gpl-3.0 |
moondrop-entertainment/django-nonrel-drawp | django/template/defaultfilters.py | 1 | 29877 | """Default variable filters."""
import re
from decimal import Decimal, InvalidOperation, ROUND_HALF_UP
import random as random_module
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.template.base import Variable, Library
from django.conf import settings
from django.utils import formats
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe, SafeData
from django.utils.translation import ugettext, ungettext
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
"""
Decorator for filters which should only receive unicode objects. The object
passed as the first positional argument will be converted to a unicode
object.
"""
def _dec(*args, **kwargs):
if args:
args = list(args)
args[0] = force_unicode(args[0])
if isinstance(args[0], SafeData) and getattr(func, 'is_safe', False):
return mark_safe(func(*args, **kwargs))
return func(*args, **kwargs)
# Include a reference to the real function (used to check original
# arguments by the template parser).
_dec._decorated_function = getattr(func, '_decorated_function', func)
for attr in ('is_safe', 'needs_autoescape'):
if hasattr(func, attr):
setattr(_dec, attr, getattr(func, attr))
return wraps(func)(_dec)
###################
# STRINGS #
###################
def addslashes(value):
"""
Adds slashes before quotes. Useful for escaping strings in CSV, for
example. Less useful for escaping JavaScript; use the ``escapejs``
filter instead.
"""
return value.replace('\\', '\\\\').replace('"', '\\"').replace("'", "\\'")
addslashes.is_safe = True
addslashes = stringfilter(addslashes)
def capfirst(value):
"""Capitalizes the first character of the value."""
return value and value[0].upper() + value[1:]
capfirst.is_safe=True
capfirst = stringfilter(capfirst)
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
from django.utils.html import escapejs
return escapejs(value)
escapejs = stringfilter(escapejs)
def fix_ampersands(value):
"""Replaces ampersands with ``&`` entities."""
from django.utils.html import fix_ampersands
return fix_ampersands(value)
fix_ampersands.is_safe=True
fix_ampersands = stringfilter(fix_ampersands)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completley invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200
neg_inf = -1e200 * 1e200
nan = (1e200 * 1e200) / (1e200 * 1e200)
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
def floatformat(text, arg=-1):
"""
Displays a float to a specified number of decimal places.
If called without an argument, it displays the floating point number with
one decimal place -- but only if there's a decimal place to be displayed:
* num1 = 34.23234
* num2 = 34.00000
* num3 = 34.26000
* {{ num1|floatformat }} displays "34.2"
* {{ num2|floatformat }} displays "34"
* {{ num3|floatformat }} displays "34.3"
If arg is positive, it will always display exactly arg number of decimal
places:
* {{ num1|floatformat:3 }} displays "34.232"
* {{ num2|floatformat:3 }} displays "34.000"
* {{ num3|floatformat:3 }} displays "34.260"
If arg is negative, it will display arg number of decimal places -- but
only if there are places to be displayed:
* {{ num1|floatformat:"-3" }} displays "34.232"
* {{ num2|floatformat:"-3" }} displays "34"
* {{ num3|floatformat:"-3" }} displays "34.260"
If the input float is infinity or NaN, the (platform-dependent) string
representation of that value will be displayed.
"""
try:
input_val = force_unicode(text)
d = Decimal(input_val)
except UnicodeEncodeError:
return u''
except InvalidOperation:
if input_val in special_floats:
return input_val
try:
d = Decimal(force_unicode(float(text)))
except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
return u''
try:
p = int(arg)
except ValueError:
return input_val
try:
m = int(d) - d
except (ValueError, OverflowError, InvalidOperation):
return input_val
if not m and p < 0:
return mark_safe(formats.number_format(u'%d' % (int(d)), 0))
if p == 0:
exp = Decimal(1)
else:
exp = Decimal(u'1.0') / (Decimal(10) ** abs(p))
try:
# Avoid conversion to scientific notation by accessing `sign`, `digits`
# and `exponent` from `Decimal.as_tuple()` directly.
sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP).as_tuple()
digits = [unicode(digit) for digit in reversed(digits)]
while len(digits) <= abs(exponent):
digits.append(u'0')
digits.insert(-exponent, u'.')
if sign:
digits.append(u'-')
number = u''.join(reversed(digits))
return mark_safe(formats.number_format(number, abs(p)))
except InvalidOperation:
return input_val
floatformat.is_safe = True
def iriencode(value):
"""Escapes an IRI value for use in a URL."""
return force_unicode(iri_to_uri(value))
iriencode.is_safe = True
iriencode = stringfilter(iriencode)
def linenumbers(value, autoescape=None):
"""Displays text with line numbers."""
from django.utils.html import escape
lines = value.split(u'\n')
# Find the maximum width of the line count, for use with zero padding
# string format command
width = unicode(len(unicode(len(lines))))
if not autoescape or isinstance(value, SafeData):
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, line)
else:
for i, line in enumerate(lines):
lines[i] = (u"%0" + width + u"d. %s") % (i + 1, escape(line))
return mark_safe(u'\n'.join(lines))
linenumbers.is_safe = True
linenumbers.needs_autoescape = True
linenumbers = stringfilter(linenumbers)
def lower(value):
"""Converts a string into all lowercase."""
return value.lower()
lower.is_safe = True
lower = stringfilter(lower)
def make_list(value):
"""
Returns the value turned into a list.
For an integer, it's a list of digits.
For a string, it's a list of characters.
"""
return list(value)
make_list.is_safe = False
make_list = stringfilter(make_list)
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return mark_safe(re.sub('[-\s]+', '-', value))
slugify.is_safe = True
slugify = stringfilter(slugify)
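# For example: slugify(u"Joel is a slug!") returns u"joel-is-a-slug";
# in templates this is used as {{ value|slugify }}.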
def stringformat(value, arg):
"""
Formats the variable according to the arg, a string formatting specifier.
This specifier uses Python string formatting syntax, with the exception that
the leading "%" is dropped.
See http://docs.python.org/lib/typesseq-strings.html for documentation
of Python string formatting
"""
try:
return (u"%" + unicode(arg)) % value
except (ValueError, TypeError):
return u""
stringformat.is_safe = True
def title(value):
"""Converts a string into titlecase."""
t = re.sub("([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
return re.sub("\d([A-Z])", lambda m: m.group(0).lower(), t)
title.is_safe = True
title = stringfilter(title)
def truncatechars(value, arg):
"""
Specific to the Drawp school version.
Truncates a string to the given number of characters,
counting from the left.
``value`` is the string to be sliced.
``arg`` is an int giving the number of characters
to keep in the returned slice.
"""
try:
length = int(arg)
except ValueError:
return value
return value[:length]
def truncatewords(value, arg):
"""
Truncates a string after a certain number of words.
Argument: Number of words to truncate after.
Newlines within the string are removed.
"""
from django.utils.text import truncate_words
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
return truncate_words(value, length)
truncatewords.is_safe = True
truncatewords = stringfilter(truncatewords)
def truncatewords_html(value, arg):
"""
Truncates HTML after a certain number of words.
Argument: Number of words to truncate after.
Newlines in the HTML are preserved.
"""
from django.utils.text import truncate_html_words
try:
length = int(arg)
except ValueError: # invalid literal for int()
return value # Fail silently.
return truncate_html_words(value, length)
truncatewords_html.is_safe = True
truncatewords_html = stringfilter(truncatewords_html)
def upper(value):
"""Converts a string into all uppercase."""
return value.upper()
upper.is_safe = False
upper = stringfilter(upper)
def urlencode(value, safe=None):
"""
Escapes a value for use in a URL.
Takes an optional ``safe`` parameter used to determine the characters which
should not be escaped by Django's ``urlquote`` method. If not provided, the
default safe characters will be used (but an empty string can be provided
when *all* characters should be escaped).
"""
from django.utils.http import urlquote
kwargs = {}
if safe is not None:
kwargs['safe'] = safe
return urlquote(value, **kwargs)
urlencode.is_safe = False
urlencode = stringfilter(urlencode)
def urlize(value, autoescape=None):
"""Converts URLs in plain text into clickable links."""
from django.utils.html import urlize
return mark_safe(urlize(value, nofollow=True, autoescape=autoescape))
urlize.is_safe=True
urlize.needs_autoescape = True
urlize = stringfilter(urlize)
def urlizetrunc(value, limit, autoescape=None):
"""
Converts URLs into clickable links, truncating URLs to the given character
limit, and adding 'rel=nofollow' attribute to discourage spamming.
Argument: Length to truncate URLs to.
"""
from django.utils.html import urlize
return mark_safe(urlize(value, trim_url_limit=int(limit), nofollow=True,
autoescape=autoescape))
urlizetrunc.is_safe = True
urlizetrunc.needs_autoescape = True
urlizetrunc = stringfilter(urlizetrunc)
def wordcount(value):
"""Returns the number of words."""
return len(value.split())
wordcount.is_safe = False
wordcount = stringfilter(wordcount)
def wordwrap(value, arg):
"""
Wraps words at specified line length.
Argument: number of characters to wrap the text at.
"""
from django.utils.text import wrap
return wrap(value, int(arg))
wordwrap.is_safe = True
wordwrap = stringfilter(wordwrap)
def ljust(value, arg):
"""
Left-aligns the value in a field of a given width.
Argument: field size.
"""
return value.ljust(int(arg))
ljust.is_safe = True
ljust = stringfilter(ljust)
def rjust(value, arg):
"""
Right-aligns the value in a field of a given width.
Argument: field size.
"""
return value.rjust(int(arg))
rjust.is_safe = True
rjust = stringfilter(rjust)
def center(value, arg):
"""Centers the value in a field of a given width."""
return value.center(int(arg))
center.is_safe = True
center = stringfilter(center)
def cut(value, arg):
"""
Removes all values of arg from the given string.
"""
safe = isinstance(value, SafeData)
value = value.replace(arg, u'')
if safe and arg != ';':
return mark_safe(value)
return value
cut = stringfilter(cut)
###################
# HTML STRINGS #
###################
def escape(value):
"""
Marks the value as a string that should be auto-escaped.
"""
from django.utils.safestring import mark_for_escaping
return mark_for_escaping(value)
escape.is_safe = True
escape = stringfilter(escape)
def force_escape(value):
"""
Escapes a string's HTML. This returns a new string containing the escaped
characters (as opposed to "escape", which marks the content for later
possible escaping).
"""
from django.utils.html import escape
return mark_safe(escape(value))
force_escape = stringfilter(force_escape)
force_escape.is_safe = True
def linebreaks(value, autoescape=None):
"""
Replaces line breaks in plain text with appropriate HTML; a single
newline becomes an HTML line break (``<br />``) and a new line
followed by a blank line becomes a paragraph break (``</p>``).
"""
from django.utils.html import linebreaks
autoescape = autoescape and not isinstance(value, SafeData)
return mark_safe(linebreaks(value, autoescape))
linebreaks.is_safe = True
linebreaks.needs_autoescape = True
linebreaks = stringfilter(linebreaks)
def linebreaksbr(value, autoescape=None):
"""
Converts all newlines in a piece of plain text to HTML line breaks
(``<br />``).
"""
if autoescape and not isinstance(value, SafeData):
from django.utils.html import escape
value = escape(value)
return mark_safe(value.replace('\n', '<br />'))
linebreaksbr.is_safe = True
linebreaksbr.needs_autoescape = True
linebreaksbr = stringfilter(linebreaksbr)
def safe(value):
"""
Marks the value as a string that should not be auto-escaped.
"""
return mark_safe(value)
safe.is_safe = True
safe = stringfilter(safe)
def safeseq(value):
"""
A "safe" filter for sequences. Marks each element in the sequence,
individually, as safe, after converting them to unicode. Returns a list
with the results.
"""
return [mark_safe(force_unicode(obj)) for obj in value]
safeseq.is_safe = True
def removetags(value, tags):
"""Removes a space separated list of [X]HTML tags from the output."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = u'(%s)' % u'|'.join(tags)
starttag_re = re.compile(ur'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile(u'</%s>' % tags_re)
value = starttag_re.sub(u'', value)
value = endtag_re.sub(u'', value)
return value
removetags.is_safe = True
removetags = stringfilter(removetags)
def striptags(value):
"""Strips all [X]HTML tags."""
from django.utils.html import strip_tags
return strip_tags(value)
striptags.is_safe = True
striptags = stringfilter(striptags)
###################
# LISTS #
###################
def dictsort(value, arg):
"""
Takes a list of dicts, returns that list sorted by the property given in
the argument.
"""
return sorted(value, key=Variable(arg).resolve)
dictsort.is_safe = False
def dictsortreversed(value, arg):
"""
Takes a list of dicts, returns that list sorted in reverse order by the
property given in the argument.
"""
return sorted(value, key=Variable(arg).resolve, reverse=True)
dictsortreversed.is_safe = False
def first(value):
"""Returns the first item in a list."""
try:
return value[0]
except IndexError:
return u''
first.is_safe = False
def join(value, arg, autoescape=None):
"""
Joins a list with a string, like Python's ``str.join(list)``.
"""
value = map(force_unicode, value)
if autoescape:
value = [conditional_escape(v) for v in value]
try:
data = conditional_escape(arg).join(value)
except AttributeError: # fail silently but nicely
return value
return mark_safe(data)
join.is_safe = True
join.needs_autoescape = True
def last(value):
"Returns the last item in a list"
try:
return value[-1]
except IndexError:
return u''
last.is_safe = True
def length(value):
"""Returns the length of the value - useful for lists."""
try:
return len(value)
except (ValueError, TypeError):
return ''
length.is_safe = True
def length_is(value, arg):
"""Returns a boolean of whether the value's length is the argument."""
try:
return len(value) == int(arg)
except (ValueError, TypeError):
return ''
length_is.is_safe = False
def random(value):
"""Returns a random item from the list."""
return random_module.choice(value)
random.is_safe = True
def slice_(value, arg):
"""
Returns a slice of the list.
Uses the same syntax as Python's list slicing; see
http://diveintopython.org/native_data_types/lists.html#odbchelper.list.slice
for an introduction.
"""
try:
bits = []
for x in arg.split(u':'):
if len(x) == 0:
bits.append(None)
else:
bits.append(int(x))
return value[slice(*bits)]
except (ValueError, TypeError):
return value # Fail silently.
slice_.is_safe = True
def unordered_list(value, autoescape=None):
"""
Recursively takes a self-nested list and returns an HTML unordered list --
WITHOUT opening and closing <ul> tags.
The list is assumed to be in the proper format. For example, if ``var``
contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
then ``{{ var|unordered_list }}`` would return::
<li>States
<ul>
<li>Kansas
<ul>
<li>Lawrence</li>
<li>Topeka</li>
</ul>
</li>
<li>Illinois</li>
</ul>
</li>
"""
if autoescape:
from django.utils.html import conditional_escape
escaper = conditional_escape
else:
escaper = lambda x: x
def convert_old_style_list(list_):
"""
Converts old style lists to the new easier to understand format.
The old list format looked like:
['Item 1', [['Item 1.1', []], ['Item 1.2', []]]]
And it is converted to:
['Item 1', ['Item 1.1', 'Item 1.2']]
"""
if not isinstance(list_, (tuple, list)) or len(list_) != 2:
return list_, False
first_item, second_item = list_
if second_item == []:
return [first_item], True
try:
it = iter(second_item) # see if second item is iterable
except TypeError:
return list_, False
old_style_list = True
new_second_item = []
for sublist in second_item:
item, old_style_list = convert_old_style_list(sublist)
if not old_style_list:
break
new_second_item.extend(item)
if old_style_list:
second_item = new_second_item
return [first_item, second_item], old_style_list
def _helper(list_, tabs=1):
indent = u'\t' * tabs
output = []
list_length = len(list_)
i = 0
while i < list_length:
title = list_[i]
sublist = ''
sublist_item = None
if isinstance(title, (list, tuple)):
sublist_item = title
title = ''
elif i < list_length - 1:
next_item = list_[i+1]
if next_item and isinstance(next_item, (list, tuple)):
# The next item is a sub-list.
sublist_item = next_item
# We've processed the next item now too.
i += 1
if sublist_item:
sublist = _helper(sublist_item, tabs+1)
sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (indent, sublist,
indent, indent)
output.append('%s<li>%s%s</li>' % (indent,
escaper(force_unicode(title)), sublist))
i += 1
return '\n'.join(output)
value, converted = convert_old_style_list(value)
return mark_safe(_helper(value))
unordered_list.is_safe = True
unordered_list.needs_autoescape = True
###################
# INTEGERS #
###################
def add(value, arg):
"""Adds the arg to the value."""
try:
return int(value) + int(arg)
except (ValueError, TypeError):
try:
return value + arg
except:
return value
add.is_safe = False
def get_digit(value, arg):
"""
Given a whole number, returns the requested digit of it, where 1 is the
right-most digit, 2 is the second-right-most digit, etc. Returns the
original value for invalid input (if input or argument is not an integer,
or if argument is less than 1). Otherwise, output is always an integer.
"""
try:
arg = int(arg)
value = int(value)
except ValueError:
return value # Fail silently for an invalid argument
if arg < 1:
return value
try:
return int(str(value)[-arg])
except IndexError:
return 0
get_digit.is_safe = False
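# For example: get_digit(123456789, 2) returns 8, while get_digit(123456789, 0)
# returns the original value because the argument is less than 1.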
###################
# DATES #
###################
def date(value, arg=None):
"""Formats a date according to the given format."""
from django.utils.dateformat import format
if not value:
return u''
if arg is None:
arg = settings.DATE_FORMAT
try:
return formats.date_format(value, arg)
except AttributeError:
try:
return format(value, arg)
except AttributeError:
return ''
date.is_safe = False
def time(value, arg=None):
"""Formats a time according to the given format."""
from django.utils import dateformat
if value in (None, u''):
return u''
if arg is None:
arg = settings.TIME_FORMAT
try:
return formats.time_format(value, arg)
except AttributeError:
try:
return dateformat.time_format(value, arg)
except AttributeError:
return ''
time.is_safe = False
def timesince(value, arg=None):
"""Formats a date as the time since that date (i.e. "4 days, 6 hours")."""
from django.utils.timesince import timesince
if not value:
return u''
try:
if arg:
return timesince(value, arg)
return timesince(value)
except (ValueError, TypeError):
return u''
timesince.is_safe = False
def timeuntil(value, arg=None):
"""Formats a date as the time until that date (i.e. "4 days, 6 hours")."""
from django.utils.timesince import timeuntil
if not value:
return u''
try:
return timeuntil(value, arg)
except (ValueError, TypeError):
return u''
timeuntil.is_safe = False
###################
# LOGIC #
###################
def default(value, arg):
"""If value is unavailable, use given default."""
return value or arg
default.is_safe = False
def default_if_none(value, arg):
"""If value is None, use given default."""
if value is None:
return arg
return value
default_if_none.is_safe = False
def divisibleby(value, arg):
"""Returns True if the value is devisible by the argument."""
return int(value) % int(arg) == 0
divisibleby.is_safe = False
def yesno(value, arg=None):
"""
Given a string mapping values for true, false and (optionally) None,
returns one of those strings according to the value:
========== ====================== ==================================
Value Argument Outputs
========== ====================== ==================================
``True`` ``"yeah,no,maybe"`` ``yeah``
``False`` ``"yeah,no,maybe"`` ``no``
``None`` ``"yeah,no,maybe"`` ``maybe``
``None`` ``"yeah,no"`` ``"no"`` (converts None to False
if no mapping for None is given).
========== ====================== ==================================
"""
if arg is None:
arg = ugettext('yes,no,maybe')
bits = arg.split(u',')
if len(bits) < 2:
return value # Invalid arg.
try:
yes, no, maybe = bits
except ValueError:
# Unpack list of wrong size (no "maybe" value provided).
yes, no, maybe = bits[0], bits[1], bits[1]
if value is None:
return maybe
if value:
return yes
return no
yesno.is_safe = False
###################
# MISC #
###################
def filesizeformat(bytes):
"""
Formats the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
102 bytes, etc).
"""
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
filesize_number_format = lambda value: formats.number_format(round(value, 1), 1)
if bytes < 1024:
return ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
filesizeformat.is_safe = True
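# For example: filesizeformat(1023) returns "1023 bytes" and
# filesizeformat(1048576) returns "1.0 MB" (the decimal separator follows the
# active locale via formats.number_format).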
def pluralize(value, arg=u's'):
"""
Returns a plural suffix if the value is not 1. By default, 's' is used as
the suffix:
* If value is 0, vote{{ value|pluralize }} displays "0 votes".
* If value is 1, vote{{ value|pluralize }} displays "1 vote".
* If value is 2, vote{{ value|pluralize }} displays "2 votes".
If an argument is provided, that string is used instead:
* If value is 0, class{{ value|pluralize:"es" }} displays "0 classes".
* If value is 1, class{{ value|pluralize:"es" }} displays "1 class".
* If value is 2, class{{ value|pluralize:"es" }} displays "2 classes".
If the provided argument contains a comma, the text before the comma is
used for the singular case and the text after the comma is used for the
plural case:
* If value is 0, cand{{ value|pluralize:"y,ies" }} displays "0 candies".
* If value is 1, cand{{ value|pluralize:"y,ies" }} displays "1 candy".
* If value is 2, cand{{ value|pluralize:"y,ies" }} displays "2 candies".
"""
if not u',' in arg:
arg = u',' + arg
bits = arg.split(u',')
if len(bits) > 2:
return u''
singular_suffix, plural_suffix = bits[:2]
try:
if int(value) != 1:
return plural_suffix
except ValueError: # Invalid string that's not a number.
pass
except TypeError: # Value isn't a string or a number; maybe it's a list?
try:
if len(value) != 1:
return plural_suffix
except TypeError: # len() of unsized object.
pass
return singular_suffix
pluralize.is_safe = False
def phone2numeric(value):
"""Takes a phone number and converts it in to its numerical equivalent."""
from django.utils.text import phone2numeric
return phone2numeric(value)
phone2numeric.is_safe = True
def pprint(value):
"""A wrapper around pprint.pprint -- for debugging, really."""
from pprint import pformat
try:
return pformat(value)
except Exception, e:
return u"Error in formatting: %s" % force_unicode(e, errors="replace")
pprint.is_safe = True
# Syntax: register.filter(name of filter, callback)
register.filter(add)
register.filter(addslashes)
register.filter(capfirst)
register.filter(center)
register.filter(cut)
register.filter(date)
register.filter(default)
register.filter(default_if_none)
register.filter(dictsort)
register.filter(dictsortreversed)
register.filter(divisibleby)
register.filter(escape)
register.filter(escapejs)
register.filter(filesizeformat)
register.filter(first)
register.filter(fix_ampersands)
register.filter(floatformat)
register.filter(force_escape)
register.filter(get_digit)
register.filter(iriencode)
register.filter(join)
register.filter(last)
register.filter(length)
register.filter(length_is)
register.filter(linebreaks)
register.filter(linebreaksbr)
register.filter(linenumbers)
register.filter(ljust)
register.filter(lower)
register.filter(make_list)
register.filter(phone2numeric)
register.filter(pluralize)
register.filter(pprint)
register.filter(removetags)
register.filter(random)
register.filter(rjust)
register.filter(safe)
register.filter(safeseq)
register.filter('slice', slice_)
register.filter(slugify)
register.filter(stringformat)
register.filter(striptags)
register.filter(time)
register.filter(timesince)
register.filter(timeuntil)
register.filter(title)
register.filter(truncatechars)
register.filter(truncatewords)
register.filter(truncatewords_html)
register.filter(unordered_list)
register.filter(upper)
register.filter(urlencode)
register.filter(urlize)
register.filter(urlizetrunc)
register.filter(wordcount)
register.filter(wordwrap)
register.filter(yesno)
| bsd-3-clause |
atopuzov/nitro-python | nssrc/com/citrix/netscaler/nitro/resource/config/audit/auditmessages.py | 3 | 6812 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditmessages(base_resource) :
""" Configuration for audit message resource. """
def __init__(self) :
self._loglevel = []
self._numofmesgs = 0
self._value = ""
self.___count = 0
@property
def loglevel(self) :
ur"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG.
"""
try :
return self._loglevel
except Exception as e:
raise e
@loglevel.setter
def loglevel(self, loglevel) :
ur"""Audit log level filter, which specifies the types of events to display.
The following loglevels are valid:
* ALL - All events.
* EMERGENCY - Events that indicate an immediate crisis on the server.
* ALERT - Events that might require action.
* CRITICAL - Events that indicate an imminent server crisis.
* ERROR - Events that indicate some type of error.
* WARNING - Events that require action in the near future.
* NOTICE - Events that the administrator should know about.
* INFORMATIONAL - All but low-level events.
* DEBUG - All events, in extreme detail.<br/>Possible values = ALL, EMERGENCY, ALERT, CRITICAL, ERROR, WARNING, NOTICE, INFORMATIONAL, DEBUG
"""
try :
self._loglevel = loglevel
except Exception as e:
raise e
@property
def numofmesgs(self) :
ur"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256.
"""
try :
return self._numofmesgs
except Exception as e:
raise e
@numofmesgs.setter
def numofmesgs(self, numofmesgs) :
ur"""Number of log messages to be displayed.<br/>Default value: 20<br/>Minimum length = 1<br/>Maximum length = 256
"""
try :
self._numofmesgs = numofmesgs
except Exception as e:
raise e
@property
def value(self) :
ur"""The Audit message.
"""
try :
return self._value
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditmessages_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditmessages
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
"""
try :
if not name :
obj = auditmessages()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_args(cls, client, args) :
ur""" Use this API to fetch all the auditmessages resources that are configured on netscaler.
# This uses auditmessages_args which is a way to provide additional arguments while fetching the resources.
"""
try :
obj = auditmessages()
option_ = options()
option_.args = nitro_util.object_to_string_withoutquotes(args)
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of auditmessages resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the auditmessages resources configured on NetScaler.
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of auditmessages resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = auditmessages()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Loglevel:
ALL = "ALL"
EMERGENCY = "EMERGENCY"
ALERT = "ALERT"
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
NOTICE = "NOTICE"
INFORMATIONAL = "INFORMATIONAL"
DEBUG = "DEBUG"
class auditmessages_response(base_response) :
def __init__(self, length=1) :
self.auditmessages = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditmessages = [auditmessages() for _ in range(length)]
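# Usage sketch (assumes an authenticated nitro_service instance `client`):
# all_msgs = auditmessages.get(client)
# errors_only = auditmessages.get_filtered(client, "loglevel:ERROR")
# total = auditmessages.count(client)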
| apache-2.0 |
madjam/mxnet | example/rcnn/rcnn/processing/bbox_regression.py | 25 | 5519 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This file has functions about generating bounding box regression targets
"""
import numpy as np
from ..logger import logger
from bbox_transform import bbox_overlaps, bbox_transform
from rcnn.config import config
def compute_bbox_regression_targets(rois, overlaps, labels):
"""
given rois, overlaps, gt labels, compute bounding box regression targets
:param rois: roidb[i]['boxes'] k * 4
:param overlaps: roidb[i]['max_overlaps'] k * 1
:param labels: roidb[i]['max_classes'] k * 1
:return: targets[i][class, dx, dy, dw, dh] k * 5
"""
# Ensure ROIs are floats
rois = rois.astype(np.float, copy=False)
# Sanity check
if len(rois) != len(overlaps):
logger.warning('bbox regression: len(rois) != len(overlaps)')
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
if len(gt_inds) == 0:
logger.warning('bbox regression: len(gt_inds) == 0')
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= config.TRAIN.BBOX_REGRESSION_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(rois[ex_inds, :], rois[gt_inds, :])
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
def add_bbox_regression_targets(roidb):
"""
given roidb, add ['bbox_targets'] and normalize bounding box regression targets
:param roidb: roidb to be processed. must have gone through imdb.prepare_roidb
:return: means, std variances of targets
"""
logger.info('bbox regression: add bounding box regression targets')
assert len(roidb) > 0
assert 'max_classes' in roidb[0]
num_images = len(roidb)
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in range(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = compute_bbox_regression_targets(rois, max_overlaps, max_classes)
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
# use fixed / precomputed means and stds instead of empirical values
means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (num_classes, 1))
stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (num_classes, 1))
else:
# compute mean, std values
class_counts = np.zeros((num_classes, 1)) + 1e-14
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
if cls_indexes.size > 0:
class_counts[cls] += cls_indexes.size
sums[cls, :] += targets[cls_indexes, 1:].sum(axis=0)
squared_sums[cls, :] += (targets[cls_indexes, 1:] ** 2).sum(axis=0)
means = sums / class_counts
# var(x) = E(x^2) - E(x)^2
stds = np.sqrt(squared_sums / class_counts - means ** 2)
# normalized targets
for im_i in range(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in range(1, num_classes):
cls_indexes = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_indexes, 1:] /= stds[cls, :]
return means.ravel(), stds.ravel()
def expand_bbox_regression_targets(bbox_targets_data, num_classes):
"""
expand from 5 to 4 * num_classes; only the right class has non-zero bbox regression targets
:param bbox_targets_data: [k * 5]
:param num_classes: number of classes
:return: bbox target processed [k * 4 num_classes]
bbox_weights: only foreground boxes have bbox regression computation!
"""
classes = bbox_targets_data[:, 0]
bbox_targets = np.zeros((classes.size, 4 * num_classes), dtype=np.float32)
bbox_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
indexes = np.where(classes > 0)[0]
for index in indexes:
cls = classes[index]
start = int(4 * cls)
end = start + 4
bbox_targets[index, start:end] = bbox_targets_data[index, 1:]
bbox_weights[index, start:end] = config.TRAIN.BBOX_WEIGHTS
return bbox_targets, bbox_weights
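# A minimal, hypothetical usage sketch (not part of the original module): it assumes
# numpy is imported as np and config.TRAIN.BBOX_WEIGHTS is set, as elsewhere in this
# file. Two of the three boxes below are foreground (class > 0), so only their
# per-class 4-value slots receive non-zero targets and weights.
if __name__ == '__main__':
    demo_targets_data = np.array([[0, 0.0, 0.0, 0.0, 0.0],
                                  [1, 0.1, 0.2, 0.0, -0.1],
                                  [2, -0.3, 0.0, 0.5, 0.2]], dtype=np.float32)
    demo_targets, demo_weights = expand_bbox_regression_targets(demo_targets_data, num_classes=3)
    print(demo_targets.shape, demo_weights.shape)  # expected: (3, 12) (3, 12)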
| apache-2.0 |
agoose77/hivesystem | manual/movingpanda/panda-7.py | 1 | 4435 | import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
import Spyder
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = Spyder.AxisSystem()
a.rotateZ(360 * random())
a.origin = Spyder.Coordinate(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "AxisSystem")
def id_generator():
n = 0
while 1:
n += 1
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d, canvasargs
from bee.drone import dummydrone
from libcontext.pluginclasses import plugin_single_required
class parameters: pass
class myscene(bee.frame):
pandaclassname_ = bee.get_parameter("pandaclassname")
pandaname_ = bee.get_parameter("pandaname")
pandaicon_ = bee.get_parameter("pandaicon")
c1 = bee.configure("scene")
c1.import_mesh_EGG("models/environment")
a = Spyder.AxisSystem()
a *= 0.25
a.origin += (-8, 42, 0)
c1.add_model_SPYDER(axissystem=a)
c2 = bee.configure("scene")
c2.import_mesh_EGG("models/panda-model")
a = Spyder.AxisSystem()
a *= 0.005
c2.add_actor_SPYDER(axissystem=a, entityname=pandaname_)
c2.import_mesh_EGG("models/panda-walk4")
c2.add_animation("walk")
c3 = bee.configure("scene")
c3.import_mesh_EGG("models/panda-model")
a = Spyder.AxisSystem()
a *= 0.005
c3.add_actorclass_SPYDER(axissystem=a, actorclassname=pandaclassname_)
c3.import_mesh_EGG("models/panda-walk4")
c3.add_animation("walk")
box = box2d(50, 470, 96, 96)
params = parameters()
params.transparency = True
args = canvasargs("pandaicon.png", pandaicon_, box, params)
plugin = plugin_single_required(args)
pattern = ("canvas", "draw", "init", ("object", "image"))
d1 = dummydrone(plugindict={pattern: plugin})
i1 = bee.init("mousearea")
i1.register(pandaicon_, box)
del a, box, params, args, plugin, pattern
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
pandaicon = "pandaicon"
pandaicon_ = bee.attribute("pandaicon")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor(pandaicon_)
connect(pandaicon_click, do_spawn)
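# A single transistor drives spawning: both the Z key and a click on the panda icon
# trigger do_spawn, which weaves a fresh id from id_generator with a random placement
# matrix from random_matrix_generator and hands the pair to pandaspawn.spawn_matrix.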
myscene = myscene(
scene="scene",
pandaname=pandaname_,
pandaclassname=pandaclassname_,
canvas=canvas,
mousearea=mousearea,
pandaicon=pandaicon_
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
| bsd-2-clause |
thaim/ansible | lib/ansible/modules/network/aci/aci_access_sub_port_block_to_access_port.py | 5 | 12106 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Simon Metzger <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_access_sub_port_block_to_access_port
short_description: Manage sub port blocks of Fabric interface policy leaf profile interface selectors (infra:HPortS, infra:SubPortBlk)
description:
- Manage sub port blocks of Fabric interface policy leaf profile interface selectors on Cisco ACI fabrics.
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC classes B(infra:HPortS) and B(infra:SubPortBlk).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Simon Metzger (@smnmtzgr)
version_added: '2.8'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
type: str
required: yes
aliases: [ leaf_interface_profile_name ]
access_port_selector:
description:
- The name of the Fabric access policy leaf interface profile access port selector.
type: str
required: yes
aliases: [ name, access_port_selector_name ]
leaf_port_blk:
description:
- The name of the Fabric access policy leaf interface profile access port block.
type: str
required: yes
aliases: [ leaf_port_blk_name ]
leaf_port_blk_description:
description:
- The description to assign to the C(leaf_port_blk).
type: str
from_port:
description:
- The beginning (from-range) of the port range block for the leaf access port block.
type: str
required: yes
aliases: [ from, fromPort, from_port_range ]
to_port:
description:
- The end (to-range) of the port range block for the leaf access port block.
type: str
required: yes
aliases: [ to, toPort, to_port_range ]
from_sub_port:
description:
- The beginning (from-range) of the sub port range block for the leaf access port block.
type: str
required: yes
aliases: [ fromSubPort, from_sub_port_range ]
to_sub_port:
description:
- The end (to-range) of the sub port range block for the leaf access port block.
type: str
required: yes
aliases: [ toSubPort, to_sub_port_range ]
from_card:
description:
- The beginning (from-range) of the card range block for the leaf access port block.
type: str
aliases: [ from_card_range ]
to_card:
description:
- The end (to-range) of the card range block for the leaf access port block.
type: str
aliases: [ to_card_range ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Associate an access sub port block (single port) to an interface selector
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
from_port: 13
to_port: 13
from_sub_port: 1
to_sub_port: 1
state: present
delegate_to: localhost
- name: Associate an access sub port block (port range) to an interface selector
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
from_port: 13
to_port: 13
from_sub_port: 1
to_sub_port: 3
state: present
delegate_to: localhost
- name: Remove an access sub port block from an interface selector
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
from_port: 13
to_port: 13
from_sub_port: 1
to_sub_port: 1
state: absent
delegate_to: localhost
- name: Query a specific access sub port block under a given access port selector
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
access_port_selector: accessportselectorname
leaf_port_blk: leafportblkname
state: query
delegate_to: localhost
register: query_result
- name: Query all access sub port blocks under given leaf interface profile
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: query
delegate_to: localhost
register: query_result
- name: Query all access sub port blocks in the fabric
aci_access_sub_port_block_to_access_port:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_interface_profile=dict(type='str', aliases=['leaf_interface_profile_name']), # Not required for querying all objects
access_port_selector=dict(type='str', aliases=['name', 'access_port_selector_name']), # Not required for querying all objects
leaf_port_blk=dict(type='str', aliases=['leaf_port_blk_name']), # Not required for querying all objects
leaf_port_blk_description=dict(type='str'),
from_port=dict(type='str', aliases=['from', 'fromPort', 'from_port_range']), # Not required for querying all objects and deleting sub port blocks
to_port=dict(type='str', aliases=['to', 'toPort', 'to_port_range']), # Not required for querying all objects and deleting sub port blocks
from_sub_port=dict(type='str', aliases=['fromSubPort', 'from_sub_port_range']), # Not required for querying all objects and deleting sub port blocks
to_sub_port=dict(type='str', aliases=['toSubPort', 'to_sub_port_range']), # Not required for querying all objects and deleting sub port blocks
from_card=dict(type='str', aliases=['from_card_range']),
to_card=dict(type='str', aliases=['to_card_range']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['access_port_selector', 'leaf_port_blk', 'leaf_interface_profile']],
['state', 'present', ['access_port_selector', 'leaf_port_blk', 'from_port', 'to_port', 'from_sub_port', 'to_sub_port', 'leaf_interface_profile']],
],
)
leaf_interface_profile = module.params['leaf_interface_profile']
access_port_selector = module.params['access_port_selector']
leaf_port_blk = module.params['leaf_port_blk']
leaf_port_blk_description = module.params['leaf_port_blk_description']
from_port = module.params['from_port']
to_port = module.params['to_port']
from_sub_port = module.params['from_sub_port']
to_sub_port = module.params['to_sub_port']
from_card = module.params['from_card']
to_card = module.params['to_card']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAccPortP',
aci_rn='infra/accportprof-{0}'.format(leaf_interface_profile),
module_object=leaf_interface_profile,
target_filter={'name': leaf_interface_profile},
),
subclass_1=dict(
aci_class='infraHPortS',
# NOTE: normal rn: hports-{name}-typ-{type}, hence here hardcoded to range for purposes of module
aci_rn='hports-{0}-typ-range'.format(access_port_selector),
module_object=access_port_selector,
target_filter={'name': access_port_selector},
),
subclass_2=dict(
aci_class='infraSubPortBlk',
aci_rn='subportblk-{0}'.format(leaf_port_blk),
module_object=leaf_port_blk,
target_filter={'name': leaf_port_blk},
),
)
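# The three classes above chain into a DN of the form:
# uni/infra/accportprof-<leaf_interface_profile>/hports-<access_port_selector>-typ-range/subportblk-<leaf_port_blk>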
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraSubPortBlk',
class_config=dict(
descr=leaf_port_blk_description,
name=leaf_port_blk,
fromPort=from_port,
toPort=to_port,
fromSubPort=from_sub_port,
toSubPort=to_sub_port,
fromCard=from_card,
toCard=to_card,
# type='range',
),
)
aci.get_diff(aci_class='infraSubPortBlk')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| mit |
thaim/ansible | lib/ansible/modules/storage/purestorage/purefb_dsrole.py | 38 | 6212 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefb_dsrole
version_added: '2.8'
short_description: Configure FlashBlade Management Directory Service Roles
description:
- Set or erase directory services role configurations.
author:
- Pure Storage Ansible Team (@sdodsley) <[email protected]>
options:
state:
description:
- Create or delete directory service role
default: present
type: str
choices: [ absent, present ]
role:
description:
- The directory service role to work on
choices: [ array_admin, ops_admin, readonly, storage_admin ]
type: str
group_base:
description:
- Specifies where the configured group is located in the directory
tree. This field consists of Organizational Units (OUs) that combine
with the base DN attribute and the configured group CNs to complete
the full Distinguished Name of the groups. The group base should
specify OU= for each OU and multiple OUs should be separated by commas.
The order of OUs is important and should get larger in scope from left
to right.
- Each OU should not exceed 64 characters in length.
type: str
group:
description:
- Sets the common Name (CN) of the configured directory service group
containing users for the FlashBlade. This name should be just the
Common Name of the group without the CN= specifier.
- Common Names should not exceed 64 characters in length.
type: str
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = r'''
- name: Delete existing array_admin directory service role
purefb_dsrole:
role: array_admin
state: absent
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Create array_admin directory service role
purefb_dsrole:
role: array_admin
group_base: "OU=PureGroups,OU=SANManagers"
group: pureadmins
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Update ops_admin directory service role
purefb_dsrole:
role: ops_admin
group_base: "OU=PureGroups"
group: opsgroup
fb_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
HAS_PURITY_FB = True
try:
from purity_fb import DirectoryServiceRole
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
def update_role(module, blade):
"""Update Directory Service Role"""
changed = False
role = blade.directory_services.list_directory_services_roles(names=[module.params['role']])
if role.items[0].group_base != module.params['group_base'] or role.items[0].group != module.params['group']:
try:
role = DirectoryServiceRole(group_base=module.params['group_base'],
group=module.params['group'])
blade.directory_services.update_directory_services_roles(names=[module.params['role']],
directory_service_role=role)
changed = True
except Exception:
module.fail_json(msg='Update Directory Service Role {0} failed'.format(module.params['role']))
module.exit_json(changed=changed)
def delete_role(module, blade):
"""Delete Directory Service Role"""
changed = False
try:
role = DirectoryServiceRole(group_base='',
group='')
blade.directory_services.update_directory_services_roles(names=[module.params['role']],
directory_service_role=role)
changed = True
except Exception:
module.fail_json(msg='Delete Directory Service Role {0} failed'.format(module.params['role']))
module.exit_json(changed=changed)
def create_role(module, blade):
"""Create Directory Service Role"""
changed = False
try:
role = DirectoryServiceRole(group_base=module.params['group_base'],
group=module.params['group'])
blade.directory_services.update_directory_services_roles(names=[module.params['role']],
directory_service_role=role)
changed = True
except Exception:
module.fail_json(msg='Create Directory Service Role {0} failed: Check configuration'.format(module.params['role']))
module.exit_json(changed=changed)
def main():
argument_spec = purefb_argument_spec()
argument_spec.update(dict(
role=dict(required=True, type='str', choices=['array_admin', 'ops_admin', 'readonly', 'storage_admin']),
state=dict(type='str', default='present', choices=['absent', 'present']),
group_base=dict(type='str'),
group=dict(type='str'),
))
required_together = [['group', 'group_base']]
module = AnsibleModule(argument_spec,
required_together=required_together,
supports_check_mode=False)
if not HAS_PURITY_FB:
module.fail_json(msg='purity_fb sdk is required for this module')
state = module.params['state']
blade = get_blade(module)
role_configured = False
role = blade.directory_services.list_directory_services_roles(names=[module.params['role']])
if role.items[0].group is not None:
role_configured = True
if state == 'absent' and role_configured:
delete_role(module, blade)
elif role_configured and state == 'present':
update_role(module, blade)
elif not role_configured and state == 'present':
create_role(module, blade)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| mit |
lihui7115/ChromiumGStreamerBackend | PRESUBMIT_test_mocks.py | 28 | 3773 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import re
import subprocess
import sys
class MockInputApi(object):
"""Mock class for the InputApi class.
This class can be used for unittests for presubmit by initializing the files
attribute as the list of changed files.
"""
def __init__(self):
self.json = json
self.re = re
self.os_path = os.path
self.python_executable = sys.executable
self.subprocess = subprocess
self.files = []
self.is_committing = False
self.change = MockChange([])
def AffectedFiles(self, file_filter=None):
return self.files
def AffectedSourceFiles(self, file_filter=None):
return self.files
def LocalPaths(self):
return self.files
def PresubmitLocalPath(self):
return os.path.dirname(__file__)
def ReadFile(self, filename, mode='rU'):
if hasattr(filename, 'AbsoluteLocalPath'):
filename = filename.AbsoluteLocalPath()
for file_ in self.files:
if file_.LocalPath() == filename:
return '\n'.join(file_.NewContents())
# Otherwise, file is not in our mock API.
raise IOError, "No such file or directory: '%s'" % filename
class MockOutputApi(object):
"""Mock class for the OutputApi class.
An instance of this class can be passed to presubmit unittests for outputting
various types of results.
"""
class PresubmitResult(object):
def __init__(self, message, items=None, long_text=''):
self.message = message
self.items = items
self.long_text = long_text
def __repr__(self):
return self.message
class PresubmitError(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'error'
class PresubmitPromptWarning(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'warning'
class PresubmitNotifyResult(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'notify'
class PresubmitPromptOrNotify(PresubmitResult):
def __init__(self, message, items=None, long_text=''):
MockOutputApi.PresubmitResult.__init__(self, message, items, long_text)
self.type = 'promptOrNotify'
class MockFile(object):
"""Mock class for the File class.
This class can be used to form the mock list of changed files in
MockInputApi for presubmit unittests.
"""
def __init__(self, local_path, new_contents):
self._local_path = local_path
self._new_contents = new_contents
self._changed_contents = [(i + 1, l) for i, l in enumerate(new_contents)]
def ChangedContents(self):
return self._changed_contents
def NewContents(self):
return self._new_contents
def LocalPath(self):
return self._local_path
def rfind(self, p):
"""os.path.basename is called on MockFile so we need an rfind method."""
return self._local_path.rfind(p)
def __getitem__(self, i):
"""os.path.basename is called on MockFile so we need a get method."""
return self._local_path[i]
class MockAffectedFile(MockFile):
def AbsoluteLocalPath(self):
return self._local_path
class MockChange(object):
"""Mock class for Change class.
This class can be used in presubmit unittests to mock the query of the
current change.
"""
def __init__(self, changed_files):
self._changed_files = changed_files
def LocalPaths(self):
return self._changed_files
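# A minimal, hypothetical sketch (not part of the original file) of how these mocks
# are typically wired together in a presubmit unittest; the file path and contents
# below are made up for illustration.
if __name__ == '__main__':
    mock_input_api = MockInputApi()
    mock_input_api.files = [
        MockAffectedFile('chrome/browser/example.cc', ['int x = 0;']),
    ]
    print([f.LocalPath() for f in mock_input_api.AffectedFiles()])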
| bsd-3-clause |
angad/libjingle-mac | scons-2.2.0/build/lib/SCons/Memoize.py | 14 | 9676 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Memoize.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__doc__ = """Memoizer
A metaclass implementation to count hits and misses of the computed
values that various methods cache in memory.
Use of this module assumes that the wrapped methods are coded to cache their
values in a consistent way. Here is an example of wrapping a method
that returns a computed value, with no input parameters:
memoizer_counters = [] # Memoization
memoizer_counters.append(SCons.Memoize.CountValue('foo')) # Memoization
def foo(self):
try: # Memoization
return self._memo['foo'] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_foo_value()
self._memo['foo'] = result # Memoization
return result
Here is an example of wrapping a method that will return different values
based on one or more input arguments:
def _bar_key(self, argument): # Memoization
return argument # Memoization
memoizer_counters.append(SCons.Memoize.CountDict('bar', _bar_key)) # Memoization
def bar(self, argument):
memo_key = argument # Memoization
try: # Memoization
memo_dict = self._memo['bar'] # Memoization
except KeyError: # Memoization
memo_dict = {} # Memoization
self._memo['bar'] = memo_dict # Memoization
else: # Memoization
try: # Memoization
return memo_dict[memo_key] # Memoization
except KeyError: # Memoization
pass # Memoization
result = self.compute_bar_value(argument)
memo_dict[memo_key] = result # Memoization
return result
At one point we avoided replicating this sort of logic in all the methods
by putting it right into this module, but we've moved away from that at
present (see the "Historical Note" below).
Deciding what to cache is tricky, because different configurations
can have radically different performance tradeoffs, and because the
tradeoffs involved are often so non-obvious. Consequently, deciding
whether or not to cache a given method will likely be more of an art than
a science, but should still be based on available data from this module.
Here are some VERY GENERAL guidelines about deciding whether or not to
cache return values from a method that's being called a lot:
-- The first question to ask is, "Can we change the calling code
so this method isn't called so often?" Sometimes this can be
done by changing the algorithm. Sometimes the *caller* should
be memoized, not the method you're looking at.
-- The memoized function should be timed with multiple configurations
to make sure it doesn't inadvertently slow down some other
configuration.
-- When memoizing values based on a dictionary key composed of
input arguments, you don't need to use all of the arguments
if some of them don't affect the return values.
Historical Note: The initial Memoizer implementation actually handled
the caching of values for the wrapped methods, based on a set of generic
algorithms for computing hashable values based on the method's arguments.
This collected caching logic nicely, but had two drawbacks:
Running arguments through a generic key-conversion mechanism is slower
(and less flexible) than just coding these things directly. Since the
methods that need memoized values are generally performance-critical,
slowing them down in order to collect the logic isn't the right
tradeoff.
Use of the memoizer really obscured what was being called, because
all the memoized methods were wrapped with re-used generic methods.
This made it more difficult, for example, to use the Python profiler
to figure out how to optimize the underlying methods.
"""
import types
# A flag controlling whether or not we actually use memoization.
use_memoizer = None
CounterList = []
class Counter(object):
"""
Base class for counting memoization hits and misses.
We expect that the metaclass initialization will have filled in
the .name attribute that represents the name of the function
being counted.
"""
def __init__(self, method_name):
"""
"""
self.method_name = method_name
self.hit = 0
self.miss = 0
CounterList.append(self)
def display(self):
fmt = " %7d hits %7d misses %s()"
print fmt % (self.hit, self.miss, self.name)
def __cmp__(self, other):
try:
return cmp(self.name, other.name)
except AttributeError:
return 0
class CountValue(Counter):
"""
A counter class for simple, atomic memoized values.
A CountValue object should be instantiated in a class for each of
the class's methods that memoizes its return value by simply storing
the return value in its _memo dictionary.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether
its memoized value has already been set (a hit) or not (a miss).
"""
def __call__(self, *args, **kw):
obj = args[0]
if self.method_name in obj._memo:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class CountDict(Counter):
"""
A counter class for memoized values stored in a dictionary, with
keys based on the method's input arguments.
A CountDict object is instantiated in a class for each of the
class's methods that memoizes its return value in a dictionary,
indexed by some key that can be computed from one or more of
its input arguments.
We expect that the metaclass initialization will fill in the
.underlying_method attribute with the method that we're wrapping.
We then call the underlying_method method after counting whether the
computed key value is already present in the memoization dictionary
(a hit) or not (a miss).
"""
def __init__(self, method_name, keymaker):
"""
"""
Counter.__init__(self, method_name)
self.keymaker = keymaker
def __call__(self, *args, **kw):
obj = args[0]
try:
memo_dict = obj._memo[self.method_name]
except KeyError:
self.miss = self.miss + 1
else:
key = self.keymaker(*args, **kw)
if key in memo_dict:
self.hit = self.hit + 1
else:
self.miss = self.miss + 1
return self.underlying_method(*args, **kw)
class Memoizer(object):
"""Object which performs caching of method calls for its 'primary'
instance."""
def __init__(self):
pass
def Dump(title=None):
if title:
print title
CounterList.sort()
for counter in CounterList:
counter.display()
class Memoized_Metaclass(type):
def __init__(cls, name, bases, cls_dict):
super(Memoized_Metaclass, cls).__init__(name, bases, cls_dict)
for counter in cls_dict.get('memoizer_counters', []):
method_name = counter.method_name
counter.name = cls.__name__ + '.' + method_name
counter.underlying_method = cls_dict[method_name]
replacement_method = types.MethodType(counter, None, cls)
setattr(cls, method_name, replacement_method)
def EnableMemoization():
global use_memoizer
use_memoizer = 1
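# A minimal, hypothetical sketch (not part of the original module) of a class wired
# up for hit/miss counting; it follows the CountValue caching convention shown in the
# module docstring and, like the rest of this file, assumes Python 2 semantics.
if __name__ == '__main__':
    class _Example(object):
        __metaclass__ = Memoized_Metaclass
        memoizer_counters = [CountValue('value')]               # Memoization
        def __init__(self):
            self._memo = {}                                     # Memoization
        def value(self):
            try:                                                # Memoization
                return self._memo['value']                      # Memoization
            except KeyError:                                    # Memoization
                pass                                            # Memoization
            result = 42  # stand-in for an expensive computation
            self._memo['value'] = result                        # Memoization
            return result
    example = _Example()
    example.value()  # first call: a miss
    example.value()  # second call: a hit
    Dump('Memoizer counts:')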
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
yig/intpy | intpy/src/support/__init__.py | 1 | 1101 | # support/__init__.py
#
# Copyright 2008 Rafael Menezes Barreto <[email protected],
# [email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""IntPy support sub-package
This sub-package organizes the code of support for IntPy package.
It was developed in CIn/UFPE (Brazil) by Rafael Menezes Barreto
<[email protected], [email protected]> and it's free software.
"""
from intpy.support import rounding
from intpy.support import stdfunc
from intpy.support.general import *
| gpl-2.0 |
consbio/gis-metadata-parser | gis_metadata/iso_metadata_parser.py | 1 | 32865 | """ A module to contain utility ISO-19115 metadata parsing helpers """
import six
from collections import OrderedDict
from copy import deepcopy
from frozendict import frozendict, FrozenOrderedDict
from parserutils.collections import filter_empty, reduce_value, wrap_value
from parserutils.elements import get_element_name, get_element_text, get_elements_text
from parserutils.elements import get_elements, get_remote_element, insert_element, remove_element
from parserutils.elements import XPATH_DELIM
from gis_metadata.exceptions import InvalidContent
from gis_metadata.metadata_parser import MetadataParser
from gis_metadata.utils import DATE_TYPE, DATE_TYPE_SINGLE, DATE_TYPE_MULTIPLE
from gis_metadata.utils import DATE_TYPE_RANGE, DATE_TYPE_RANGE_BEGIN, DATE_TYPE_RANGE_END
from gis_metadata.utils import ATTRIBUTES
from gis_metadata.utils import CONTACTS
from gis_metadata.utils import BOUNDING_BOX
from gis_metadata.utils import DATES
from gis_metadata.utils import DIGITAL_FORMS
from gis_metadata.utils import KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME
from gis_metadata.utils import LARGER_WORKS
from gis_metadata.utils import PROCESS_STEPS
from gis_metadata.utils import RASTER_DIMS, RASTER_INFO
from gis_metadata.utils import COMPLEX_DEFINITIONS, ParserProperty
from gis_metadata.utils import format_xpaths, get_default_for_complex, get_default_for_complex_sub
from gis_metadata.utils import parse_complex_list, parse_property, update_complex_list, update_property
iteritems = getattr(six, 'iteritems')
string_types = getattr(six, 'string_types')
six_moves = getattr(six, 'moves')
xrange = getattr(six_moves, 'xrange')
ISO_ROOTS = ('MD_Metadata', 'MI_Metadata')
KEYWORD_PROPS = (KEYWORDS_PLACE, KEYWORDS_STRATUM, KEYWORDS_TEMPORAL, KEYWORDS_THEME)
KEYWORD_TYPES = frozendict({
KEYWORDS_PLACE: 'place',
KEYWORDS_STRATUM: 'stratum',
KEYWORDS_TEMPORAL: 'temporal',
KEYWORDS_THEME: 'theme'
})
# For appending digital form content to ISO distribution format specs
ISO_DIGITAL_FORMS_DELIM = '@------------------------------@'
# Define backup locations for attribute sub-properties and dimension type property
ISO_DEFINITIONS = dict({k: dict(v) for k, v in iteritems(COMPLEX_DEFINITIONS)})
ISO_DEFINITIONS[ATTRIBUTES].update({
'_definition_source': '{_definition_src}',
'__definition_source': '{__definition_src}',
'___definition_source': '{___definition_src}'
})
ISO_DEFINITIONS[RASTER_DIMS]['_type'] = '{_type}'
ISO_DEFINITIONS = frozendict({k: frozendict(v) for k, v in iteritems(ISO_DEFINITIONS)})
ISO_TAG_ROOTS = OrderedDict((
# First process private dependency tags (order enforced by key sorting)
('_content_coverage', 'contentInfo/MD_CoverageDescription'),
('_dataqual', 'dataQualityInfo/DQ_DataQuality'),
('_dataqual_lineage', '{_dataqual}/lineage/LI_Lineage'),
('_dataqual_report', '{_dataqual}/report'),
('_distinfo', 'distributionInfo/MD_Distribution'),
('_distinfo_dist', '{_distinfo}/distributor/MD_Distributor'),
('_distinfo_proc', '{_distinfo_dist}/distributionOrderProcess/MD_StandardOrderProcess'),
('_distinfo_resp', '{_distinfo_dist}/distributorContact/CI_ResponsibleParty'),
('_distinfo_resp_contact', '{_distinfo_resp}/contactInfo/CI_Contact'),
('_distinfo_rsrc', '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine/CI_OnlineResource'),
('_idinfo', 'identificationInfo/MD_DataIdentification'),
('_idinfo_aggregate', '{_idinfo}/aggregationInfo/MD_AggregateInformation'),
('_idinfo_aggregate_citation', '{_idinfo_aggregate}/aggregateDataSetName/CI_Citation'),
('_idinfo_aggregate_contact', '{_idinfo_aggregate_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_citation', '{_idinfo}/citation/CI_Citation'),
('_idinfo_citresp', '{_idinfo_citation}/citedResponsibleParty/CI_ResponsibleParty'),
('_idinfo_extent', '{_idinfo}/extent/EX_Extent'),
('_idinfo_keywords', '{_idinfo}/descriptiveKeywords/MD_Keywords'),
('_idinfo_resp', '{_idinfo}/pointOfContact/CI_ResponsibleParty'),
('_idinfo_resp_contact', '{_idinfo_resp}/contactInfo/CI_Contact'),
('_srinfo_grid_rep', 'spatialRepresentationInfo/MD_GridSpatialRepresentation'),
('_srinfo_grid_dim', '{_srinfo_grid_rep}/axisDimensionProperties/MD_Dimension'),
# Supported in separate file ISO-19110: FC_FeatureCatalog
('_attr_root', 'FC_FeatureCatalogue'),
('_attr_base', 'featureType/FC_FeatureType/carrierOfCharacteristics/FC_FeatureAttribute'),
('_attr_def', '{_attr_base}/definitionReference/FC_DefinitionReference/definitionSource/FC_DefinitionSource'),
('_attr_src', '{_attr_def}/source/CI_Citation/citedResponsibleParty/CI_ResponsibleParty'),
# References to separate file ISO-19110 from: MD_Metadata
('_attr_citation', 'contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation'),
('_attr_contact', '{_attr_citation}/CI_Citation/citedResponsibleParty/CI_ResponsibleParty/contactInfo/CI_Contact'),
('_attr_contact_url', '{_attr_contact}/onlineResource/CI_OnlineResource/linkage/URL')
))
# Two passes required because of self references within roots dict
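# For example, '_dataqual_lineage' starts out as '{_dataqual}/lineage/LI_Lineage'; the
# first pass expands '{_dataqual}' to 'dataQualityInfo/DQ_DataQuality', and the second
# pass resolves roots whose substituted values themselves contained placeholders.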
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS.update(format_xpaths(ISO_TAG_ROOTS, **ISO_TAG_ROOTS))
ISO_TAG_ROOTS = FrozenOrderedDict(ISO_TAG_ROOTS)
ISO_TAG_FORMATS = {
# Property-specific xpath roots: the base from which each element repeats
'_attribute_accuracy_root': '{_dataqual_report}',
'_attributes_root': 'featureType/FC_FeatureType/carrierOfCharacteristics',
'_bounding_box_root': '{_idinfo_extent}/geographicElement',
'_contacts_root': '{_idinfo}/pointOfContact',
'_dataset_completeness_root': '{_dataqual_report}',
'_dates_root': '{_idinfo_extent}/temporalElement',
'_digital_forms_root': '{_distinfo}/distributionFormat',
'_dist_liability_root': '{_idinfo}/resourceConstraints',
'_transfer_options_root': '{_distinfo}/transferOptions/MD_DigitalTransferOptions/onLine',
'_keywords_root': '{_idinfo}/descriptiveKeywords',
'_larger_works_root': '{_idinfo_aggregate_citation}',
'_process_steps_root': '{_dataqual_lineage}/processStep',
'_raster_info_root': '{_srinfo_grid_rep}/axisDimensionProperties',
'_use_constraints_root': '{_idinfo}/resourceConstraints',
# Then process public dependent tags
'title': '{_idinfo_citation}/title/CharacterString',
'abstract': '{_idinfo}/abstract/CharacterString',
'purpose': '{_idinfo}/purpose/CharacterString',
'supplementary_info': '{_idinfo}/supplementalInformation/CharacterString',
'online_linkages': '{_idinfo_citresp}/contactInfo/CI_Contact/onlineResource/CI_OnlineResource/linkage/URL',
'originators': '{_idinfo_citresp}/organisationName/CharacterString',
'publish_date': '{_idinfo_citation}/date/CI_Date/date/Date',
'publish_date_type': '{_idinfo_citation}/date/CI_Date/dateType/CI_DateTypeCode',
'data_credits': '{_idinfo}/credit/CharacterString',
CONTACTS: '{_idinfo_resp}/{{ct_path}}',
'dist_contact_org': '{_distinfo_resp}/organisationName/CharacterString',
'dist_contact_person': '{_distinfo_resp}/individualName/CharacterString',
'dist_address_type': '{_distinfo_resp_contact}/address/@type',
'dist_address': '{_distinfo_resp_contact}/address/CI_Address/deliveryPoint/CharacterString',
'dist_city': '{_distinfo_resp_contact}/address/CI_Address/city/CharacterString',
'dist_state': '{_distinfo_resp_contact}/address/CI_Address/administrativeArea/CharacterString',
'dist_postal': '{_distinfo_resp_contact}/address/CI_Address/postalCode/CharacterString',
'dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/CharacterString',
'_dist_country': '{_distinfo_resp_contact}/address/CI_Address/country/Country', # If not in CharacterString
'dist_phone': '{_distinfo_resp_contact}/phone/CI_Telephone/voice/CharacterString',
'dist_email': '{_distinfo_resp_contact}/address/CI_Address/electronicMailAddress/CharacterString',
'dist_liability': '{_idinfo}/resourceConstraints/MD_LegalConstraints/otherConstraints/CharacterString',
'processing_fees': '{_distinfo_proc}/fees/CharacterString',
'processing_instrs': '{_distinfo_proc}/orderingInstructions/CharacterString',
'resource_desc': '{_idinfo}/resourceSpecificUsage/MD_Usage/specificUsage/CharacterString',
'tech_prerequisites': '{_idinfo}/environmentDescription/CharacterString',
ATTRIBUTES: '{_attr_base}/{{ad_path}}',
'_attributes_file': '{_attr_citation}/@href',
'__attributes_file': '{_attr_contact_url}', # If not in above: "_attr_citation/@href"
'attribute_accuracy': '{_dataqual_report}/DQ_QuantitativeAttributeAccuracy/measureDescription/CharacterString',
BOUNDING_BOX: '{_idinfo_extent}/geographicElement/EX_GeographicBoundingBox/{{bbox_path}}',
'dataset_completeness': '{_dataqual_report}/DQ_CompletenessOmission/measureDescription/CharacterString',
DIGITAL_FORMS: '{_distinfo}/distributionFormat/MD_Format/{{df_path}}',
'_access_desc': '{_distinfo_rsrc}/description/CharacterString',
'_access_instrs': '{_distinfo_rsrc}/protocol/CharacterString',
'_network_resource': '{_distinfo_rsrc}/linkage/URL',
PROCESS_STEPS: '{_dataqual_lineage}/processStep/LI_ProcessStep/{{ps_path}}',
LARGER_WORKS: '{_idinfo_aggregate_citation}/{{lw_path}}',
'_lw_citation': '{_idinfo_aggregate_contact}/{{lw_path}}',
'_lw_collective': '{_idinfo_aggregate_citation}/collectiveTitle/CharacterString',
'_lw_contact': '{_idinfo_aggregate_contact}/contactInfo/CI_Contact/{{lw_path}}',
'_lw_linkage': '{_idinfo_aggregate_contact}/contactInfo/CI_Contact/onlineResource/CI_OnlineResource/{{lw_path}}',
RASTER_INFO: '{_srinfo_grid_dim}/{{ri_path}}',
'_ri_num_dims': '{_srinfo_grid_rep}/numberOfDimensions/Integer',
'other_citation_info': '{_idinfo_citation}/otherCitationDetails/CharacterString',
'use_constraints': '{_idinfo}/resourceConstraints/MD_Constraints/useLimitation/CharacterString',
DATES: '{_idinfo_extent}/temporalElement/EX_TemporalExtent/extent/{{type_path}}',
KEYWORDS_PLACE: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_STRATUM: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_TEMPORAL: '{_idinfo_keywords}/keyword/CharacterString',
KEYWORDS_THEME: '{_idinfo_keywords}/keyword/CharacterString'
}
# Apply XPATH root formats to the basic data map formats
ISO_TAG_FORMATS.update(ISO_TAG_ROOTS)
ISO_TAG_FORMATS.update(format_xpaths(ISO_TAG_FORMATS, **ISO_TAG_ROOTS))
ISO_TAG_FORMATS = frozendict(ISO_TAG_FORMATS)
ISO_TAG_PRIMITIVES = frozenset({
'Binary', 'Boolean', 'CharacterString',
'Date', 'DateTime', 'timePosition',
'Decimal', 'Integer', 'Real', 'RecordType',
'CI_DateTypeCode', 'MD_KeywordTypeCode', 'URL'
})
class IsoParser(MetadataParser):
""" A class to parse metadata files conforming to the ISO-19115 standard """
def _init_data_map(self):
""" OVERRIDDEN: Initialize required ISO-19115 data map with XPATHS and specialized functions """
if self._data_map is not None:
return  # Initialization happens once
# Parse and validate the ISO metadata root
if self._xml_tree is None:
iso_root = ISO_ROOTS[0]
else:
iso_root = get_element_name(self._xml_tree)
if iso_root not in ISO_ROOTS:
raise InvalidContent('Invalid XML root for ISO-19115 standard: {root}', root=iso_root)
iso_data_map = {'_root': iso_root}
iso_data_map.update(ISO_TAG_ROOTS)
iso_data_map.update(ISO_TAG_FORMATS)
iso_data_structures = {}
# Capture and format complex XPATHs
ad_format = iso_data_map[ATTRIBUTES]
ft_source = iso_data_map['_attr_src'].replace('/carrierOfCharacteristics/FC_FeatureAttribute', '')
iso_data_structures[ATTRIBUTES] = format_xpaths(
ISO_DEFINITIONS[ATTRIBUTES],
label=ad_format.format(ad_path='memberName/LocalName'),
aliases=ad_format.format(ad_path='aliases/LocalName'), # Not in spec
definition=ad_format.format(ad_path='definition/CharacterString'),
# First try to populate attribute definition source from FC_FeatureAttribute
definition_src=iso_data_map['_attr_src'] + '/organisationName/CharacterString',
_definition_src=iso_data_map['_attr_src'] + '/individualName/CharacterString',
# Then assume feature type source is the same as attribute: populate from FC_FeatureType
__definition_src=ft_source + '/organisationName/CharacterString',
___definition_src=ft_source + '/individualName/CharacterString'
)
bb_format = iso_data_map[BOUNDING_BOX]
iso_data_structures[BOUNDING_BOX] = format_xpaths(
ISO_DEFINITIONS[BOUNDING_BOX],
east=bb_format.format(bbox_path='eastBoundLongitude/Decimal'),
south=bb_format.format(bbox_path='southBoundLatitude/Decimal'),
west=bb_format.format(bbox_path='westBoundLongitude/Decimal'),
north=bb_format.format(bbox_path='northBoundLatitude/Decimal')
)
ct_format = iso_data_map[CONTACTS]
iso_data_structures[CONTACTS] = format_xpaths(
ISO_DEFINITIONS[CONTACTS],
name=ct_format.format(ct_path='individualName/CharacterString'),
organization=ct_format.format(ct_path='organisationName/CharacterString'),
position=ct_format.format(ct_path='positionName/CharacterString'),
email=ct_format.format(
ct_path='contactInfo/CI_Contact/address/CI_Address/electronicMailAddress/CharacterString'
)
)
dt_format = iso_data_map[DATES]
iso_data_structures[DATES] = {
DATE_TYPE_MULTIPLE: dt_format.format(type_path='TimeInstant/timePosition'),
DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path='TimePeriod/begin/TimeInstant/timePosition'),
DATE_TYPE_RANGE_END: dt_format.format(type_path='TimePeriod/end/TimeInstant/timePosition'),
DATE_TYPE_SINGLE: dt_format.format(type_path='TimeInstant/timePosition') # Same as multiple
}
iso_data_structures[DATES][DATE_TYPE_RANGE] = [
iso_data_structures[DATES][DATE_TYPE_RANGE_BEGIN],
iso_data_structures[DATES][DATE_TYPE_RANGE_END]
]
df_format = iso_data_map[DIGITAL_FORMS]
iso_data_structures[DIGITAL_FORMS] = format_xpaths(
ISO_DEFINITIONS[DIGITAL_FORMS],
name=df_format.format(df_path='name/CharacterString'),
content='', # Not supported in ISO-19115 (appending to spec)
decompression=df_format.format(df_path='fileDecompressionTechnique/CharacterString'),
version=df_format.format(df_path='version/CharacterString'),
specification=df_format.format(df_path='specification/CharacterString'),
access_desc=iso_data_map['_access_desc'],
access_instrs=iso_data_map['_access_instrs'],
network_resource=iso_data_map['_network_resource']
)
keywords_structure = {
'keyword_root': 'MD_Keywords/keyword',
'keyword_type': 'MD_Keywords/type/MD_KeywordTypeCode',
'keyword': 'MD_Keywords/keyword/CharacterString'
}
for keyword_prop in KEYWORD_PROPS:
iso_data_structures[keyword_prop] = deepcopy(keywords_structure)
lw_format = iso_data_map[LARGER_WORKS]
iso_data_structures[LARGER_WORKS] = format_xpaths(
ISO_DEFINITIONS[LARGER_WORKS],
title=lw_format.format(lw_path='title/CharacterString'),
edition=lw_format.format(lw_path='edition/CharacterString'),
origin=iso_data_map['_lw_citation'].format(lw_path='individualName/CharacterString'),
online_linkage=iso_data_map['_lw_linkage'].format(lw_path='linkage/URL'),
other_citation=lw_format.format(lw_path='otherCitationDetails/CharacterString'),
date=lw_format.format(lw_path='editionDate/Date'),
place=iso_data_map['_lw_contact'].format(lw_path='address/CI_Address/city/CharacterString'),
info=iso_data_map['_lw_citation'].format(lw_path='organisationName/CharacterString')
)
ps_format = iso_data_map[PROCESS_STEPS]
iso_data_structures[PROCESS_STEPS] = format_xpaths(
ISO_DEFINITIONS[PROCESS_STEPS],
description=ps_format.format(ps_path='description/CharacterString'),
date=ps_format.format(ps_path='dateTime/DateTime'),
sources=ps_format.format(
ps_path='source/LI_Source/sourceCitation/CI_Citation/alternateTitle/CharacterString'
)
)
ri_format = iso_data_map[RASTER_INFO]
iso_data_structures[RASTER_INFO] = format_xpaths(
ISO_DEFINITIONS[RASTER_DIMS],
type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode'),
_type=ri_format.format(ri_path='dimensionName/MD_DimensionNameTypeCode/@codeListValue'),
size=ri_format.format(ri_path='dimensionSize/Integer'),
value=ri_format.format(ri_path='resolution/Measure'),
units=ri_format.format(ri_path='resolution/Measure/@uom')
)
# Assign XPATHS and gis_metadata.utils.ParserProperties to data map
for prop, xpath in iteritems(dict(iso_data_map)):
if prop == ATTRIBUTES:
iso_data_map[prop] = ParserProperty(self._parse_attribute_details, self._update_attribute_details)
elif prop in (CONTACTS, PROCESS_STEPS):
iso_data_map[prop] = ParserProperty(self._parse_complex_list, self._update_complex_list)
elif prop in (BOUNDING_BOX, LARGER_WORKS):
iso_data_map[prop] = ParserProperty(self._parse_complex, self._update_complex)
elif prop == DATES:
iso_data_map[prop] = ParserProperty(self._parse_dates, self._update_dates)
elif prop == DIGITAL_FORMS:
iso_data_map[prop] = ParserProperty(self._parse_digital_forms, self._update_digital_forms)
elif prop in KEYWORD_PROPS:
iso_data_map[prop] = ParserProperty(self._parse_keywords, self._update_keywords)
elif prop == RASTER_INFO:
iso_data_map[prop] = ParserProperty(self._parse_raster_info, self._update_raster_info)
else:
iso_data_map[prop] = xpath
self._data_map = iso_data_map
self._data_structures = iso_data_structures
def _parse_attribute_details(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
parsed_attributes = self._parse_attribute_details_file(prop)
if parsed_attributes is None:
# If not in the (official) remote location, try the tree itself
parsed_attributes = self._parse_complex_list(prop)
for attribute in (a for a in parsed_attributes if not a['aliases']):
# Aliases are not in ISO standard: default to label
attribute['aliases'] = attribute['label']
return get_default_for_complex(prop, parsed_attributes)
def _parse_attribute_details_file(self, prop=ATTRIBUTES):
""" Concatenates a list of Attribute Details data structures parsed from a remote file """
# Parse content from remote file URL, which may be stored in one of two places:
# Starting at: contentInfo/MD_FeatureCatalogueDescription/featureCatalogueCitation
# ATTRIBUTE: href
# ELEMENT TEXT: CI_Citation/.../CI_Contact/onlineResource/CI_OnlineResource/linkage
self._attr_details_file_url = parse_property(
self._xml_tree, None, self._data_map, '_attributes_file'
)
if not self._attr_details_file_url:
self._attr_details_file_url = None
return None
try:
tree_to_parse = get_remote_element(self._attr_details_file_url)
except Exception:
self._attr_details_file_url = None
return None
xpath_map = self._data_structures[ATTRIBUTES]
xpath_root = self._get_xroot_for(prop)
return parse_complex_list(tree_to_parse, xpath_root, xpath_map, prop)
def _parse_digital_forms(self, prop=DIGITAL_FORMS):
""" Concatenates a list of Digital Form data structures parsed from the metadata """
xpath_map = self._data_structures[prop]
# Parse base digital form fields: 'name', 'content', 'decompression', 'version', 'specification'
xpath_root = self._data_map['_digital_forms_root']
digital_forms = parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
# Parse digital form transfer option fields: 'access_desc', 'access_instrs', 'network_resource'
xpath_root = self._data_map['_transfer_options_root']
transfer_opts = parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
# Split out digital form content that has been appended to specifications
content_delim = ISO_DIGITAL_FORMS_DELIM
for digital_form in digital_forms:
specs = reduce_value(digital_form['specification'])
specs = specs.splitlines() if isinstance(specs, string_types) else specs
specifications = wrap_value(s.strip() for s in specs)
digital_form['content'] = []
digital_form['specification'] = []
has_content = False
# For each specification, insert delim before appending content
for spec in specifications:
has_content = has_content or spec == content_delim
if not has_content:
digital_form['specification'].append(spec)
elif spec != content_delim:
digital_form['content'].append(spec)
# Reduce spec and content to single string values if possible
for form_prop in ('content', 'specification'):
digital_form[form_prop] = reduce_value(filter_empty(digital_form[form_prop], u''))
# Combine digital forms and transfer options into a single complex struct
df_len = len(digital_forms)
to_len = len(transfer_opts)
parsed_forms = []
for idx in xrange(0, max(df_len, to_len)):
digital_form = {}.fromkeys(ISO_DEFINITIONS[prop], u'')
if idx < df_len:
digital_form.update(i for i in digital_forms[idx].items() if i[1])
if idx < to_len:
digital_form.update(i for i in transfer_opts[idx].items() if i[1])
if any(digital_form.values()):
parsed_forms.append(digital_form)
return get_default_for_complex(prop, parsed_forms)
def _parse_keywords(self, prop):
""" Parse type-specific keywords from the metadata: Theme or Place """
keywords = []
if prop in KEYWORD_PROPS:
xpath_root = self._data_map['_keywords_root']
xpath_map = self._data_structures[prop]
xtype = xpath_map['keyword_type']
xpath = xpath_map['keyword']
ktype = KEYWORD_TYPES[prop]
for element in get_elements(self._xml_tree, xpath_root):
if get_element_text(element, xtype).lower() == ktype.lower():
keywords.extend(get_elements_text(element, xpath))
return keywords
def _parse_raster_info(self, prop=RASTER_INFO):
""" Collapses multiple dimensions into a single raster_info complex struct """
raster_info = {}.fromkeys(ISO_DEFINITIONS[prop], u'')
# Ensure conversion of lists to newlines is in place
raster_info['dimensions'] = get_default_for_complex_sub(
prop=prop,
subprop='dimensions',
value=parse_property(self._xml_tree, None, self._data_map, '_ri_num_dims'),
xpath=self._data_map['_ri_num_dims']
)
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
for dimension in parse_complex_list(self._xml_tree, xpath_root, xpath_map, RASTER_DIMS):
dimension_type = dimension['type'].lower()
if dimension_type == 'vertical':
raster_info['vertical_count'] = dimension['size']
elif dimension_type == 'column':
raster_info['column_count'] = dimension['size']
raster_info['x_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
elif dimension_type == 'row':
raster_info['row_count'] = dimension['size']
raster_info['y_resolution'] = u' '.join(dimension[k] for k in ['value', 'units']).strip()
return raster_info if any(raster_info[k] for k in raster_info) else {}
def _update_attribute_details(self, **update_props):
""" Update operation for ISO Attribute Details metadata: write to "MD_Metadata/featureType" """
tree_to_update = update_props['tree_to_update']
xpath = self._data_map['_attr_citation']
# Cannot write to remote file: remove the featureCatalogueCitation element
self._attr_details_file_url = None
remove_element(tree_to_update, xpath, True)
return self._update_complex_list(**update_props)
def _update_dates(self, **update_props):
"""
Update operation for ISO Dates metadata
:see: gis_metadata.utils.COMPLEX_DEFINITIONS[DATES]
"""
tree_to_update = update_props['tree_to_update']
xpath_root = self._data_map['_dates_root']
if self.dates:
date_type = self.dates[DATE_TYPE]
# First remove all date info from common root
remove_element(tree_to_update, xpath_root)
if date_type == DATE_TYPE_MULTIPLE:
xpath_root += '/TimeInstant'
elif date_type == DATE_TYPE_RANGE:
xpath_root += '/TimePeriod'
return super(IsoParser, self)._update_dates(xpath_root, **update_props)
def _update_digital_forms(self, **update_props):
"""
Update operation for ISO Digital Forms metadata
:see: gis_metadata.utils.COMPLEX_DEFINITIONS[DIGITAL_FORMS]
"""
digital_forms = wrap_value(update_props['values'])
# Update all Digital Form properties: distributionFormat*
xpath_map = self._data_structures[update_props['prop']]
dist_format_props = ('name', 'decompression', 'version', 'specification')
dist_format_xroot = self._data_map['_digital_forms_root']
dist_format_xmap = {prop: xpath_map[prop] for prop in dist_format_props}
dist_formats = []
for digital_form in digital_forms:
dist_format = {prop: digital_form[prop] for prop in dist_format_props}
if digital_form.get('content'):
dist_spec = wrap_value(digital_form.get('specification'))
dist_spec.append(ISO_DIGITAL_FORMS_DELIM)
dist_spec.extend(wrap_value(digital_form['content']))
dist_format['specification'] = dist_spec
dist_formats.append(dist_format)
update_props['values'] = dist_formats
dist_formats = update_complex_list(
xpath_root=dist_format_xroot, xpath_map=dist_format_xmap, **update_props
)
# Update all Network Resources: transferOptions+
trans_option_props = ('access_desc', 'access_instrs', 'network_resource')
trans_option_xroot = self._data_map['_transfer_options_root']
trans_option_xmap = {prop: self._data_map['_' + prop] for prop in trans_option_props}
trans_options = []
for digital_form in digital_forms:
trans_options.append({prop: digital_form[prop] for prop in trans_option_props})
update_props['values'] = trans_options
trans_options = update_complex_list(
xpath_root=trans_option_xroot, xpath_map=trans_option_xmap, **update_props
)
return {
'distribution_formats': dist_formats,
'transfer_options': trans_options
}
def _update_keywords(self, **update_props):
""" Update operation for ISO type-specific Keywords metadata: Theme or Place """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props['values']
keywords = []
if prop in KEYWORD_PROPS:
xpath_root = self._data_map['_keywords_root']
xpath_map = self._data_structures[prop]
xtype = xpath_map['keyword_type']
xroot = xpath_map['keyword_root']
xpath = xpath_map['keyword']
ktype = KEYWORD_TYPES[prop]
# Remove descriptiveKeyword nodes according to type
for element in get_elements(tree_to_update, xpath_root):
if get_element_text(element, xtype).lower() == ktype.lower():
remove_element(tree_to_update, xpath_root)
element = insert_element(tree_to_update, 0, xpath_root)
insert_element(element, 0, xtype, ktype) # Add the type node
keywords.extend(update_property(element, xroot, xpath, prop, values))
return keywords
def _update_raster_info(self, **update_props):
""" Derives multiple dimensions from a single raster_info complex struct """
tree_to_update = update_props['tree_to_update']
prop = update_props['prop']
values = update_props.pop('values')
# Update number of dimensions at raster_info root (applies to all dimensions below)
xroot, xpath = None, self._data_map['_ri_num_dims']
raster_info = [update_property(tree_to_update, xroot, xpath, prop, values.get('dimensions', u''))]
# Derive vertical, longitude, and latitude dimensions from raster_info
xpath_root = self._get_xroot_for(prop)
xpath_map = self._data_structures[prop]
v_dimension = {}
if values.get('vertical_count'):
v_dimension = v_dimension.fromkeys(xpath_map, u'')
v_dimension['type'] = 'vertical'
v_dimension['size'] = values.get('vertical_count', u'')
x_dimension = {}
if values.get('column_count') or values.get('x_resolution'):
x_dimension = x_dimension.fromkeys(xpath_map, u'')
x_dimension['type'] = 'column'
x_dimension['size'] = values.get('column_count', u'')
x_dimension['value'] = values.get('x_resolution', u'')
y_dimension = {}
if values.get('row_count') or values.get('y_resolution'):
y_dimension = y_dimension.fromkeys(xpath_map, u'')
y_dimension['type'] = 'row'
y_dimension['size'] = values.get('row_count', u'')
y_dimension['value'] = values.get('y_resolution', u'')
# Update derived dimensions as complex list, and append affected elements for return
update_props['prop'] = RASTER_DIMS
update_props['values'] = [v_dimension, x_dimension, y_dimension]
raster_info += update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
return raster_info
def update(self, use_template=False, **metadata_defaults):
""" OVERRIDDEN: Prevents writing multiple CharacterStrings per XPATH property """
self.validate()
tree_to_update = self._xml_tree if not use_template else self._get_template(**metadata_defaults)
supported_props = self._metadata_props
# Iterate over keys, and extract non-primitive root for all XPATHs
# xroot = identificationInfo/MD_DataIdentification/abstract/
# xpath = identificationInfo/MD_DataIdentification/abstract/CharacterString
#
# This prevents multiple primitive tags from being inserted under an element
for prop, xpath in iteritems(self._data_map):
if not prop.startswith('_') or prop.strip('_') in supported_props:
# Send only public or alternate properties
xroot = self._trim_xpath(xpath, prop)
values = getattr(self, prop, u'')
update_property(tree_to_update, xroot, xpath, prop, values, supported_props)
return tree_to_update
def _trim_xpath(self, xpath, prop):
""" Removes primitive type tags from an XPATH """
xroot = self._get_xroot_for(prop)
if xroot is None and isinstance(xpath, string_types):
xtags = xpath.split(XPATH_DELIM)
if xtags[-1] in ISO_TAG_PRIMITIVES:
xroot = XPATH_DELIM.join(xtags[:-1])
return xroot
| bsd-3-clause |
fedorahungary/fedinv | fedinv/fedinv/wsgi.py | 1 | 1419 | """
WSGI config for fedinv project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "fedinv.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fedinv.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-2.0 |
asrar7787/Test-Frontools | node_modules/node-gyp/gyp/pylib/gyp/generator/xcode.py | 1363 | 58344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'ios_app_extension',
'ios_watch_app',
'ios_watchkit_extension',
'mac_bundle',
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
'mac_xctest_bundle',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
'mac_framework_headers',
'mac_framework_private_headers',
]
generator_filelist_paths = None
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
'$(SDKROOT)/usr/lib',
'$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
if len(configuration_names) == 0:
configuration_names = ['Default']
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings and config files. This
# is intended to be used very sparingly. Really, almost everything should
# go into target-specific build settings sections. The project-wide
# settings are only intended to be used in cases where Xcode attempts to
# resolve variable references in a project context as opposed to a target
# context, such as when resolving sourceTree references while building up
# the tree view for UI display.
# Any values set globally are applied to all configurations, then any
# per-configuration values are applied.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in self.build_file_dict:
config_ref = self.project.AddOrGetFileInRootGroup(
self.build_file_dict['xcode_config_file'])
xccl.SetBaseConfiguration(config_ref)
build_file_configurations = self.build_file_dict.get('configurations', {})
if build_file_configurations:
for config_name in configurations:
build_file_configuration_named = \
build_file_configurations.get(config_name, {})
if build_file_configuration_named:
xcc = xccl.ConfigurationNamed(config_name)
for xck, xcv in build_file_configuration_named.get('xcode_settings',
{}).iteritems():
xcc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in build_file_configuration_named:
config_ref = self.project.AddOrGetFileInRootGroup(
build_file_configurations[config_name]['xcode_config_file'])
xcc.SetBaseConfiguration(config_ref)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
# targets is full list of targets in the project.
targets = []
# does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
has_custom_all = True
# If this target has a 'run_as' attribute, add its target to the
# targets, and add it to the test targets.
if target.get('run_as'):
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
command = target['run_as']
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
# Some test end up using sockets, files on disk, etc. and can get
# confused if more than one test runs at a time. The generator
# flag 'xcode_serialize_all_test_runs' controls the forcing of all
# tests serially. It defaults to True. To get serial runs this
# little bit of python does the same as the linux flock utility to
# make sure only one runs at a time.
command_prefix = ''
if serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
# and generate a second target that will run the tests runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
if hasattr(dependency_xct, 'test_runner'):
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
def AddSourceToTarget(source, type, pbxp, xct):
# TODO(mark): Perhaps source_extensions and library_extensions can be made a
# little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
# .o is conceptually more of a "source" than a "library," but Xcode thinks
# of "sources" as things to compile and "libraries" (or "frameworks") as
# things to link with. Adding an object file to an Xcode target's frameworks
# phase works properly.
library_extensions = ['a', 'dylib', 'framework', 'o']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext:
ext = ext[1:].lower()
if ext in source_extensions and type != 'none':
xct.SourcesPhase().AddFile(source)
elif ext in library_extensions and type != 'none':
xct.FrameworksPhase().AddFile(source)
else:
# Files that aren't added to a sources or frameworks build phase can still
# go into the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
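# e.g. is_public=True produces the settings string '{ATTRIBUTES = (Public, ); }'.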
xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
if not matches:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
"""We must escape the defines that we give to XCode so that it knows not to
split on spaces and to respect backslash and quote literals. However, we
must not quote the define, or Xcode will incorrectly intepret variables
especially $(inherited)."""
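# For example, EscapeXcodeDefine('NAME="a b"') yields 'NAME=\"a\ b\"', so the
# quotes and the space survive Xcode's argument splitting.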
return re.sub(_xcode_define_re, r'\\\1', s)
def PerformBuild(data, configurations, params):
options = params['options']
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
for config in configurations:
arguments = ['xcodebuild', '-project', xcodeproj_path]
arguments += ['-configuration', config]
print "Building [%s]: %s" % (config, arguments)
subprocess.check_call(arguments)
def CalculateGeneratorInputInfo(params):
toplevel = params['options'].toplevel_dir
if params.get('flavor') == 'ninja':
generator_dir = os.path.relpath(params['options'].generator_output or '.')
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
output_dir = os.path.normpath(os.path.join(generator_dir, output_dir))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles-xcode-ninja'))
else:
output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
upgrade_check_project_version = \
generator_flags.get('xcode_upgrade_check_project_version', None)
# Format upgrade_check_project_version with leading zeros as needed.
if upgrade_check_project_version:
upgrade_check_project_version = str(upgrade_check_project_version)
while len(upgrade_check_project_version) < 4:
upgrade_check_project_version = '0' + upgrade_check_project_version
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
# Set project-level attributes from multiple options
project_attributes = {}
if parallel_builds:
project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
if upgrade_check_project_version:
project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
project_attributes['LastTestingUpgradeCheck'] = \
upgrade_check_project_version
project_attributes['LastSwiftUpdateCheck'] = \
upgrade_check_project_version
pbxp.SetProperty('attributes', project_attributes)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle':
'com.apple.product-type.application.watchapp',
'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
}
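# For example, an 'executable' target with 'mac_bundle' set resolves to the
# 'executable+bundle' key of this map and is emitted as
# com.apple.product-type.application.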
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names)
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_', '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
code_sign = int(copy_group.get('xcode_code_sign', 0))
settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file, settings)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| mit |
nlaurens/budgetRapportage | model/budget/kostensoortgroup.py | 1 | 5986 | class KostensoortGroup:
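# A KostensoortGroup is one node in a tree of cost-type ("kostensoort")
# groups: each node keeps a reference to its parent, its child groups and
# the kostensoorten attached directly to it.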
def __init__(self, name, descr, level, parent):
self.name = name
self.descr = descr
self.parent = parent # KostensoortGroup class
self.level = level
self.kostenSoorten = {} # kostensoorten (cost types) that belong to this node (from kostensoortgroep)
self.children = [] # list of kostensoortGroup classes
def add_kostensoort(self, kostensoort, descr):
self.kostenSoorten[kostensoort] = descr
def add_child(self, child):
self.children.append(child)
def lower_level_parent(self, level):
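# Walks up the tree and returns the first node (possibly self) whose level
# is below the given level.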
if self.level < level:
return self
else:
return self.parent.lower_level_parent(level)
# Finds a child and returns that child
def find(self, name):
if self.name == name:
return self
elif self.children:
for child in self.children:
result = child.find(name)
if result is not None:
return result
else:
return None
"""
.save_as_csv
"""
def save_as_csv(self, filename):
import csv
totalDepth= max(self.list_levels([]))
kslist = self.flat_list([], totalDepth)
with open(filename, "wb") as csv_file:
writer = csv.writer(csv_file, delimiter=',')
# write header
header = []
for row in range(1, (len(kslist[0])-2)/2+1):
header.extend(['group'+str(row), 'descr-group'+str(row)])
header.extend(['ks', 'descr-ks'])
writer.writerow(header)
for line in kslist:
writer.writerow(line)
"""
input:
kslist - can be empty for top level
totalDepth as int - specifies the maximum level in this kostensoortgroup
output: list of strings: [ksgroup1, ksgroup2, .., ks, descr]
Example on how to use this function to export ksgroups to csv:
from model import ksgroup
ksgroups = ksgroup.available()
for str_group in ksgroups:
group = ksgroup.load(str_group)
filename = group.name + '.csv'
group.save_as_csv(filename)
"""
def flat_list(self, kslist, totalDepth):
if self.children:
for child in self.children:
child.flat_list(kslist, totalDepth)
if self.kostenSoorten:
ksgroup = self
grouplist = []
while ksgroup.parent:
grouplist.insert(0,ksgroup.parent.name)
grouplist.insert(1,ksgroup.parent.descr)
ksgroup = ksgroup.parent
grouplist.append(self.name)
grouplist.append(self.descr)
for ks, descr in self.kostenSoorten.iteritems():
kslist_row = grouplist[:]
# add blanks to the list before the ks/descr to make sure all
# ks end up in the last column:
# group1, group2, group3, ks, descr <- has 3 subgroups
# group1, blank, blank, ks, descr <- has only 1 subgroup
blanks = ['', '']*(totalDepth - self.level)
kslist_row.extend(blanks)
kslist_row.append(ks)
kslist_row.append(descr)
kslist.append( kslist_row )
return kslist
def walk_tree(self, maxdepth):
"helper debug function"
if self.level <= maxdepth:
self.druk_af()
for child in self.children:
child.walk_tree(maxdepth)
def druk_af(self):
"helper debug function"
print 'ksgroup ' + self.name + ' (level ' + str(self.level) + ') - ' + self.descr
if self.parent != '':
print 'belongs to parent: ' + self.parent.name
if self.kostenSoorten:
print 'contains the following kostensoorten:'
print self.kostenSoorten
if self.children:
print 'and has children:'
for child in self.children:
print child.name
print ''
# Creates a list of all levels in the tree
# for example: [1, 5, 12, 40]
def list_levels(self, levels):
for child in self.children:
levels = child.list_levels(levels)
if self.level not in levels:
levels.append(self.level)
return levels
# runs through the tree and adjusts levels
# using the translate dictionary
def correct_levels(self, translate):
for child in self.children:
child.correct_levels(translate)
self.level = translate[self.level]
return
    # normalizes the depth of levels to consecutive values 0, 1, 2, ..
def normalize_levels(self):
levels = sorted(self.list_levels([]))
i = 0
translate_table = {}
for level in levels:
translate_table[level] = i
i += 1
self.correct_levels(translate_table)
return
# Returns a dictionary of all ks from the whole tree
def get_ks_recursive(self):
ks = {}
for child in self.children:
ks.update(child.get_ks_recursive())
ks.update(self.kostenSoorten)
return ks
def save_as_txt(self, filehandle):
lvl = self.level + 1
sp = (lvl - 1) * ' '
head = lvl * '#'
filehandle.write(sp + head + self.name + ' ' + self.descr + '\n')
if self.kostenSoorten:
for ks, descr in self.kostenSoorten.iteritems():
filehandle.write(sp + ' ' + str(ks) + ' ' + descr + '\n')
filehandle.write('\n')
for child in self.children:
child.save_as_txt(filehandle)
# Returns a list of all nodes that have no children (i.e. final nodes)
def get_end_children(self, children):
if self.children:
for child in self.children:
children.extend(child.get_end_children([]))
else:
return [self]
return children
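# Illustrative usage sketch (not part of the original module): it shows how a
# small KostensoortGroup tree could be assembled and exported. The group names,
# kostensoort codes and file name below are made-up examples.
def _example_build_and_export():
    root = KostensoortGroup('personeel', 'Personnel costs', 0, '')
    salaris = KostensoortGroup('salaris', 'Salaries', 1, root)
    root.add_child(salaris)
    salaris.add_kostensoort(400100, 'Gross wages')
    salaris.add_kostensoort(400200, 'Social charges')
    root.normalize_levels()             # make levels consecutive: 0, 1, ...
    root.save_as_csv('personeel.csv')   # group columns + (ks, descr) per row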
| mit |
lanthaler/schemaorg | lib/rdflib/plugins/parsers/pyRdfa/options.py | 14 | 11840 | # -*- coding: utf-8 -*-
"""
L{Options} class: collect the possible options that govern the parsing possibilities. The module also includes the L{ProcessorGraph} class that handles the processor graph, per RDFa 1.1 (i.e., the graph containing errors and warnings).
@summary: RDFa parser (distiller)
@requires: U{RDFLib<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: options.py,v 1.20 2013-10-16 11:48:54 ivan Exp $ $Date: 2013-10-16 11:48:54 $
"""
import sys, datetime
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import Graph
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
else :
from rdflib.Graph import Graph
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from .host import HostLanguage, MediaTypes, content_to_host_language, predefined_1_0_rel, require_embedded_rdf
from . import ns_xsd, ns_distill, ns_rdfa
from . import RDFA_Error, RDFA_Warning, RDFA_Info
from .transform.lite import lite_prune
ns_dc = Namespace("http://purl.org/dc/terms/")
ns_ht = Namespace("http://www.w3.org/2006/http#")
class ProcessorGraph :
"""Wrapper around the 'processor graph', ie, the (RDF) Graph containing the warnings,
error messages, and informational messages.
"""
def __init__(self) :
self.graph = Graph()
def add_triples(self, msg, top_class, info_class, context, node) :
"""
Add an error structure to the processor graph: a bnode with a number of predicates. The structure
follows U{the processor graph vocabulary<http://www.w3.org/2010/02/rdfa/wiki/Processor_Graph_Vocabulary>} as described
on the RDFa WG Wiki page.
@param msg: the core error message, added as an object to a dc:description
@param top_class: Error, Warning, or Info; an explicit rdf:type added to the bnode
@type top_class: URIRef
@param info_class: An additional error class, added as an rdf:type to the bnode in case it is not None
@type info_class: URIRef
@param context: An additional information added, if not None, as an object with rdfa:context as a predicate
@type context: either an URIRef or a URI String (an URIRef will be created in the second case)
@param node: The node's element name that contains the error
@type node: string
@return: the bnode that serves as a subject for the errors. The caller may add additional information
@rtype: BNode
"""
# Lazy binding of relevant prefixes
self.graph.bind("dcterms", ns_dc)
self.graph.bind("pyrdfa", ns_distill)
self.graph.bind("rdf", ns_rdf)
self.graph.bind("rdfa", ns_rdfa)
self.graph.bind("ht", ns_ht)
self.graph.bind("xsd", ns_xsd)
# Python 3 foolproof way
try :
is_context_string = isinstance(context, basestring)
except :
is_context_string = isinstance(context, str)
bnode = BNode()
if node != None:
try :
full_msg = "[In element '%s'] %s" % (node.nodeName, msg)
except :
full_msg = "[In element '%s'] %s" % (node, msg)
else :
full_msg = msg
self.graph.add((bnode, ns_rdf["type"], top_class))
if info_class :
self.graph.add((bnode, ns_rdf["type"], info_class))
self.graph.add((bnode, ns_dc["description"], Literal(full_msg)))
self.graph.add((bnode, ns_dc["date"], Literal(datetime.datetime.utcnow().isoformat(),datatype=ns_xsd["dateTime"])))
if context and (isinstance(context,URIRef) or is_context_string):
htbnode = BNode()
self.graph.add( (bnode, ns_rdfa["context"],htbnode) )
self.graph.add( (htbnode, ns_rdf["type"], ns_ht["Request"]) )
self.graph.add( (htbnode, ns_ht["requestURI"], Literal("%s" % context)) )
return bnode
def add_http_context(self, subj, http_code) :
"""
Add an additional HTTP context to a message with subject in C{subj}, using the U{<http://www.w3.org/2006/http#>}
vocabulary. Typically used to extend an error structure, as created by L{add_triples}.
@param subj: an RDFLib resource, typically a blank node
@param http_code: HTTP status code
"""
bnode = BNode()
self.graph.add((subj, ns_rdfa["context"], bnode))
self.graph.add((bnode, ns_rdf["type"], ns_ht["Response"]))
self.graph.add((bnode, ns_ht["responseCode"], URIRef("http://www.w3.org/2006/http#%s" % http_code)))
class Options :
"""Settable options. An instance of this class is stored in
the L{execution context<ExecutionContext>} of the parser.
@ivar space_preserve: whether plain literals should preserve spaces at output or not
@type space_preserve: Boolean
@ivar output_default_graph: whether the 'default' graph should be returned to the user
@type output_default_graph: Boolean
@ivar output_processor_graph: whether the 'processor' graph should be returned to the user
@type output_processor_graph: Boolean
@ivar processor_graph: the 'processor' Graph
@type processor_graph: L{ProcessorGraph}
@ivar transformers: extra transformers
@type transformers: list
@ivar vocab_cache_report: whether the details of vocabulary file caching process should be reported as information (mainly for debug)
@type vocab_cache_report: Boolean
@ivar refresh_vocab_cache: whether the caching checks of vocabs should be by-passed, ie, if caches should be re-generated regardless of the stored date (important for vocab development)
@type refresh_vocab_cache: Boolean
@ivar embedded_rdf: whether embedded RDF (ie, turtle in an HTML script element or an RDF/XML content in SVG) should be extracted and added to the final graph. This is a non-standard option...
@type embedded_rdf: Boolean
@ivar vocab_expansion: whether the @vocab elements should be expanded and a mini-RDFS processing should be done on the merged graph
@type vocab_expansion: Boolean
@ivar vocab_cache: whether the system should use the vocabulary caching mechanism when expanding via the mini-RDFS, or should just fetch the graphs every time
@type vocab_cache: Boolean
@ivar host_language: the host language for the RDFa attributes. Default is HostLanguage.xhtml, but it can be HostLanguage.rdfa_core and HostLanguage.html5, or others...
@type host_language: integer (logically: an enumeration)
@ivar content_type: the content type of the host file. Default is None
@type content_type: string (logically: an enumeration)
@ivar add_informational_messages: whether informational messages should also be added to the processor graph, or only errors and warnings
@ivar experimental_features: whether experimental features should be activated; that is a developer's option...
@ivar check_lite: whether RDFa Lite should be checked, to generate warnings.
"""
def __init__(self, output_default_graph = True,
output_processor_graph = False,
space_preserve = True,
transformers = [],
embedded_rdf = True,
vocab_expansion = False,
vocab_cache = True,
vocab_cache_report = False,
refresh_vocab_cache = False,
add_informational_messages = False,
check_lite = False,
experimental_features = False
) :
self.space_preserve = space_preserve
self.transformers = transformers
self.processor_graph = ProcessorGraph()
self.output_default_graph = output_default_graph
self.output_processor_graph = output_processor_graph
self.host_language = HostLanguage.rdfa_core
self.vocab_cache_report = vocab_cache_report
self.refresh_vocab_cache = refresh_vocab_cache
self.embedded_rdf = embedded_rdf
self.vocab_expansion = vocab_expansion
self.vocab_cache = vocab_cache
self.add_informational_messages = add_informational_messages
self.check_lite = check_lite
if check_lite :
self.transformers.append(lite_prune)
self.experimental_features = experimental_features
def set_host_language(self, content_type) :
"""
Set the host language for processing, based on the recognized types. If this is not a recognized content type,
it falls back to RDFa core (i.e., XML)
@param content_type: content type
@type content_type: string
"""
if content_type in content_to_host_language :
self.host_language = content_to_host_language[content_type]
else :
self.host_language = HostLanguage.rdfa_core
if self.host_language in require_embedded_rdf :
self.embedded_rdf = True
def __str__(self) :
retval = """Current options:
preserve space : %s
output processor graph : %s
output default graph : %s
host language : %s
accept embedded RDF : %s
check rdfa lite : %s
cache vocabulary graphs : %s
"""
return retval % (self.space_preserve, self.output_processor_graph, self.output_default_graph, self.host_language, self.embedded_rdf, self.check_lite, self.vocab_cache)
def reset_processor_graph(self):
"""Empty the processor graph. This is necessary if the same options is reused
for several RDFa sources, and new error messages should be generated.
"""
self.processor_graph.graph.remove((None,None,None))
def add_warning(self, txt, warning_type=None, context=None, node=None, buggy_value=None) :
"""Add a warning to the processor graph.
@param txt: the warning text.
@keyword warning_type: Warning Class
@type warning_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
@keyword buggy_value: a special case when a 'term' is not recognized; no warning is generated for that case if the value is part of the 'usual' XHTML terms, because almost all RDFa file contains some of those and that would pollute the output
@type buggy_value: String
"""
if warning_type == ns_rdfa["UnresolvedTerm"] and buggy_value in predefined_1_0_rel :
return
return self.processor_graph.add_triples(txt, RDFA_Warning, warning_type, context, node)
def add_info(self, txt, info_type=None, context=None, node=None, buggy_value=None) :
"""Add an informational comment to the processor graph.
@param txt: the information text.
@keyword info_type: Info Class
@type info_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
@keyword buggy_value: a special case when a 'term' is not recognized; no information is generated for that case if the value is part of the 'usual' XHTML terms, because almost all RDFa file contains some of those and that would pollute the output
@type buggy_value: String
"""
if self.add_informational_messages :
return self.processor_graph.add_triples(txt, RDFA_Info, info_type, context, node)
else :
return
def add_error(self, txt, err_type=None, context=None, node=None, buggy_value=None) :
"""Add an error to the processor graph.
@param txt: the information text.
@keyword err_type: Error Class
@type err_type: URIRef
@keyword context: possible context to be added to the processor graph
@type context: URIRef or String
@keyword buggy_value: a special case when a 'term' is not recognized; no error is generated for that case if the value is part of the 'usual' XHTML terms, because almost all RDFa file contains some of those and that would pollute the output
@type buggy_value: String
"""
return self.processor_graph.add_triples(txt, RDFA_Error, err_type, context, node)
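# Illustrative usage sketch (not part of the original pyRdfa source): it shows
# how a caller might configure Options and collect processor-graph messages.
# The content type and the warning text are made-up examples.
def _example_options_usage():
    opts = Options(output_processor_graph=True)
    opts.set_host_language('application/xhtml+xml')   # unknown types fall back to rdfa_core
    opts.add_warning("example warning recorded in the processor graph")
    # messages are collected in an rdflib graph and can be serialized as usual
    return opts.processor_graph.graph.serialize(format="turtle")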
| apache-2.0 |
cgstudiomap/cgstudiomap | main/eggs/Werkzeug-0.11.2-py2.7.egg/werkzeug/contrib/lint.py | 128 | 12322 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(WSGIWarning('%s requires bytestrings, got %s' %
(context, obj.__class__.__name__)))
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
'input stream, thus making calls to '
'wsgi.input.read() unsafe. Conforming servers '
'may never return from this call.'),
stacklevel=2)
elif len(args) != 1:
warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
stacklevel=2)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
' are unsafe. Use wsgi.input.read() instead.'),
stacklevel=2)
elif len(args) == 1:
warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
'WSGI does not support this, although it\'s available '
'on all major servers.'),
stacklevel=2)
else:
raise TypeError('too many arguments passed to wsgi.input.readline()')
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
return iter(())
def close(self):
warn(WSGIWarning('application closed the input stream!'),
stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string('wsgi.error.write()', s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
            self.write(line)
def close(self):
warn(WSGIWarning('application closed the error stream!'),
stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string('write()', s)
        self._write(s)
self._chunks.append(len(s))
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
self._next = iter(iterator).next
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def next(self):
if self.closed:
warn(WSGIWarning('iterated over closed app_iter'),
stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(WSGIWarning('Application returned before it '
'started the response'), stacklevel=2)
check_string('application iterator items', rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, 'close'):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get('content-length', type=int)
if status_code == 304:
for key, value in headers:
key = key.lower()
if key not in ('expires', 'content-location') and \
is_entity_header(key):
warn(HTTPWarning('entity header %r found in 304 '
'response' % key))
if bytes_sent:
warn(HTTPWarning('304 responses must not have a body'))
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
if bytes_sent:
warn(HTTPWarning('%r responses must not have a body' %
status_code))
elif content_length is not None and content_length != bytes_sent:
warn(WSGIWarning('Content-Length and the number of bytes '
'sent to the client do not match.'))
def __del__(self):
if not self.closed:
try:
warn(WSGIWarning('Iterator was garbage collected before '
'it was closed.'))
except Exception:
pass
class LintMiddleware(object):
"""This middleware wraps an application and warns on common errors.
Among other thing it currently checks for the following problems:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Detected errors are emitted using the standard Python :mod:`warnings`
system and usually end up on :data:`stderr`.
::
from werkzeug.contrib.lint import LintMiddleware
app = LintMiddleware(app)
:param app: the application to wrap
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(WSGIWarning('WSGI environment is not a standard python dict.'),
stacklevel=4)
for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once'):
if key not in environ:
warn(WSGIWarning('required environment key %r not found'
% key), stacklevel=3)
if environ['wsgi.version'] != (1, 0):
warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
stacklevel=3)
script_name = environ.get('SCRIPT_NAME', '')
if script_name and script_name[:1] != '/':
warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
% script_name), stacklevel=3)
path_info = environ.get('PATH_INFO', '')
if path_info[:1] != '/':
warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
% path_info), stacklevel=3)
def check_start_response(self, status, headers, exc_info):
check_string('status', status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
if len(status) < 4 or status[3] != ' ':
warn(WSGIWarning('Invalid value for status %r. Valid '
'status strings are three digits, a space '
                             'and a status explanation' % status), stacklevel=3)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning('status code < 100 detected'), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning('header list is not a list'), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Header items must be 2-item tuples'),
stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning('header items must be strings'),
stacklevel=3)
if name.lower() == 'status':
warn(WSGIWarning('The status header is not supported due to '
'conflicts with the CGI spec.'),
stacklevel=3)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get('etag')
if etag is not None:
if etag.startswith('w/'):
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
location = headers.get('location')
if location is not None:
if not urlparse(location).netloc:
warn(HTTPWarning('absolute URLs required for location header'),
stacklevel=4)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(WSGIWarning('application returned string. Response will '
'send character for character to the client '
'which will kill the performance. Return a '
'list or iterable instead.'), stacklevel=3)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
if kwargs:
warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
stacklevel=2)
environ, start_response = args
self.check_environ(environ)
environ['wsgi.input'] = InputStream(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])
# hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length
environ['wsgi.file_wrapper'] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
if kwargs:
warn(WSGIWarning('no keyword arguments allowed.'))
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers,
exc_info)
return GuardedWrite(start_response(status, headers, exc_info),
chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
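# Illustrative usage sketch (not part of the original werkzeug module): wrapping
# a trivial WSGI application so that protocol violations are reported as
# warnings during development. The sample application below is made up.
def _example_wrapped_app():
    def simple_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello, lint!']
    return LintMiddleware(simple_app)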
| agpl-3.0 |
aduric/crossfit | nonrel/tests/regressiontests/settings_tests/tests.py | 47 | 2686 | from django.conf import settings
from django.utils import unittest
from django.conf import settings, UserSettingsHolder, global_settings
class SettingsTests(unittest.TestCase):
#
# Regression tests for #10130: deleting settings.
#
def test_settings_delete(self):
settings.TEST = 'test'
self.assertEqual('test', settings.TEST)
del settings.TEST
self.assertRaises(AttributeError, getattr, settings, 'TEST')
def test_settings_delete_wrapped(self):
self.assertRaises(TypeError, delattr, settings, '_wrapped')
class TrailingSlashURLTests(unittest.TestCase):
settings_module = settings
def setUp(self):
self._original_media_url = self.settings_module.MEDIA_URL
def tearDown(self):
self.settings_module.MEDIA_URL = self._original_media_url
def test_blank(self):
"""
If blank, no PendingDeprecationWarning error will be raised, even though it
doesn't end in a slash.
"""
self.settings_module.MEDIA_URL = ''
self.assertEqual('', self.settings_module.MEDIA_URL)
def test_end_slash(self):
"""
MEDIA_URL works if you end in a slash.
"""
self.settings_module.MEDIA_URL = '/foo/'
self.assertEqual('/foo/', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/'
self.assertEqual('http://media.foo.com/',
self.settings_module.MEDIA_URL)
def test_no_end_slash(self):
"""
        MEDIA_URL raises a PendingDeprecationWarning if it doesn't end in a
slash.
"""
import warnings
warnings.filterwarnings('error', 'If set, MEDIA_URL must end with a slash', PendingDeprecationWarning)
def setattr_settings(settings_module, attr, value):
setattr(settings_module, attr, value)
self.assertRaises(PendingDeprecationWarning, setattr_settings,
self.settings_module, 'MEDIA_URL', '/foo')
self.assertRaises(PendingDeprecationWarning, setattr_settings,
self.settings_module, 'MEDIA_URL',
'http://media.foo.com')
def test_double_slash(self):
"""
If a MEDIA_URL ends in more than one slash, presume they know what
they're doing.
"""
self.settings_module.MEDIA_URL = '/stupid//'
self.assertEqual('/stupid//', self.settings_module.MEDIA_URL)
self.settings_module.MEDIA_URL = 'http://media.foo.com/stupid//'
self.assertEqual('http://media.foo.com/stupid//',
self.settings_module.MEDIA_URL)
| bsd-3-clause |
tumbl3w33d/ansible | lib/ansible/modules/network/aci/aci_interface_policy_leaf_profile.py | 8 | 6901 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_profile
short_description: Manage fabric interface policy leaf profiles (infra:AccPortP)
description:
- Manage fabric interface policy leaf profiles on Cisco ACI fabrics.
version_added: '2.5'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
type: str
required: yes
aliases: [ name, leaf_interface_profile_name ]
description:
description:
- Description for the Fabric access policy leaf interface profile.
type: str
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(infra:AccPortP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
'''
EXAMPLES = r'''
- name: Add a new leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
description: leafintprfname description
state: present
delegate_to: localhost
- name: Remove a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: absent
delegate_to: localhost
- name: Remove all leaf_interface_profiles
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
state: absent
delegate_to: localhost
- name: Query a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_interface_profile=dict(type='str', aliases=['name', 'leaf_interface_profile_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_interface_profile']],
['state', 'present', ['leaf_interface_profile']],
],
)
leaf_interface_profile = module.params.get('leaf_interface_profile')
description = module.params.get('description')
state = module.params.get('state')
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraAccPortP',
aci_rn='infra/accportprof-{0}'.format(leaf_interface_profile),
module_object=leaf_interface_profile,
target_filter={'name': leaf_interface_profile},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraAccPortP',
class_config=dict(
name=leaf_interface_profile,
descr=description,
),
)
aci.get_diff(aci_class='infraAccPortP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
katrid/django | tests/check_framework/test_security.py | 242 | 17428 | from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_secure
return check_session_cookie_secure
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_httponly
return check_session_cookie_httponly
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=[])
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_middleware
return check_csrf_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_no_csrf_middleware(self):
"""
Warn if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [csrf.W003])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"])
def test_with_csrf_middleware(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_secure
return check_csrf_cookie_secure
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(self.func(None), [csrf.W016])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_httponly
return check_csrf_cookie_httponly
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE_CLASSES but
CSRF_COOKIE_HTTPONLY isn't True.
"""
self.assertEqual(self.func(None), [csrf.W017])
@override_settings(MIDDLEWARE_CLASSES=[], CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE_CLASSES, even if
CSRF_COOKIE_HTTPONLY is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=True)
def test_with_csrf_cookie_httponly_true(self):
self.assertEqual(self.func(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_security_middleware
return check_security_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W001])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"])
def test_with_security_middleware(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts
return check_sts
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(self.func(None), [base.W004])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middlware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600)
def test_with_sts(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_include_subdomains
return check_sts_include_subdomains
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(self.func(None), [base.W005])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains_no_middlware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600)
def test_with_sts_subdomains(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_options_middleware
return check_xframe_options_middleware
@override_settings(MIDDLEWARE_CLASSES=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [base.W002])
@override_settings(MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_deny
return check_xframe_deny
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE_CLASSES but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [base.W019])
@override_settings(MIDDLEWARE_CLASSES=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE_CLASSES even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(self.func(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_content_type_nosniff
return check_content_type_nosniff
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(self.func(None), [base.W006])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_with_content_type_nosniff(self):
self.assertEqual(self.func(None), [])
class CheckXssFilterTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xss_filter
return check_xss_filter
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter(self):
"""
Warn if SECURE_BROWSER_XSS_FILTER isn't True.
"""
self.assertEqual(self.func(None), [base.W007])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter_no_middleware(self):
"""
Don't warn if SECURE_BROWSER_XSS_FILTER isn't True and
SecurityMiddleware isn't in MIDDLEWARE_CLASSES.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=True)
def test_with_xss_filter(self):
self.assertEqual(self.func(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_ssl_redirect
return check_ssl_redirect
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(self.func(None), [base.W008])
@override_settings(
MIDDLEWARE_CLASSES=[],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middlware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE_CLASSES=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True)
def test_with_ssl_redirect(self):
self.assertEqual(self.func(None), [])
class CheckSecretKeyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_secret_key
return check_secret_key
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
def test_okay_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [])
@override_settings(SECRET_KEY='')
def test_empty_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_missing_secret_key(self):
del settings.SECRET_KEY
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_none_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
def test_low_length_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY='abcd' * 20)
def test_low_entropy_secret_key(self):
self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_debug
return check_debug
@override_settings(DEBUG=True)
def test_debug_true(self):
"""
Warn if DEBUG is True.
"""
self.assertEqual(self.func(None), [base.W018])
@override_settings(DEBUG=False)
def test_debug_false(self):
self.assertEqual(self.func(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_allowed_hosts
return check_allowed_hosts
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(self.func(None), [base.W020])
@override_settings(ALLOWED_HOSTS=['.example.com', ])
def test_allowed_hosts_set(self):
self.assertEqual(self.func(None), [])
| bsd-3-clause |
atalax/linux | tools/perf/util/setup.py | 766 | 1540 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPI')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='[email protected]',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
hifly/OpenUpgrade | addons/mrp_repair/__openerp__.py | 259 | 2554 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Repairs Management',
'version': '1.0',
'category': 'Manufacturing',
'description': """
The aim is to have a complete module to manage all product repairs.
====================================================================
The following topics should be covered by this module:
------------------------------------------------------
    * Add/remove products in the repair order
* Impact for stocks
* Invoicing (products and/or services)
* Warranty concept
* Repair quotation report
* Notes for the technician and for the final customer
""",
'author': 'OpenERP SA',
'depends': ['mrp', 'sale', 'account'],
'website': 'https://www.odoo.com/page/manufacturing',
'data': [
'security/ir.model.access.csv',
'security/mrp_repair_security.xml',
'mrp_repair_data.xml',
'mrp_repair_sequence.xml',
'wizard/mrp_repair_cancel_view.xml',
'wizard/mrp_repair_make_invoice_view.xml',
'mrp_repair_view.xml',
'mrp_repair_workflow.xml',
'mrp_repair_report.xml',
'views/report_mrprepairorder.xml',
],
'demo': ['mrp_repair_demo.yml'],
'test': ['test/mrp_repair_users.yml',
'test/test_mrp_repair_noneinv.yml',
'test/test_mrp_repair_b4inv.yml',
'test/test_mrp_repair_afterinv.yml',
'test/test_mrp_repair_cancel.yml',
'test/test_mrp_repair_fee.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
open2c/bioframe | bioframe/io/fileops.py | 1 | 21340 | from collections import OrderedDict
from contextlib import closing
import tempfile
import json
import io
import numpy as np
import pandas as pd
try:
import bbi
except ImportError:
bbi = None
try:
import pyBigWig
except ImportError:
pyBigWig = None
from ..core.stringops import parse_region
from ..core.arrops import argnatsort
from .schemas import SCHEMAS, BAM_FIELDS, GAP_FIELDS, UCSC_MRNA_FIELDS
__all__ = [
"read_table",
"read_chromsizes",
"read_tabix",
"read_pairix",
"read_bam",
"load_fasta",
"read_bigwig",
"to_bigwig",
"read_bigbed",
"to_bigbed",
"read_parquet",
"to_parquet",
]
def read_table(filepath_or, schema=None, **kwargs):
"""
Read a tab-delimited file into a data frame.
Equivalent to :func:`pandas.read_table` but supports an additional
`schema` argument to populate column names for common genomic formats.
"""
kwargs.setdefault("sep", "\t")
kwargs.setdefault("header", None)
if isinstance(filepath_or, str) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
if schema is not None:
try:
kwargs.setdefault("names", SCHEMAS[schema])
except (KeyError, TypeError):
if isinstance(schema, str):
raise ValueError("TSV schema not found: '{}'".format(schema))
kwargs.setdefault("names", schema)
return pd.read_csv(filepath_or, **kwargs)
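# Illustrative usage sketch (not part of the original module): "intervals.bed.gz"
# is an assumed example file, and the call assumes a 'bed3' entry in SCHEMAS
# mapping to the (chrom, start, end) column names.
def _example_read_table():
    return read_table("intervals.bed.gz", schema="bed3")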
def parse_gtf_attributes(attrs, kv_sep="=", item_sep=";", quotechar='"', **kwargs):
item_lists = attrs.str.split(item_sep)
item_lists = item_lists.apply(
lambda items: [item.strip().split(kv_sep) for item in items]
)
stripchars = quotechar + " "
item_lists = item_lists.apply(
lambda items: [
map(lambda x: x.strip(stripchars), item) for item in items if len(item) == 2
]
)
kv_records = item_lists.apply(dict)
return pd.DataFrame.from_records(kv_records, **kwargs)
def read_chromsizes(
filepath_or,
filter_chroms=True,
chrom_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"),
natsort=True,
as_bed=False,
**kwargs
):
"""
Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC
database, where ``db`` is a genome assembly name.
Parameters
----------
filepath_or : str or file-like
Path or url to text file, or buffer.
filter_chroms : bool, optional
Filter for chromosome names given in ``chrom_patterns``.
chrom_patterns : sequence, optional
Sequence of regular expressions to capture desired sequence names.
natsort : bool, optional
Sort each captured group of names in natural order. Default is True.
as_bed : bool, optional
If True, return chromsizes as an interval dataframe (chrom, start, end).
**kwargs :
Passed to :func:`pandas.read_csv`
Returns
-------
Series of integer bp lengths indexed by sequence name or an interval dataframe.
Notes
-----
    By default, `chrom_patterns` keeps only the canonical sequence names:
    autosomes (chr1, chr2, ...), the sex chromosomes (chrX, chrY) and the
    mitochondrial genome (chrM). Pass other regular expressions to keep
    additional sequences.
See also
--------
* UCSC assembly terminology: <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>
* NCBI assembly terminology: <https://www.ncbi.nlm.nih.gov/grc/help/definitions>
"""
if isinstance(filepath_or, str) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
chromtable = pd.read_csv(
filepath_or,
sep="\t",
usecols=[0, 1],
names=["name", "length"],
dtype={"name": str},
**kwargs
)
if filter_chroms:
parts = []
for pattern in chrom_patterns:
if not len(pattern):
continue
part = chromtable[chromtable["name"].str.contains(pattern)]
if natsort:
part = part.iloc[argnatsort(part["name"])]
parts.append(part)
chromtable = pd.concat(parts, axis=0)
if as_bed:
chromtable["start"] = 0
chromtable = (
chromtable[["name", "start", "length"]]
.rename({"name": "chrom", "length": "end"}, axis="columns")
.reset_index(drop=True)
)
else:
chromtable.index = chromtable["name"].values
chromtable = chromtable["length"]
return chromtable
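# Illustrative usage sketch (not part of the original module): "hg38.chrom.sizes"
# is an assumed local copy of a UCSC chrom.sizes file, used only as an example.
def _example_read_chromsizes():
    chromsizes = read_chromsizes("hg38.chrom.sizes", filter_chroms=True)
    length_of_chr1 = chromsizes.loc["chr1"]            # integer length in bp
    as_intervals = read_chromsizes("hg38.chrom.sizes", as_bed=True)  # (chrom, start, end)
    return length_of_chr1, as_intervals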
def read_gapfile(filepath_or_fp, chroms=None, **kwargs):
gap = pd.read_csv(
filepath_or_fp,
sep="\t",
names=GAP_FIELDS,
usecols=["chrom", "start", "end", "length", "type", "bridge"],
**kwargs
)
if chroms is not None:
gap = gap[gap.chrom.isin(chroms)]
return gap
def read_ucsc_mrnafile(filepath_or_fp, chroms=None, **kwargs):
mrna = pd.read_csv(
filepath_or_fp,
sep="\t",
names=UCSC_MRNA_FIELDS,
# usecols=['chrom', 'start', 'end', 'length', 'type', 'bridge'],
**kwargs
)
if chroms is not None:
mrna = mrna[mrna.chrom.isin(chroms)]
return mrna
def read_tabix(fp, chrom=None, start=None, end=None):
import pysam
with closing(pysam.TabixFile(fp)) as f:
names = list(f.header) or None
df = pd.read_csv(
io.StringIO("\n".join(f.fetch(chrom, start, end))),
sep="\t",
header=None,
names=names,
)
return df
def read_pairix(
fp,
region1,
region2=None,
chromsizes=None,
columns=None,
usecols=None,
dtypes=None,
**kwargs
):
import pypairix
import cytoolz as toolz
if dtypes is None:
dtypes = {}
f = pypairix.open(fp, "r")
header = f.get_header()
if len(header):
header_groups = toolz.groupby(lambda x: x.split(":")[0], header)
if "#chromsize" in header_groups and chromsizes is None:
items = [line.split()[1:] for line in header_groups["#chromsize"]]
if len(items) and chromsizes is None:
names, lengths = zip(*((item[0], int(item[1])) for item in items))
chromsizes = pd.Series(index=names, data=lengths)
if "#columns" in header_groups and columns is None:
columns = header_groups["#columns"][0].split()[1:]
chrom1, start1, end1 = parse_region(region1, chromsizes)
if region2 is not None:
chrom2, start2, end2 = parse_region(region2, chromsizes)
else:
chrom2, start2, end2 = chrom1, start1, end1
it = f.query2D(chrom1, start1, end1, chrom2, start2, end2)
if usecols is not None:
argusecols = [columns.index(col) for col in usecols]
records = [(record[i] for i in argusecols) for record in it]
columns = usecols
else:
records = it
df = pd.DataFrame.from_records(records, columns=columns)
if columns is not None:
for col in columns:
if col in dtypes:
df[col] = df[col].astype(dtypes[col])
else:
df[col] = pd.to_numeric(df[col], "ignore")
return df
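# Illustrative usage sketch (not part of the original module): "pairs.blksrt.gz"
# is an assumed pairix-indexed pairs file; the region strings use the same
# UCSC-style syntax handled by parse_region above.
def _example_read_pairix():
    return read_pairix("pairs.blksrt.gz", "chr1:0-10000000", region2="chr2")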
def read_bam(fp, chrom=None, start=None, end=None):
import pysam
with closing(pysam.AlignmentFile(fp, "rb")) as f:
bam_iter = f.fetch(chrom, start, end)
records = [
(
s.qname,
s.flag,
s.rname,
s.pos,
s.mapq,
s.cigarstring if s.mapq != 0 else np.nan,
s.rnext,
s.pnext,
s.tlen,
s.seq,
s.qual,
json.dumps(OrderedDict(s.tags)),
)
for s in bam_iter
]
df = pd.DataFrame(records, columns=BAM_FIELDS)
return df
def extract_centromeres(df, schema=None, merge=True):
if schema == "centromeres":
cens = df
elif schema == "cytoband":
cens = df[df["gieStain"] == "acen"]
elif schema == "gap":
cens = df[df["type"] == "centromere"]
else:
raise ValueError('`schema` must be one of {"centromeres", "cytoband", "gap"}.')
if merge:
cens = cens.groupby("chrom").agg({"start": np.min, "end": np.max}).reset_index()
cens["mid"] = (cens["start"] + cens["end"]) // 2
cens = (
cens[["chrom", "start", "end", "mid"]]
.sort_values("chrom")
.reset_index(drop=True)
)
return cens
class PysamFastaRecord(object):
def __init__(self, ff, ref):
self.ff = ff
if ref not in ff.references:
raise KeyError("Reference name '{}' not found in '{}'".format(ref, ff))
self.ref = ref
def __getitem__(self, key):
if isinstance(key, slice):
start, stop = key.start, key.stop
else:
start = key
stop = key + 1
return self.ff.fetch(self.ref, start, stop)
def load_fasta(filepath_or, engine="pysam", **kwargs):
"""
Load lazy fasta sequences from an indexed fasta file (optionally compressed)
or from a collection of uncompressed fasta files.
Parameters
----------
filepath_or : str or iterable
If a string, a filepath to a single `.fa` or `.fa.gz` file. Assumed to
be accompanied by a `.fai` index file. Depending on the engine, the
index may be created on the fly, and some compression formats may not
be supported. If not a string, an iterable of fasta file paths each
assumed to contain a single sequence.
engine : {'pysam', 'pyfaidx'}, optional
Module to use for loading sequences.
kwargs : optional
Options to pass to ``pysam.FastaFile`` or ``pyfaidx.Fasta``.
Returns
-------
OrderedDict of (lazy) fasta records.
Notes
-----
    * pysam/samtools can read .fai-indexed FASTA, including bgzf-compressed files with an accompanying .gzi index.
* pyfaidx can handle uncompressed and bgzf compressed files.
"""
is_multifile = not isinstance(filepath_or, str)
records = OrderedDict()
engine = engine.lower()
if engine == "pysam":
try:
import pysam
except ImportError:
raise ImportError("pysam is required to use engine='pysam'")
if is_multifile:
for onefile in filepath_or:
ff = pysam.FastaFile(onefile, **kwargs)
name = ff.references[0]
records[name] = PysamFastaRecord(ff, name)
else:
ff = pysam.FastaFile(filepath_or, **kwargs)
for name in ff.references:
records[name] = PysamFastaRecord(ff, name)
elif engine == "pyfaidx":
try:
import pyfaidx
except ImportError:
raise ImportError("pyfaidx is required to use engine='pyfaidx'")
if is_multifile:
for onefile in filepath_or:
ff = pyfaidx.Fasta(onefile, **kwargs)
name = next(iter(ff.keys()))
records[name] = ff[name]
else:
ff = pyfaidx.Fasta(filepath_or, **kwargs)
for name in ff.keys():
records[name] = ff[name]
else:
raise ValueError("engine must be 'pysam' or 'pyfaidx'")
return records
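# Illustrative usage sketch (not part of the original module): lazily load an
# indexed FASTA with the pysam engine. "genome.fa" is a hypothetical path that
# is assumed to have a ".fai" index next to it.
def _example_load_fasta():
    records = load_fasta("genome.fa", engine="pysam")
    seq = records["chr1"][0:1000]  # slice the first kilobase of a (hypothetical) "chr1" record
    return seq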
def read_bigwig(path, chrom, start=None, end=None, engine="auto"):
"""
Read intervals from a bigWig file.
Parameters
----------
path : str
Path or URL to a bigWig file
chrom : str
start, end : int, optional
Start and end coordinates. Defaults to 0 and chromosome length.
engine : {"auto", "pybbi", "pybigwig"}
Library to use for querying the bigWig file.
Returns
-------
DataFrame
"""
engine = engine.lower()
if engine == "auto":
if bbi is None and pyBigWig is None:
raise ImportError(
"read_bigwig requires either the pybbi or pyBigWig package"
)
elif bbi is not None:
engine = "pybbi"
else:
engine = "pybigwig"
if engine in ("pybbi", "bbi"):
if start is None:
start = 0
if end is None:
end = -1
with bbi.open(path) as f:
df = f.fetch_intervals(chrom, start=start, end=end)
elif engine == "pybigwig":
f = pyBigWig.open(path)
if start is None:
start = 0
if end is None:
end = f.chroms()[chrom]
ivals = f.intervals(chrom, start, end)
df = pd.DataFrame(ivals, columns=["start", "end", "value"])
df.insert(0, "chrom", chrom)
else:
raise ValueError(
"engine must be 'auto', 'pybbi' or 'pybigwig'; got {}".format(engine)
)
return df
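# Illustrative usage sketch (not part of the original module): query a signal
# track. "signal.bw" and "chr1" are hypothetical; either pybbi or pyBigWig
# must be installed for the "auto" engine to resolve.
def _example_read_bigwig():
    df = read_bigwig("signal.bw", "chr1", start=0, end=1000000, engine="auto")
    return df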
def read_bigbed(path, chrom, start=None, end=None, engine="auto"):
"""
Read intervals from a bigBed file.
Parameters
----------
path : str
Path or URL to a bigBed file
chrom : str
start, end : int, optional
Start and end coordinates. Defaults to 0 and chromosome length.
engine : {"auto", "pybbi", "pybigwig"}
Library to use for querying the bigBed file.
Returns
-------
DataFrame
"""
engine = engine.lower()
if engine == "auto":
if bbi is None and pyBigWig is None:
raise ImportError(
"read_bigbed requires either the pybbi or pyBigWig package"
)
elif bbi is not None:
engine = "pybbi"
else:
engine = "pybigwig"
if engine in ("pybbi", "bbi"):
if start is None:
start = 0
if end is None:
end = -1
with bbi.open(path) as f:
df = f.fetch_intervals(chrom, start=start, end=end)
elif engine == "pybigwig":
f = pyBigWig.open(path)
if start is None:
start = 0
if end is None:
end = f.chroms()[chrom]
ivals = f.entries(chrom, start, end)
df = pd.DataFrame(ivals, columns=["start", "end", "rest"])
df.insert(0, "chrom", chrom)
else:
raise ValueError(
"engine must be 'auto', 'pybbi' or 'pybigwig'; got {}".format(engine)
)
return df
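# Illustrative usage sketch (not part of the original module): query interval
# entries from a bigBed file. "annotations.bb" and "chr1" are hypothetical.
def _example_read_bigbed():
    df = read_bigbed("annotations.bb", "chr1", start=0, end=1000000)
    return df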
def to_bigwig(df, chromsizes, outpath, value_field=None):
"""
Save a bedGraph-like dataframe as a binary BigWig track.
Parameters
----------
df : pandas.DataFrame
Data frame with columns 'chrom', 'start', 'end' and one or more value
columns
chromsizes : pandas.Series
Series indexed by chromosome name mapping to their lengths in bp
outpath : str
The output BigWig file path
value_field : str, optional
Select the column label of the data frame to generate the track. Default
is to use the fourth column.
"""
is_bedgraph = True
for col in ["chrom", "start", "end"]:
if col not in df.columns:
is_bedgraph = False
if len(df.columns) < 4:
is_bedgraph = False
if not is_bedgraph:
raise ValueError(
"A bedGraph-like DataFrame is required, got {}".format(df.columns)
)
if value_field is None:
value_field = df.columns[3]
columns = ["chrom", "start", "end", value_field]
bg = df[columns].copy()
bg["chrom"] = bg["chrom"].astype(str)
bg = bg.sort_values(["chrom", "start", "end"])
with tempfile.NamedTemporaryFile(suffix=".bg") as f, tempfile.NamedTemporaryFile(
"wt", suffix=".chrom.sizes"
) as cs:
chromsizes.to_csv(cs, sep="\t", header=False)
cs.flush()
bg.to_csv(
f.name, sep="\t", columns=columns, index=False, header=False, na_rep="nan"
)
p = subprocess.run(
["bedGraphToBigWig", f.name, cs.name, outpath],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return p
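# Illustrative usage sketch (not part of the original module): write a tiny
# bedGraph-like frame to BigWig. `chromsizes` is assumed to be a pandas Series
# of chromosome lengths, and the UCSC "bedGraphToBigWig" binary must be on PATH.
def _example_to_bigwig(chromsizes):
    bg = pd.DataFrame({
        "chrom": ["chr1", "chr1"],
        "start": [0, 100],
        "end": [100, 200],
        "value": [1.0, 2.5],
    })
    return to_bigwig(bg, chromsizes, "out.bw", value_field="value")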
def to_bigbed(df, chromsizes, outpath, schema="bed6"):
"""
    Save a bed6-like dataframe as a binary BigBed track.
    Parameters
    ----------
    df : pandas.DataFrame
        Data frame with columns 'chrom', 'start', 'end', 'name', 'score'
        and 'strand'
    chromsizes : pandas.Series
        Series indexed by chromosome name mapping to their lengths in bp
    outpath : str
        The output BigBed file path
    schema : str, optional
        BED schema passed to ``bedToBigBed`` via ``-type``. Default is 'bed6'.
"""
import tempfile
import subprocess
is_bed6 = True
for col in ["chrom", "start", "end", "name", "score", "strand"]:
if col not in df.columns:
is_bed6 = False
if len(df.columns) < 6:
is_bed6 = False
if not is_bed6:
raise ValueError("A bed6-like DataFrame is required, got {}".format(df.columns))
columns = ["chrom", "start", "end", "name", "score", "strand"]
bed = df[columns].copy()
bed["chrom"] = bed["chrom"].astype(str)
bed = bed.sort_values(["chrom", "start", "end"])
with tempfile.NamedTemporaryFile(suffix=".bed") as f, tempfile.NamedTemporaryFile(
"wt", suffix=".chrom.sizes"
) as cs:
chromsizes.to_csv(cs, sep="\t", header=False)
cs.flush()
bed.to_csv(
f.name, sep="\t", columns=columns, index=False, header=False, na_rep="nan"
)
p = subprocess.run(
["bedToBigBed", "-type={}".format(schema), f.name, cs.name, outpath],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return p
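# Illustrative usage sketch (not part of the original module): write a minimal
# bed6 frame to BigBed. `chromsizes` is assumed to be a pandas Series of
# chromosome lengths, and the UCSC "bedToBigBed" binary must be on PATH.
def _example_to_bigbed(chromsizes):
    bed = pd.DataFrame({
        "chrom": ["chr1"],
        "start": [0],
        "end": [100],
        "name": ["feature1"],
        "score": [0],
        "strand": ["+"],
    })
    return to_bigbed(bed, chromsizes, "out.bb", schema="bed6")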
def to_parquet(
pieces,
outpath,
row_group_size=None,
compression="snappy",
use_dictionary=True,
version=2.0,
**kwargs
):
"""
Save an iterable of dataframe chunks to a single Apache Parquet file. For
more info about Parquet, see https://arrow.apache.org/docs/python/parquet.html.
Parameters
----------
pieces : DataFrame or iterable of DataFrame
Chunks to write
outpath : str
Path to output file
row_group_size : int
Number of rows per row group
compression : {'snappy', 'gzip', 'brotli', 'none'}, optional
Compression algorithm. Can be set on a per-column basis with a
dictionary of column names to compression lib.
use_dictionary : bool, optional
Use dictionary encoding. Can be set on a per-column basis with a list
of column names.
See also
--------
pyarrow.parquet.write_table
pyarrow.parquet.ParquetFile
fastparquet
"""
try:
import pyarrow.parquet
import pyarrow as pa
except ImportError:
raise ImportError("Saving to parquet requires the `pyarrow` package")
if isinstance(pieces, pd.DataFrame):
pieces = (pieces,)
    writer = None
    try:
for i, piece in enumerate(pieces):
table = pa.Table.from_pandas(piece, preserve_index=False)
if i == 0:
writer = pa.parquet.ParquetWriter(
outpath,
table.schema,
compression=compression,
use_dictionary=use_dictionary,
version=version,
**kwargs
)
writer.write_table(table, row_group_size=row_group_size)
    finally:
        if writer is not None:
            writer.close()
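# Illustrative usage sketch (not part of the original module): stream chunks
# of a frame into a single Parquet file. `df` is assumed to be a pandas
# DataFrame; "table.parquet" is a hypothetical output path.
def _example_to_parquet(df):
    chunks = (df.iloc[i:i + 100000] for i in range(0, len(df), 100000))
    to_parquet(chunks, "table.parquet", compression="snappy")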
def read_parquet(filepath, columns=None, iterator=False, **kwargs):
"""
Load DataFrames from Parquet files, optionally in pieces.
Parameters
----------
filepath : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
        Parquet file, use pyarrow.BufferReader
columns: list
If not None, only these columns will be read from the row groups. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'
iterator : boolean, default False
Return an iterator object that yields row group DataFrames and
provides the ParquetFile interface.
use_threads : boolean, default True
Perform multi-threaded column reads
memory_map : boolean, default True
If the source is a file path, use a memory map to read file, which can
improve performance in some environments
Returns
-------
DataFrame or ParquetFileIterator
"""
use_threads = kwargs.pop("use_threads", True)
if not iterator:
return pd.read_parquet(
filepath, columns=columns, use_threads=use_threads, **kwargs
)
else:
try:
from pyarrow.parquet import ParquetFile
except ImportError:
raise ImportError(
"Iterating over Parquet data requires the `pyarrow` package."
)
class ParquetFileIterator(ParquetFile):
def __iter__(self):
return self
def __next__(self):
if not hasattr(self, "_rgid"):
self._rgid = 0
if self._rgid < self.num_row_groups:
rg = self.read_row_group(
self._rgid,
columns=columns,
use_threads=use_threads,
use_pandas_metadata=True,
)
self._rgid += 1
else:
raise StopIteration
return rg.to_pandas()
return ParquetFileIterator(filepath, **kwargs)
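# Illustrative usage sketch (not part of the original module): read the whole
# file at once, or iterate row group by row group. "table.parquet" and the
# column names are hypothetical.
def _example_read_parquet():
    df = read_parquet("table.parquet", columns=["chrom", "start", "end"])
    chunks = list(read_parquet("table.parquet", iterator=True))
    return df, chunks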
| mit |
sv-dev1/odoo | addons/email_template/wizard/__init__.py | 446 | 1130 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import email_template_preview
import mail_compose_message
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
waterdotorg/waterday-2015 | project/custom/forms.py | 1 | 1447 | from django import forms
from django.conf import settings
from PIL import Image
class ShareImageForm(forms.Form):
WORD_CHOICES = (
('', '2. WATER GIVES ME...'),
('joy', 'joy'),
('friendship', 'friendship'),
('life', 'life'),
('family', 'family'),
('nourishment', 'nourishment'),
('inspiration', 'inspiration'),
('strength', 'strength'),
)
image = forms.ImageField()
# word = forms.ChoiceField(choices=WORD_CHOICES)
def clean_image(self):
data = self.cleaned_data['image']
im = Image.open(data)
im_overlay = Image.open(settings.STATIC_ROOT + 'img/overlay/overlay-friendship.png')
width, height = im.size
        if width < 600 or height < 500:
            raise forms.ValidationError("Minimum image width and height are "
                                        "600 x 500")
if im.format not in ('JPEG', 'PNG', 'GIF'):
raise forms.ValidationError("File type not supported. Please upload a JPG, PNG or GIF.")
im = im.resize((600, 500))
try:
if im.mode != 'RGBA':
im = im.convert('RGBA')
except:
raise forms.ValidationError("Image mode not supported.")
try:
im_out = Image.alpha_composite(im, im_overlay)
except:
raise forms.ValidationError("Sorry, this image is not compatible.")
return data
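# Illustrative sketch (not part of the original file): validating an uploaded
# share image inside a Django view. `request` is a hypothetical HttpRequest
# carrying a multipart file upload.
def _example_handle_share_image(request):
    form = ShareImageForm(request.POST, request.FILES)
    if form.is_valid():
        return form.cleaned_data['image']
    return form.errors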
| gpl-3.0 |
asbjornu/arana | app/Web/aranalibtest/common/appenginepatch/appenginepatcher/patch.py | 2 | 24059 | # -*- coding: utf-8 -*-
# Unfortunately, we have to fix a few App Engine bugs here because otherwise
# not all of our features will work. Still, we should keep the number of bug
# fixes to a minimum and report everything to Google, please:
# http://code.google.com/p/googleappengine/issues/list
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import logging, new, os, re, sys
base_path = os.path.abspath(os.path.dirname(__file__))
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'ordering', 'permissions', 'app_label',
'abstract', 'db_table', 'db_tablespace')
def patch_all():
patch_python()
patch_app_engine()
patch_django()
setup_logging()
def patch_python():
# Remove modules that we want to override
for module in ('memcache',):
if module in sys.modules:
del sys.modules[module]
# For some reason the imp module can't be replaced via sys.path
from appenginepatcher import have_appserver
if have_appserver:
from appenginepatcher import imp
sys.modules['imp'] = imp
if have_appserver:
def unlink(_):
raise NotImplementedError('App Engine does not support FS writes!')
os.unlink = unlink
def patch_app_engine():
# This allows for using Paginator on a Query object. We limit the number
# of results to 301, so there won't be any timeouts (301, so you can say
# "more than 300 results").
def __len__(self):
return self.count(301)
db.Query.__len__ = __len__
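    # Sketch of what this enables (not in the original): Django's Paginator
    # can now take a raw query as its object list, e.g.
    #   from django.core.paginator import Paginator
    #   Paginator(SomeModel.all(), 25).page(1)   # SomeModel is hypothetical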
# Add "model" property to Query (needed by generic views)
class ModelProperty(object):
def __get__(self, query, unused):
try:
return query._Query__model_class
except:
return query._model_class
db.Query.model = ModelProperty()
# Add a few Model methods that are needed for serialization and ModelForm
def _get_pk_val(self):
if self.has_key():
return unicode(self.key())
else:
return None
db.Model._get_pk_val = _get_pk_val
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._get_pk_val() == other._get_pk_val()
db.Model.__eq__ = __eq__
def __ne__(self, other):
return not self.__eq__(other)
db.Model.__ne__ = __ne__
def pk(self):
return self._get_pk_val()
db.Model.id = db.Model.pk = property(pk)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
from django.db.models.fields import FieldDoesNotExist
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
db.Model.serializable_value = serializable_value
# Make Property more Django-like (needed for serialization and ModelForm)
db.Property.serialize = True
db.Property.editable = True
db.Property.help_text = ''
def blank(self):
return not self.required
db.Property.blank = property(blank)
def _get_verbose_name(self):
if not getattr(self, '_verbose_name', None):
self._verbose_name = self.name.replace('_', ' ')
return self._verbose_name
def _set_verbose_name(self, verbose_name):
self._verbose_name = verbose_name
db.Property.verbose_name = property(_get_verbose_name, _set_verbose_name)
def attname(self):
return self.name
db.Property.attname = property(attname)
class Rel(object):
def __init__(self, property):
self.field_name = 'key'
self.property = property
self.to = property.reference_class
self.multiple = True
self.parent_link = False
self.related_name = getattr(property, 'collection_name', None)
self.through = None
class RelProperty(object):
def __get__(self, property, cls):
if property is None:
return self
if not hasattr(property, 'reference_class'):
return None
if not hasattr(property, '_rel_cache'):
property._rel_cache = Rel(property)
return property._rel_cache
db.Property.rel = RelProperty()
def formfield(self, **kwargs):
return self.get_form_field(**kwargs)
db.Property.formfield = formfield
# Add repr to make debugging a little bit easier
from django.utils.datastructures import SortedDict
def __repr__(self):
d = SortedDict()
if self.has_key() and self.key().name():
d['key_name'] = self.key().name()
for field in self._meta.fields:
try:
d[field.name] = getattr(self, field.name)
except:
d[field.name] = field.get_value_for_datastore(self)
return u'%s(**%s)' % (self.__class__.__name__, repr(d))
db.Model.__repr__ = __repr__
# Add default __str__ and __unicode__ methods
def __str__(self):
return unicode(self).encode('utf-8')
db.Model.__str__ = __str__
def __unicode__(self):
return unicode(repr(self))
db.Model.__unicode__ = __unicode__
# Replace save() method with one that calls put(), so a monkey-patched
# put() will also work if someone uses save()
def save(self):
self.put()
db.Model.save = save
# Add _meta to Model, so porting code becomes easier (generic views,
# xheaders, and serialization depend on it).
from django.conf import settings
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import string_concat, get_language, \
activate, deactivate_all
class _meta(object):
many_to_many = ()
class pk:
name = 'key'
attname = 'pk'
def __init__(self, model, bases):
try:
self.app_label = model.__module__.split('.')[-2]
except IndexError:
raise ValueError('Django expects models (here: %s.%s) to be defined in their own apps!' % (model.__module__, model.__name__))
self.parents = [b for b in bases if issubclass(b, db.Model)]
self.object_name = model.__name__
self.module_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
self.ordering = ()
self.abstract = model is db.Model
self.model = model
self.unique_together = ()
self.installed = model.__module__.rsplit('.', 1)[0] in \
settings.INSTALLED_APPS
self.permissions = []
meta = model.__dict__.get('Meta')
if meta:
meta_attrs = meta.__dict__.copy()
for name in meta.__dict__:
# Ignore any private attributes that Django doesn't care about.
# NOTE: We can't modify a dictionary's contents while looping
# over it, so we loop over the *original* dictionary instead.
if name.startswith('_'):
del meta_attrs[name]
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(meta, attr_name):
setattr(self, attr_name, getattr(meta, attr_name))
# verbose_name_plural is a special case because it uses a 's'
# by default.
setattr(self, 'verbose_name_plural', meta_attrs.pop('verbose_name_plural', string_concat(self.verbose_name, 's')))
# Any leftover attributes must be invalid.
if meta_attrs != {}:
raise TypeError, "'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys())
else:
self.verbose_name_plural = self.verbose_name + 's'
if not self.abstract:
self.permissions.extend([
('add_%s' % self.object_name.lower(),
string_concat('Can add ', self.verbose_name)),
('change_%s' % self.object_name.lower(),
string_concat('Can change ', self.verbose_name)),
('delete_%s' % self.object_name.lower(),
string_concat('Can delete ', self.verbose_name)),
])
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_str(self.app_label), smart_str(self.module_name))
def _set_db_table(self, db_table):
self._db_table = db_table
def _get_db_table(self):
if getattr(settings, 'DJANGO_STYLE_MODEL_KIND', True):
if hasattr(self, '_db_table'):
return self._db_table
return '%s_%s' % (self.app_label, self.module_name)
return self.object_name
db_table = property(_get_db_table, _set_db_table)
def _set_db_tablespace(self, db_tablespace):
self._db_tablespace = db_tablespace
def _get_db_tablespace(self):
if hasattr(self, '_db_tablespace'):
return self._db_tablespace
return settings.DEFAULT_TABLESPACE
db_tablespace = property(_get_db_tablespace, _set_db_tablespace)
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
lang = get_language()
deactivate_all()
raw = force_unicode(self.verbose_name)
activate(lang)
return raw
@property
def local_fields(self):
return tuple(sorted([p for p in self.model.properties().values()
if not isinstance(p, db.ListProperty)],
key=lambda prop: prop.creation_counter))
@property
def local_many_to_many(self):
return tuple(sorted([p for p in self.model.properties().values()
if isinstance(p, db.ListProperty) and
not p.name == '_class'],
key=lambda prop: prop.creation_counter))
@property
def fields(self):
return self.local_fields + self.local_many_to_many
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
for f in self.fields:
if f.name == name:
return f
from django.db.models.fields import FieldDoesNotExist
raise FieldDoesNotExist, '%s has no field named %r' % (self.object_name, name)
def get_all_related_objects(self, local_only=False):
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
if local_only:
return [k for k, v in self._related_objects_cache.items() if not v]
return self._related_objects_cache.keys()
def get_all_related_objects_with_model(self):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
return self._related_objects_cache.items()
def _fill_related_objects_cache(self):
from django.db.models.loading import get_models
from django.db.models.related import RelatedObject
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model():
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
from django.db.models.loading import get_models, app_cache_ready
from django.db.models.related import RelatedObject
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
def get_change_permission(self):
return 'change_%s' % self.object_name.lower()
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_ordered_objects(self):
return []
def get_parent_list(self):
"""
Returns a list of all the ancestor of this model as a list. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
# Required to support reference properties to db.Model
db.Model._meta = _meta(db.Model, ())
def _initialize_model(cls, bases):
cls._meta = _meta(cls, bases)
cls._default_manager = cls
if not cls._meta.abstract:
from django.db.models.loading import register_models
register_models(cls._meta.app_label, cls)
# Register models with Django
from django.db.models import signals
old_propertied_class_init = db.PropertiedClass.__init__
def __init__(cls, name, bases, attrs, map_kind=True):
"""Creates a combined appengine and Django model.
The resulting model will be known to both the appengine libraries and
Django.
"""
_initialize_model(cls, bases)
old_propertied_class_init(cls, name, bases, attrs,
not cls._meta.abstract)
signals.class_prepared.send(sender=cls)
db.PropertiedClass.__init__ = __init__
old_poly_init = polymodel.PolymorphicClass.__init__
def __init__(cls, name, bases, attrs):
if polymodel.PolyModel not in bases:
_initialize_model(cls, bases)
old_poly_init(cls, name, bases, attrs)
if polymodel.PolyModel not in bases:
signals.class_prepared.send(sender=cls)
polymodel.PolymorphicClass.__init__ = __init__
@classmethod
def kind(cls):
return cls._meta.db_table
db.Model.kind = kind
# Add model signals
old_model_init = db.Model.__init__
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
old_model_init(self, *args, **kwargs)
signals.post_init.send(sender=self.__class__, instance=self)
db.Model.__init__ = __init__
old_put = db.Model.put
def put(self, *args, **kwargs):
raw = False
signals.pre_save.send(sender=self.__class__, instance=self, raw=raw)
created = not self.is_saved()
result = old_put(self, *args, **kwargs)
signals.post_save.send(sender=self.__class__, instance=self,
created=created, raw=raw)
return result
db.Model.put = put
old_delete = db.Model.delete
def delete(self, *args, **kwargs):
signals.pre_delete.send(sender=self.__class__, instance=self)
result = old_delete(self, *args, **kwargs)
signals.post_delete.send(sender=self.__class__, instance=self)
return result
db.Model.delete = delete
# This has to come last because we load Django here
from django.db.models.fields import BLANK_CHOICE_DASH
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
if self.rel:
return first_choice + [(obj.pk, unicode(obj))
for obj in self.rel.to.all().fetch(301)]
return first_choice
db.Property.get_choices = get_choices
fix_app_engine_bugs()
def fix_app_engine_bugs():
# Fix handling of verbose_name. Google resolves lazy translation objects
# immedately which of course breaks translation support.
# http://code.google.com/p/googleappengine/issues/detail?id=583
from django import forms
from django.utils.text import capfirst
# This import is needed, so the djangoforms patch can do its work, first
from google.appengine.ext.db import djangoforms
def get_form_field(self, form_class=forms.CharField, **kwargs):
defaults = {'required': self.required}
defaults['label'] = capfirst(self.verbose_name)
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((unicode(choice), unicode(choice)))
defaults['widget'] = forms.Select(choices=choices)
if self.default is not None:
defaults['initial'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
db.Property.get_form_field = get_form_field
# Extend ModelForm with support for EmailProperty
# http://code.google.com/p/googleappengine/issues/detail?id=880
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an email property."""
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(db.EmailProperty, self).get_form_field(**defaults)
db.EmailProperty.get_form_field = get_form_field
# Fix DateTimeProperty, so it returns a property even for auto_now and
# auto_now_add.
# http://code.google.com/p/googleappengine/issues/detail?id=994
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(db.DateTimeProperty, self).get_form_field(**defaults)
db.DateTimeProperty.get_form_field = get_form_field
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(db.DateProperty, self).get_form_field(**defaults)
db.DateProperty.get_form_field = get_form_field
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(db.TimeProperty, self).get_form_field(**defaults)
db.TimeProperty.get_form_field = get_form_field
# Fix default value of UserProperty (Google resolves the user too early)
# http://code.google.com/p/googleappengine/issues/detail?id=879
from django.utils.functional import lazy
from google.appengine.api import users
def get_form_field(self, **kwargs):
defaults = {'initial': lazy(users.GetCurrentUser, users.User)()}
defaults.update(kwargs)
return super(db.UserProperty, self).get_form_field(**defaults)
db.UserProperty.get_form_field = get_form_field
# Fix file uploads via BlobProperty
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.FileField}
defaults.update(kwargs)
return super(db.BlobProperty, self).get_form_field(**defaults)
db.BlobProperty.get_form_field = get_form_field
def get_value_for_form(self, instance):
return getattr(instance, self.name)
db.BlobProperty.get_value_for_form = get_value_for_form
from django.core.files.uploadedfile import UploadedFile
def make_value_from_form(self, value):
if isinstance(value, UploadedFile):
return db.Blob(value.read())
return super(db.BlobProperty, self).make_value_from_form(value)
db.BlobProperty.make_value_from_form = make_value_from_form
# Optimize ReferenceProperty, so it returns the key directly
# http://code.google.com/p/googleappengine/issues/detail?id=993
def get_value_for_form(self, instance):
return self.get_value_for_datastore(instance)
db.ReferenceProperty.get_value_for_form = get_value_for_form
# Use our ModelChoiceField instead of Google's
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.ModelChoiceField,
'queryset': self.reference_class.all()}
defaults.update(kwargs)
return super(db.ReferenceProperty, self).get_form_field(**defaults)
db.ReferenceProperty.get_form_field = get_form_field
def patch_django():
# Most patches are part of the django-app-engine project:
# http://www.bitbucket.org/wkornewald/django-app-engine/
# Activate ragendja's GLOBALTAGS support (automatically done on import)
from ragendja import template
def setup_logging():
from django.conf import settings
if settings.DEBUG:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
| gpl-2.0 |
jefftriplett/trolley | trolley.py | 1 | 23057 | #!/usr/bin/env python
"""
Trolley syncs issues between CSV files, GitHub, and Trello, with optional Buffer integration.
"""
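# Example invocations (a sketch, not part of the original file). They assume
# the environment variables read below (GITHUB_*, TRELLO_*, BUFFER_*) are set:
#
#   python trolley.py bootstrap
#   python trolley.py list_trello_boards
#   python trolley.py sync_github_issues_to_trello_cards --trello-board <id>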
import csv
import datetime
import os
import random
import click
import click_config
import github3
from buffpy.api import API as BufferAPI
from buffpy.managers.profiles import Profiles
from buffpy.managers.updates import Updates
from trello import TrelloClient
__author__ = 'Jeff Triplett'
__copyright__ = 'Copyright 2015, Jeff Triplett'
__license__ = 'BSD'
__version__ = '0.1.6'
# hold auth state
_buffer_auth = None
_github_auth = None
_trello_auth = None
BUFFER_CLIENT_ID = os.environ.get('BUFFER_CLIENT_ID')
BUFFER_CLIENT_SECRET = os.environ.get('BUFFER_CLIENT_SECRET')
BUFFER_ACCESS_TOKEN = os.environ.get('BUFFER_ACCESS_TOKEN')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')
GITHUB_ORG = os.environ.get('GITHUB_ORG')
GITHUB_REPO = os.environ.get('GITHUB_REPO')
GITHUB_SCOPES = ['user', 'repo']
TRELLO_APP_KEY = os.environ.get('TRELLO_APP_KEY')
TRELLO_APP_SECRET = os.environ.get('TRELLO_APP_SECRET')
TRELLO_AUTH_TOKEN = os.environ.get('TRELLO_AUTH_TOKEN')
TRELLO_BOARD_ID = os.environ.get('TRELLO_BOARD_ID')
TRELLO_DEFAULT_LIST = os.environ.get('TRELLO_DEFAULT_LIST', 'Uncategorized')
# might migrate to:
# http://click.pocoo.org/4/options/#values-from-environment-variables
class config(object):
class buffer(object):
client_id = BUFFER_CLIENT_ID
client_secret = BUFFER_CLIENT_SECRET
access_token = BUFFER_ACCESS_TOKEN
class github(object):
username = GITHUB_USERNAME
password = GITHUB_PASSWORD
org = GITHUB_ORG
repo = GITHUB_REPO
class trello(object):
app_key = TRELLO_APP_KEY
app_secret = TRELLO_APP_SECRET
auth_token = TRELLO_AUTH_TOKEN
board_id = TRELLO_BOARD_ID
default_list = TRELLO_DEFAULT_LIST
# utils
def csv_to_dict_list(filename):
"""Open a CSV file and return a list of dict objects."""
with open(filename) as f:
values = list(csv.DictReader(f))
return values
def get_random_color():
filename = 'etc/color-blind-safe.csv'
colors = csv_to_dict_list(filename)
    index = random.randint(0, len(colors) - 1)
return colors[index]['color']
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version {}'.format(__version__))
ctx.exit()
# github utils
def get_github_auth(github_config):
"""Log me into github and return an object."""
global _github_auth
if _github_auth:
return _github_auth
assert github_config.username
assert github_config.password
_github_auth = github3.login(
github_config.username,
github_config.password)
return _github_auth
def get_github_repository(config, github_org, github_repo):
"""Return a repository object and log me in."""
github = get_github_auth(config.github)
repository = github.repository(github_org, github_repo)
return repository
def get_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_issues = [str(item.title) for item in repository.iter_issues()]
return existing_issues
def get_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_labels = [str(item.name) for item in repository.iter_labels()]
return existing_labels
def get_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = [str(item.title) for item in repository.iter_milestones()]
return existing_milestones
# github core
def close_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
issues = [str(issue.title) for issue in repository.iter_issues()]
click.echo('closing {} issues'.format(len(issues)))
for issue in repository.iter_issues():
click.echo('closing issue "{}"'.format(issue.title))
issue.close()
def create_github_issues(config, github_org, github_repo,
filename='etc/default_github_issues.csv'):
issues = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_issues = get_existing_github_issues(config, github_org, github_repo)
click.echo('creating {} issues'.format(len(issues)))
for issue in issues:
title = str(issue['title'])
body = str(issue['body'])
labels = issue['labels']
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if title not in existing_issues:
click.echo('creating issue "{}"'.format(title))
repository.create_issue(title, body, labels=labels)
else:
click.echo('issue "{}" already exists'.format(title))
def create_github_labels(config, github_org, github_repo,
filename='etc/default_github_labels.csv'):
labels = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_labels = get_existing_github_labels(config, github_org, github_repo)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_github_milestones(config, github_org, github_repo,
filename='etc/default_github_milestones.csv'):
milestones = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = get_existing_github_milestones(config, github_org, github_repo)
click.echo('creating {} milestones'.format(len(milestones)))
for milestone in milestones:
title = str(milestone['title'])
if title not in existing_milestones:
click.echo('creating milestone "{}"'.format(title))
repository.create_milestone(title)
else:
click.echo('milestone "{}" already exists'.format(title))
def delete_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
labels = [str(label.name) for label in repository.iter_labels()]
click.echo('removing {} labels'.format(len(labels)))
for label in labels:
click.echo('removing label "{}"'.format(label))
repository.label(label).delete()
def delete_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
milestones = repository.iter_milestones(github_org, github_repo)
click.echo('removing {} milestones'.format(len(list(milestones))))
for milestone in milestones:
click.echo('removing milestone "{}"'.format(milestone.title))
milestone.delete()
# trello utils
def get_trello_auth(trello_config):
"""Log me into trello and return an object."""
global _trello_auth
if _trello_auth:
return _trello_auth
assert trello_config.app_key
assert trello_config.app_secret
assert trello_config.auth_token
_trello_auth = TrelloClient(
api_key=trello_config.app_key,
api_secret=trello_config.app_secret,
token=trello_config.auth_token,
# token_secret=str(trello_config.auth_token),
)
return _trello_auth
def get_existing_trello_boards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
boards = [str(board.name) for board in board.get_cards()]
return boards
def get_existing_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
cards = board.get_cards()
cards = [str(card.name) for card in cards]
return cards
def get_existing_trello_labels(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
labels = board.get_labels()
labels = [label for label in labels]
return labels
def get_existing_trello_lists(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
all_lists = [item.name for item in all_lists]
return all_lists
def get_trello_list_lookup(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
list_lookup = {}
for item in all_lists:
id = item.id
name = item.name
list_lookup[name] = id
list_lookup[id] = name
default_list = config.trello.default_list
if default_list not in list_lookup:
new_list = board.add_list(default_list)
new_list_id = new_list.id
list_lookup[default_list] = new_list_id
list_lookup[new_list_id] = default_list
return list_lookup
# trello core
def create_trello_cards(config, trello_board_id,
filename='etc/default_trello_cards.csv'):
cards = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_cards = get_existing_trello_cards(config, trello_board_id)
board_lookup = get_trello_list_lookup(config, trello_board_id)
category = board_lookup[config.trello.default_list]
board = trello.get_board(trello_board_id)
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = str(card.get('title', ''))
description = str(card.get('body', ''))
labels = card.get('labels', [])
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if name not in existing_cards:
click.echo('creating issue "{}"'.format(name))
list_item = board.get_list(category)
new_card = list_item.add_card(name, description, labels=labels)
'''
# currently labels are broken in the trello python client :/
if len(labels):
for label in labels:
trello.cards.new_label(new_card['id'], label)
'''
else:
click.echo('issue "{}" already exists'.format(name))
def create_trello_labels(config, trello_board_id,
filename='etc/default_trello_labels.csv'):
labels = csv_to_dict_list(filename)
existing_labels = get_existing_trello_labels(config, trello_board_id)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
# TODO: Create Trello label via API
#repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_trello_lists(config, trello_board_id,
filename='etc/default_trello_lists.csv'):
lists = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_lists = get_existing_trello_lists(config, trello_board_id)
click.echo('creating {} lists'.format(len(lists)))
for item in lists:
title = str(item['title'])
if title not in existing_lists:
click.echo('creating list "{}"'.format(title))
trello.boards.new_list(trello_board_id, title)
else:
click.echo('list "{}" already exists'.format(title))
def list_trello_boards(config):
trello = get_trello_auth(config.trello)
boards = trello.list_boards()
for board in boards:
click.echo('{0}: {1}{2}'.format(
board.id,
board.name,
' (closed)' if board.closed else ''
))
def list_trello_organizations(config):
trello = get_trello_auth(config.trello)
organizations = trello.list_organizations()
for organization in organizations:
click.echo('{0}: {1}'.format(
organization.id,
organization.name
))
# sync github and trello
def sync_github_issues_to_trello_cards(config, github_org, github_repo,
trello_board_id):
trello = get_trello_auth(config.trello)
board_lookup = get_trello_list_lookup(config, trello_board_id)
existing_trello_cards = get_existing_trello_cards(config, trello_board_id)
repository = get_github_repository(config, github_org, github_repo)
issues = repository.iter_issues()
#click.echo('creating {} issues'.format(issues.count))
for issue in issues:
title = issue.title
desc = issue.body
category = board_lookup[config.trello.default_list]
if title not in existing_trello_cards:
click.echo('creating issue "{}"'.format(title))
trello.cards.new(title, category, desc=desc)
else:
click.echo('issue "{}" already exists'.format(title))
def sync_trello_cards_to_github_issues(config, trello_board_id, github_org, github_repo):
trello = get_trello_auth(config.trello)
existing_github_issues = get_existing_github_issues(config, github_org, github_repo)
repository = get_github_repository(config, github_org, github_repo)
board = trello.get_board(trello_board_id)
cards = board.all_cards()
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = card.name
# id = card['id']
# list_id = card['idList']
description = card.description
labels = card.labels
if name not in existing_github_issues:
click.echo('creating card "{}"'.format(name))
repository.create_issue(name, description, labels=labels)
else:
click.echo('card "{}" already exists'.format(name))
def list_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(config.trello.board_id)
cards = [card for card in board.open_cards()]
for card in cards:
name = card.name
card_id = card.id
description = card.description
click.echo('{0}: {1}'.format(card_id, name))
if len(description):
click.echo(description)
def get_buffer_auth(buffer_config):
"""Log me into buffer and return an object."""
global _buffer_auth
if _buffer_auth:
return _buffer_auth
assert buffer_config.client_id
assert buffer_config.client_secret
assert buffer_config.access_token
_buffer_auth = BufferAPI(
client_id=buffer_config.client_id,
client_secret=buffer_config.client_secret,
access_token=buffer_config.access_token,
)
return _buffer_auth
def test_buffer(config):
client = get_buffer_auth(config.buffer)
profiles = Profiles(api=client).filter(service='twitter')
if not len(profiles):
raise Exception('Your twitter account is not configured')
profile = profiles[0]
print profile
print
pending = profile.updates.pending
for item in pending:
print item
print item.id
print item.text
print item.scheduled_at
print datetime.datetime.fromtimestamp(item.scheduled_at)
# cli methods we are exposing to be used via terminal
@click.group()
@click_config.wrap(module=config, sections=('github', 'trello'))
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True)
def cli():
assert config.buffer
pass
@cli.command('bootstrap')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_bootstrap(github_org, github_repo):
"""Sets up github with some sensible defaults."""
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('close_existing_github_issues')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_close_existing_github_issues(force, github_org, github_repo):
"""Close all existing GitHub issues."""
message = 'Do you really want to close all of your existing GitHub issues?'
if force or click.confirm(message):
close_existing_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('create_github_issues')
@click.option('--filename', default='etc/default_github_issues.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_issues(filename, github_org, github_repo):
"""Create GitHub issues from a CSV file."""
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_labels')
@click.option('--filename', default='etc/default_github_labels.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_labels(filename, github_org, github_repo):
"""Create GitHub labels from a CSV file."""
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_milestones')
@click.option('--filename', default='etc/default_github_milestones.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_milestones(filename, github_org, github_repo):
"""Create GitHub milestones from a CSV file."""
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_trello_cards')
@click.option('--filename', default='etc/default_trello_cards.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_cards(filename, trello_board):
"""Create Trello cards from a CSV file."""
create_trello_cards(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_labels')
@click.option('--filename', default='etc/default_trello_labels.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_labels(filename, trello_board):
"""Create Trello labels from a CSV file."""
create_trello_labels(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_lists')
@click.option('--filename', default='etc/default_trello_lists.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_lists(filename, trello_board):
"""Create Trello lists from a CSV file."""
create_trello_lists(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('delete_existing_github_labels')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_labels(force, github_org, github_repo):
"""Delete labels from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub labels?'
if force or click.confirm(message):
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('delete_existing_github_milestones')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_milestones(force, github_org, github_repo):
"""Delete milestones from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub milestones?'
if force or click.confirm(message):
delete_existing_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('sync_github_issues_to_trello_cards')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
@click.option('--trello-board', type=str)
def cli_sync_github_issues_to_trello_cards(github_org, github_repo, trello_board):
"""Convert your GitHub issues to Trello cards."""
sync_github_issues_to_trello_cards(
config,
github_org or config.github.org,
github_repo or config.github.repo,
trello_board or config.trello.board_id)
@cli.command('sync_trello_cards_to_github_issues')
@click.option('--trello-board', type=str)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_sync_trello_cards_to_github_issues(trello_board, github_org, github_repo):
"""Convert your Trello cards to GitHub issues."""
sync_trello_cards_to_github_issues(
config,
trello_board or config.trello.board_id,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('list_trello_boards')
def cli_list_trello_boards():
"""List your Trello boards."""
list_trello_boards(config)
@cli.command('list_trello_cards')
@click.option('--trello-board', type=str)
def cli_list_trello_cards(trello_board):
"""List your Trello cards for a given board."""
list_trello_cards(
config,
trello_board or config.trello.board_id)
@cli.command('list_trello_organizations')
def cli_list_trello_organizations():
"""List your Trello organizations."""
list_trello_organizations(config)
@cli.command('test_buffer')
def cli_test_buffer():
"""Convert your Trello cards to GitHub issues."""
try:
test_buffer(config)
except Exception as e:
print e
if __name__ == '__main__':
cli()
| bsd-3-clause |
tboyce021/home-assistant | tests/components/gios/test_air_quality.py | 7 | 4211 | """Test air_quality of GIOS integration."""
from datetime import timedelta
import json
from gios import ApiError
from homeassistant.components.air_quality import (
ATTR_AQI,
ATTR_CO,
ATTR_NO2,
ATTR_OZONE,
ATTR_PM_2_5,
ATTR_PM_10,
ATTR_SO2,
)
from homeassistant.components.gios.air_quality import ATTRIBUTION
from homeassistant.components.gios.const import AQI_GOOD
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
STATE_UNAVAILABLE,
)
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.gios import init_integration
async def test_air_quality(hass):
"""Test states of the air_quality."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("air_quality.home")
assert state
assert state.state == "4"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_AQI) == AQI_GOOD
assert state.attributes.get(ATTR_PM_10) == 17
assert state.attributes.get(ATTR_PM_2_5) == 4
assert state.attributes.get(ATTR_CO) == 252
assert state.attributes.get(ATTR_SO2) == 4
assert state.attributes.get(ATTR_NO2) == 7
assert state.attributes.get(ATTR_OZONE) == 96
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
assert state.attributes.get(ATTR_ICON) == "mdi:emoticon-happy"
assert state.attributes.get("station") == "Test Name 1"
entry = registry.async_get("air_quality.home")
assert entry
assert entry.unique_id == 123
async def test_air_quality_with_incomplete_data(hass):
"""Test states of the air_quality with incomplete data from measuring station."""
await init_integration(hass, incomplete_data=True)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("air_quality.home")
assert state
assert state.state == "4"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_AQI) == "foo"
assert state.attributes.get(ATTR_PM_10) is None
assert state.attributes.get(ATTR_PM_2_5) == 4
assert state.attributes.get(ATTR_CO) == 252
assert state.attributes.get(ATTR_SO2) == 4
assert state.attributes.get(ATTR_NO2) == 7
assert state.attributes.get(ATTR_OZONE) == 96
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
)
assert state.attributes.get(ATTR_ICON) == "mdi:blur"
assert state.attributes.get("station") == "Test Name 1"
entry = registry.async_get("air_quality.home")
assert entry
assert entry.unique_id == 123
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when service causes an error."""
await init_integration(hass)
state = hass.states.get("air_quality.home")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "4"
future = utcnow() + timedelta(minutes=60)
with patch(
"homeassistant.components.gios.Gios._get_all_sensors",
side_effect=ApiError("Unexpected error"),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("air_quality.home")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=120)
with patch(
"homeassistant.components.gios.Gios._get_all_sensors",
return_value=json.loads(load_fixture("gios/sensors.json")),
), patch(
"homeassistant.components.gios.Gios._get_indexes",
return_value=json.loads(load_fixture("gios/indexes.json")),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("air_quality.home")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "4"
| apache-2.0 |
KimNorgaard/ansible-modules-extras | cloud/cloudstack/cs_resourcelimit.py | 27 | 6401 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_resourcelimit
short_description: Manages resource limits on Apache CloudStack based clouds.
description:
- Manage limits of resources for domains, accounts and projects.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
resource_type:
description:
- Type of the resource.
required: true
choices:
- instance
- ip_address
- volume
- snapshot
- template
- network
- vpc
- cpu
- memory
- primary_storage
- secondary_storage
aliases: [ 'type' ]
limit:
description:
- Maximum number of the resource.
- Default is unlimited C(-1).
required: false
default: -1
aliases: [ 'max' ]
domain:
description:
- Domain the resource is related to.
required: false
default: null
account:
description:
- Account the resource is related to.
required: false
default: null
project:
description:
- Name of the project the resource is related to.
required: false
default: null
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Update a resource limit for instances of a domain
local_action:
module: cs_resourcelimit
type: instance
limit: 10
domain: customers
# Update a resource limit for instances of an account
local_action:
module: cs_resourcelimit
type: instance
limit: 12
account: moserre
domain: customers
'''
RETURN = '''
---
resource_type:
description: Type of the resource
returned: success
type: string
sample: instance
limit:
description: Maximum number of the resource.
returned: success
type: int
sample: -1
domain:
description: Domain the resource is related to.
returned: success
type: string
sample: example domain
account:
description: Account the resource is related to.
returned: success
type: string
sample: example account
project:
description: Project the resource is related to.
returned: success
type: string
sample: example project
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
RESOURCE_TYPES = {
'instance': 0,
'ip_address': 1,
'volume': 2,
'snapshot': 3,
'template': 4,
'network': 6,
'vpc': 7,
'cpu': 8,
'memory': 9,
'primary_storage': 10,
'secondary_storage': 11,
}
class AnsibleCloudStackResourceLimit(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackResourceLimit, self).__init__(module)
self.returns = {
'max': 'limit',
}
def get_resource_type(self):
resource_type = self.module.params.get('resource_type')
return RESOURCE_TYPES.get(resource_type)
def get_resource_limit(self):
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['resourcetype'] = self.get_resource_type()
resource_limit = self.cs.listResourceLimits(**args)
if resource_limit:
return resource_limit['resourcelimit'][0]
self.module.fail_json(msg="Resource limit type '%s' not found." % self.module.params.get('resource_type'))
def update_resource_limit(self):
resource_limit = self.get_resource_limit()
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['resourcetype'] = self.get_resource_type()
args['max'] = self.module.params.get('limit', -1)
if self.has_changed(args, resource_limit):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateResourceLimit(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
resource_limit = res['resourcelimit']
return resource_limit
def get_result(self, resource_limit):
self.result = super(AnsibleCloudStackResourceLimit, self).get_result(resource_limit)
self.result['resource_type'] = self.module.params.get('resource_type')
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
resource_type = dict(required=True, choices=RESOURCE_TYPES.keys(), aliases=['type']),
limit = dict(default=-1, aliases=['max']),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_resource_limit = AnsibleCloudStackResourceLimit(module)
resource_limit = acs_resource_limit.update_resource_limit()
result = acs_resource_limit.get_result(resource_limit)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
leonth/elude | elude/servers/redis.py | 1 | 1616 | import asyncio
import asyncio_redis
from elude import config
from elude.servers import BaseServer
REDIS_REQUEST_WIP_KEY = '_elude:request_wip'
class RedisServer(BaseServer):
def __init__(self, proxy_gatherer, serialize_func, deserialize_func):
super().__init__(proxy_gatherer)
self.serialize = serialize_func
self.deserialize = deserialize_func
self._request_cache = {}
self._conn = None
@asyncio.coroutine
def connect(self):
if self._conn is None:
self._conn = yield from asyncio_redis.Pool.create(host=config.SERVER_REDIS_HOST, port=config.SERVER_REDIS_PORT, password=config.SERVER_REDIS_PASSWORD, db=config.SERVER_REDIS_DB, poolsize=3)
return self._conn
@asyncio.coroutine
def serve(self):
conn = yield from self.connect()
while True:
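            # BRPOPLPUSH atomically moves the next request from the public
            # request queue onto a work-in-progress list, so a request being
            # handled by a crashed worker stays visible instead of being lost.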
request_obj_raw = yield from conn.brpoplpush(config.SERVER_REDIS_REQUEST_KEY, REDIS_REQUEST_WIP_KEY)
try:
request_obj = self.deserialize(request_obj_raw)
self.put_request(request_obj)
except ValueError:
self.process_response({'id': None, 'error': {'code': -32700, 'message': 'Parse error'}})
conn.close()
def process_response(self, result):
@asyncio.coroutine
def really_process():
conn = yield from self.connect()
yield from conn.lpush(config.SERVER_REDIS_RESPONSE_KEY_PREFIX + str(result['id']), [self.serialize(result)])
#yield from self.conn.lrem(REDIS_REQUEST_WIP_KEY, , -1)
asyncio.async(really_process()) | mit |
GustavoHennig/ansible | lib/ansible/modules/database/mongodb/mongodb_user.py | 6 | 16030 | #!/usr/bin/python
# (c) 2012, Elliott Foster <[email protected]>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
name:
description:
- The name of the user to add or remove
required: true
default: null
aliases: [ 'user' ]
password:
description:
- The password to use for the user
required: false
default: null
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database
default: False
ssl_cert_reqs:
version_added: "2.2"
description:
- Specifies whether a certificate is required from the other side of the connection, and whether it will be validated if provided.
required: false
default: "CERT_REQUIRED"
choices: ["CERT_REQUIRED", "CERT_OPTIONAL", "CERT_NONE"]
roles:
version_added: "1.3"
description:
- "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
required: false
default: "readWrite"
state:
description:
- The database user state
required: false
default: present
choices: [ "present", "absent" ]
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.1"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Elliott Foster (@elliotttf)"
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_user:
database: burgers
name: bob
password: 12345
state: present
ssl: True
# Delete 'burgers' database user with name 'bob'.
- mongodb_user:
database: burgers
name: bob
state: absent
# Define more users with various specific roles (if not defined, no roles are assigned, and the user will be added via pre mongo 2.2 style)
- mongodb_user:
database: burgers
name: ben
password: 12345
roles: read
state: present
- mongodb_user:
database: burgers
name: jim
password: 12345
roles: readWrite,dbAdmin,userAdmin
state: present
- mongodb_user:
database: burgers
name: joe
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user:
database: burgers
name: bob
replica_set: belcher
password: 12345
roles: readWriteAnyDatabase
state: present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please note the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- db: local
role: read
'''
RETURN = '''
user:
description: The name of the user to add or remove.
returned: success
type: string
'''
import os
import ssl as ssl_lib
from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six.moves import configparser
# =========================================
# MongoDB module specific support methods.
#
def check_compatibility(module, client):
"""Check the compatibility between the driver and the database.
See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
Args:
module: Ansible module.
client (cursor): Mongodb cursor on admin database.
"""
loose_srv_version = LooseVersion(client.server_info()['version'])
loose_driver_version = LooseVersion(PyMongoVersion)
if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
def user_find(client, user, db_name):
"""Check if the user exists.
Args:
client (cursor): Mongodb cursor on admin database.
user (str): User to check.
db_name (str): User's database.
Returns:
dict: when user exists, False otherwise.
"""
for mongo_user in client["admin"].system.users.find():
if mongo_user['user'] == user:
# NOTE: there is no 'db' field in mongo 2.4.
if 'db' not in mongo_user:
return mongo_user
if mongo_user["db"] == db_name:
return mongo_user
return False
def user_add(module, client, db_name, user, password, roles):
#pymongo's user_add is a _create_or_update_user so we won't know if it was changed or updated
#without reproducing a lot of the logic in database.py of pymongo
db = client[db_name]
if roles is None:
db.add_user(user, password, False)
else:
db.add_user(user, password, None, roles=roles)
def user_remove(module, client, db_name, user):
exists = user_find(client, user, db_name)
if exists:
if module.check_mode:
module.exit_json(changed=True, user=user)
db = client[db_name]
db.remove_user(user)
else:
module.exit_json(changed=False, user=user)
def load_mongocnf():
config = configparser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
try:
config.readfp(open(mongocnf))
creds = dict(
user=config.get('client', 'user'),
password=config.get('client', 'pass')
)
except (configparser.NoOptionError, IOError):
return False
return creds
def check_if_roles_changed(uinfo, roles, db_name):
# We must be aware of users which can read the oplog on a replicaset
# Such users must have access to the local DB, but since this DB does not store users credentials
# and is not synchronized among replica sets, the user must be stored on the admin db
# Therefore their structure is the following :
# {
# "_id" : "admin.oplog_reader",
# "user" : "oplog_reader",
# "db" : "admin", # <-- admin DB
# "roles" : [
# {
# "role" : "read",
# "db" : "local" # <-- local DB
# }
# ]
# }
def make_sure_roles_are_a_list_of_dict(roles, db_name):
output = list()
for role in roles:
if isinstance(role, basestring):
new_role = { "role": role, "db": db_name }
output.append(new_role)
else:
output.append(role)
return output
roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
uinfo_roles = uinfo.get('roles', [])
if sorted(roles_as_list_of_dict) == sorted(uinfo_roles):
return False
return True
# =========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
password=dict(aliases=['pass'], no_log=True),
ssl=dict(default=False, type='bool'),
roles=dict(default=None, type='list'),
state=dict(default='present', choices=['absent', 'present']),
update_password=dict(default="always", choices=["always", "on_create"]),
ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
),
supports_check_mode=True
)
if not pymongo_found:
module.fail_json(msg='the python pymongo module is required')
login_user = module.params['login_user']
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
login_database = module.params['login_database']
replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['name']
password = module.params['password']
ssl = module.params['ssl']
ssl_cert_reqs = None
roles = module.params['roles'] or []
state = module.params['state']
update_password = module.params['update_password']
try:
connection_params = {
"host": login_host,
"port": int(login_port),
}
if replica_set:
connection_params["replicaset"] = replica_set
if ssl:
connection_params["ssl"] = ssl
connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
client = MongoClient(**connection_params)
# NOTE: this check must be done ASAP.
        # We don't need to be authenticated.
check_compatibility(module, client)
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
login_user = mongocnf_creds['user']
login_password = mongocnf_creds['password']
elif login_password is None or login_user is None:
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
#else: this has to be the first admin user added
except Exception:
e = get_exception()
module.fail_json(msg='unable to connect to database: %s' % str(e))
if state == 'present':
if password is None and update_password == 'always':
module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
try:
uinfo = user_find(client, user, db_name)
if update_password != 'always' and uinfo:
password = None
if not check_if_roles_changed(uinfo, roles, db_name):
module.exit_json(changed=False, user=user)
if module.check_mode:
module.exit_json(changed=True, user=user)
user_add(module, client, db_name, user, password, roles)
except Exception:
e = get_exception()
module.fail_json(msg='Unable to add or update user: %s' % str(e))
# Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
#newuinfo = user_find(client, user, db_name)
#if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
# module.exit_json(changed=False, user=user)
elif state == 'absent':
try:
user_remove(module, client, db_name, user)
except Exception:
e = get_exception()
module.fail_json(msg='Unable to remove user: %s' % str(e))
module.exit_json(changed=True, user=user)
if __name__ == '__main__':
main()
| gpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/utils.py | 52 | 1991 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import sys
import unittest
def run_tests(test_case, driver, webserver):
logging.basicConfig(level=logging.WARN)
webserver.start()
try:
testLoader = unittest.TestLoader()
testRunner = unittest.TextTestRunner()
test_case_name = "selenium.test.selenium.webdriver.common.%s" % test_case
if len(sys.argv) > 1:
testMethod = sys.argv[1]
testRunner.run(
testLoader.loadTestsFromName(
"%s.%s" % (test_case_name, testMethod)))
else:
testRunner.run(testLoader.loadTestsFromName(test_case_name))
driver.quit()
finally:
webserver.stop()
def require_online(func):
"""Only exucte the test method if the internet is accessible."""
def testMethod(self):
socket_ = socket.socket()
try:
socket_.settimeout(1)
socket_.connect(("www.google.com", 80))
return func(self)
except socket.error:
return lambda x: None
testMethod.func_name = func.func_name
return testMethod
def convert_cookie_to_json(cookie):
cookie_dict = {}
for key, value in cookie.items():
if key == "expires":
cookie_dict["expiry"] = int(value) * 1000
else:
cookie_dict[key] = value
return cookie_dict
| mit |
eirannejad/pyRevit | extensions/pyRevitTools.extension/pyRevit.tab/Analysis.panel/Tools.stack/Inspect.pulldown/Find All Attached Constraints.pushbutton/script.py | 1 | 1382 | """Find all constraints attached to the selected element."""
from pyrevit import revit, DB
def listconstraints(selected_el):
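    """Print each constraint that references the given element and list its references."""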
print('THIS OBJECT ID: {0}'.format(selected_el.Id))
clconst = DB.FilteredElementCollector(revit.doc)\
.OfCategory(DB.BuiltInCategory.OST_Constraints)\
.WhereElementIsNotElementType()
constlst = set()
for cnst in clconst:
refs = [(x.ElementId, x) for x in cnst.References]
elids = [x[0] for x in refs]
if selected_el.Id in elids:
constlst.add(cnst)
print('CONST TYPE: {0} # OF REFs: {1} CONST ID: {2}'
.format(cnst.GetType().Name.ljust(28),
str(cnst.References.Size).ljust(24),
cnst.Id))
for t in refs:
ref = t[1]
elid = t[0]
if elid == selected_el.Id:
elid = str(elid) + ' (this)'
el = revit.doc.GetElement(ref.ElementId)
print(' {0} LINKED OBJ CATEGORY: {1} ID: {2}'
.format(ref.ElementReferenceType.ToString().ljust(35),
el.Category.Name.ljust(20),
elid))
print('\n')
print('\n')
selection = revit.get_selection()
for el in selection.elements:
listconstraints(el)
| gpl-3.0 |
xiaozhuchacha/OpenBottle | action_earley_srv/scripts/nltk/classify/weka.py | 5 | 12625 | # Natural Language Toolkit: Interface to Weka Classsifiers
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Classifiers that make use of the external 'Weka' package.
"""
from __future__ import print_function
import time
import tempfile
import os
import subprocess
import re
import zipfile
from sys import stdin
from nltk import compat
from nltk.probability import DictionaryProbDist
from nltk.internals import java, config_java
from nltk.classify.api import ClassifierI
_weka_classpath = None
_weka_search = ['.',
'/usr/share/weka',
'/usr/local/share/weka',
'/usr/lib/weka',
'/usr/local/lib/weka',]
def config_weka(classpath=None):
global _weka_classpath
# Make sure java's configured first.
config_java()
if classpath is not None:
_weka_classpath = classpath
if _weka_classpath is None:
searchpath = _weka_search
if 'WEKAHOME' in os.environ:
searchpath.insert(0, os.environ['WEKAHOME'])
for path in searchpath:
if os.path.exists(os.path.join(path, 'weka.jar')):
_weka_classpath = os.path.join(path, 'weka.jar')
version = _check_weka_version(_weka_classpath)
if version:
print(('[Found Weka: %s (version %s)]' %
(_weka_classpath, version)))
else:
print('[Found Weka: %s]' % _weka_classpath)
_check_weka_version(_weka_classpath)
if _weka_classpath is None:
raise LookupError('Unable to find weka.jar! Use config_weka() '
'or set the WEKAHOME environment variable. '
'For more information about Weka, please see '
'http://www.cs.waikato.ac.nz/ml/weka/')
def _check_weka_version(jar):
try:
zf = zipfile.ZipFile(jar)
except (SystemExit, KeyboardInterrupt):
raise
except:
return None
try:
try:
return zf.read('weka/core/version.txt')
except KeyError:
return None
finally:
zf.close()
class WekaClassifier(ClassifierI):
def __init__(self, formatter, model_filename):
self._formatter = formatter
self._model = model_filename
def prob_classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0', '-distribution'])
def classify_many(self, featuresets):
return self._classify_many(featuresets, ['-p', '0'])
def _classify_many(self, featuresets, options):
# Make sure we can find java & weka.
config_weka()
temp_dir = tempfile.mkdtemp()
try:
# Write the test data file.
test_filename = os.path.join(temp_dir, 'test.arff')
self._formatter.write(test_filename, featuresets)
# Call weka to classify the data.
cmd = ['weka.classifiers.bayes.NaiveBayes',
'-l', self._model, '-T', test_filename] + options
(stdout, stderr) = java(cmd, classpath=_weka_classpath,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Check if something went wrong:
if stderr and not stdout:
if 'Illegal options: -distribution' in stderr:
raise ValueError('The installed version of weka does '
'not support probability distribution '
'output.')
else:
raise ValueError('Weka failed to generate output:\n%s'
% stderr)
# Parse weka's output.
return self.parse_weka_output(stdout.decode(stdin.encoding).split('\n'))
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
def parse_weka_distribution(self, s):
probs = [float(v) for v in re.split('[*,]+', s) if v.strip()]
probs = dict(zip(self._formatter.labels(), probs))
return DictionaryProbDist(probs)
def parse_weka_output(self, lines):
# Strip unwanted text from stdout
for i,line in enumerate(lines):
if line.strip().startswith("inst#"):
lines = lines[i:]
break
if lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'prediction']:
return [line.split()[2].split(':')[1]
for line in lines[1:] if line.strip()]
elif lines[0].split() == ['inst#', 'actual', 'predicted',
'error', 'distribution']:
return [self.parse_weka_distribution(line.split()[-1])
for line in lines[1:] if line.strip()]
# is this safe:?
elif re.match(r'^0 \w+ [01]\.[0-9]* \?\s*$', lines[0]):
return [line.split()[1] for line in lines if line.strip()]
else:
for line in lines[:10]:
print(line)
raise ValueError('Unhandled output format -- your version '
'of weka may not be supported.\n'
' Header: %s' % lines[0])
# [xx] full list of classifiers (some may be abstract?):
# ADTree, AODE, BayesNet, ComplementNaiveBayes, ConjunctiveRule,
# DecisionStump, DecisionTable, HyperPipes, IB1, IBk, Id3, J48,
# JRip, KStar, LBR, LeastMedSq, LinearRegression, LMT, Logistic,
# LogisticBase, M5Base, MultilayerPerceptron,
# MultipleClassifiersCombiner, NaiveBayes, NaiveBayesMultinomial,
# NaiveBayesSimple, NBTree, NNge, OneR, PaceRegression, PART,
# PreConstructedLinearModel, Prism, RandomForest,
# RandomizableClassifier, RandomTree, RBFNetwork, REPTree, Ridor,
# RuleNode, SimpleLinearRegression, SimpleLogistic,
# SingleClassifierEnhancer, SMO, SMOreg, UserClassifier, VFI,
# VotedPerceptron, Winnow, ZeroR
_CLASSIFIER_CLASS = {
'naivebayes': 'weka.classifiers.bayes.NaiveBayes',
'C4.5': 'weka.classifiers.trees.J48',
'log_regression': 'weka.classifiers.functions.Logistic',
'svm': 'weka.classifiers.functions.SMO',
'kstar': 'weka.classifiers.lazy.KStar',
'ripper': 'weka.classifiers.rules.JRip',
}
@classmethod
def train(cls, model_filename, featuresets,
classifier='naivebayes', options=[], quiet=True):
# Make sure we can find java & weka.
config_weka()
# Build an ARFF formatter.
formatter = ARFF_Formatter.from_train(featuresets)
temp_dir = tempfile.mkdtemp()
try:
# Write the training data file.
train_filename = os.path.join(temp_dir, 'train.arff')
formatter.write(train_filename, featuresets)
if classifier in cls._CLASSIFIER_CLASS:
javaclass = cls._CLASSIFIER_CLASS[classifier]
elif classifier in cls._CLASSIFIER_CLASS.values():
javaclass = classifier
else:
raise ValueError('Unknown classifier %s' % classifier)
# Train the weka model.
cmd = [javaclass, '-d', model_filename, '-t', train_filename]
cmd += list(options)
if quiet:
stdout = subprocess.PIPE
else: stdout = None
java(cmd, classpath=_weka_classpath, stdout=stdout)
# Return the new classifier.
return WekaClassifier(formatter, model_filename)
finally:
for f in os.listdir(temp_dir):
os.remove(os.path.join(temp_dir, f))
os.rmdir(temp_dir)
class ARFF_Formatter:
"""
Converts featuresets and labeled featuresets to ARFF-formatted
strings, appropriate for input into Weka.
Features and classes can be specified manually in the constructor, or may
be determined from data using ``from_train``.
"""
def __init__(self, labels, features):
"""
:param labels: A list of all class labels that can be generated.
:param features: A list of feature specifications, where
each feature specification is a tuple (fname, ftype);
and ftype is an ARFF type string such as NUMERIC or
STRING.
"""
self._labels = labels
self._features = features
def format(self, tokens):
"""Returns a string representation of ARFF output for the given data."""
return self.header_section() + self.data_section(tokens)
def labels(self):
"""Returns the list of classes."""
return list(self._labels)
def write(self, outfile, tokens):
"""Writes ARFF data to a file for the given data."""
if not hasattr(outfile, 'write'):
outfile = open(outfile, 'w')
outfile.write(self.format(tokens))
outfile.close()
@staticmethod
def from_train(tokens):
"""
Constructs an ARFF_Formatter instance with class labels and feature
types determined from the given data. Handles boolean, numeric and
string (note: not nominal) types.
"""
# Find the set of all attested labels.
labels = set(label for (tok, label) in tokens)
# Determine the types of all features.
features = {}
for tok, label in tokens:
for (fname, fval) in tok.items():
if issubclass(type(fval), bool):
ftype = '{True, False}'
elif issubclass(type(fval), (compat.integer_types, float, bool)):
ftype = 'NUMERIC'
elif issubclass(type(fval), compat.string_types):
ftype = 'STRING'
elif fval is None:
continue # can't tell the type.
else:
                    raise ValueError('Unsupported value type %r' % type(fval))
if features.get(fname, ftype) != ftype:
raise ValueError('Inconsistent type for %s' % fname)
features[fname] = ftype
features = sorted(features.items())
return ARFF_Formatter(labels, features)
def header_section(self):
"""Returns an ARFF header as a string."""
# Header comment.
s = ('% Weka ARFF file\n' +
'% Generated automatically by NLTK\n' +
'%% %s\n\n' % time.ctime())
# Relation name
s += '@RELATION rel\n\n'
# Input attribute specifications
for fname, ftype in self._features:
s += '@ATTRIBUTE %-30r %s\n' % (fname, ftype)
# Label attribute specification
s += '@ATTRIBUTE %-30r {%s}\n' % ('-label-', ','.join(self._labels))
return s
def data_section(self, tokens, labeled=None):
"""
Returns the ARFF data section for the given data.
:param tokens: a list of featuresets (dicts) or labelled featuresets
which are tuples (featureset, label).
:param labeled: Indicates whether the given tokens are labeled
or not. If None, then the tokens will be assumed to be
labeled if the first token's value is a tuple or list.
"""
# Check if the tokens are labeled or unlabeled. If unlabeled,
# then use 'None'
if labeled is None:
labeled = tokens and isinstance(tokens[0], (tuple, list))
if not labeled:
tokens = [(tok, None) for tok in tokens]
# Data section
s = '\n@DATA\n'
for (tok, label) in tokens:
for fname, ftype in self._features:
s += '%s,' % self._fmt_arff_val(tok.get(fname))
s += '%s\n' % self._fmt_arff_val(label)
return s
def _fmt_arff_val(self, fval):
if fval is None:
return '?'
elif isinstance(fval, (bool, compat.integer_types)):
return '%s' % fval
elif isinstance(fval, float):
return '%r' % fval
else:
return '%r' % fval
if __name__ == '__main__':
from nltk.classify.util import names_demo, binary_names_demo_features
def make_classifier(featuresets):
return WekaClassifier.train('/tmp/name.model', featuresets,
'C4.5')
classifier = names_demo(make_classifier, binary_names_demo_features)
| mit |
virneo/opencog | opencog/python/pln_old/examples/context/context_agent.py | 32 | 1282 | """
A MindAgent to test the application of the context rules
"""
from opencog.cogserver import MindAgent
from pln.chainers import Chainer
from pln.rules import *
__author__ = 'Sebastian Ruder'
class ContextAgent(MindAgent):
def __init__(self):
self.chainer = None
def create_chainer(self, atomspace):
self.chainer = Chainer(atomspace,
stimulateAtoms=False,
preferAttentionalFocus=False,
allow_output_with_variables=True,
delete_temporary_variables=True)
self.chainer.add_rule(InheritanceToContextRule(self.chainer))
self.chainer.add_rule(EvaluationToContextRule(self.chainer))
self.chainer.add_rule(SubsetToContextRule(self.chainer))
self.chainer.add_rule(ContextToInheritanceRule(self.chainer))
self.chainer.add_rule(ContextToEvaluationRule(self.chainer))
self.chainer.add_rule(ContextToSubsetRule(self.chainer))
self.chainer.add_rule(ContextFreeToSensitiveRule(self.chainer))
def run(self, atomspace):
if self.chainer is None:
self.create_chainer(atomspace)
return
result = self.chainer.forward_step()
return result
| agpl-3.0 |
jc0n/scrapy | scrapy/commands/__init__.py | 38 | 3532 | """
Base class for Scrapy commands
"""
import os
from optparse import OptionGroup
from twisted.python import failure
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class ScrapyCommand(object):
requires_project = False
crawler_process = None
# default settings to be used for this command instead of global defaults
default_settings = {}
exitcode = 0
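    # Hedged sketch of a minimal subclass; command naming and registration
    # happen outside this module and are assumed here:
    #
    #     class Command(ScrapyCommand):
    #         def short_desc(self):
    #             return "print the configured bot name"
    #         def run(self, args, opts):
    #             print(self.settings['BOT_NAME'])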
def __init__(self):
self.settings = None # set in scrapy.cmdline
def set_crawler(self, crawler):
assert not hasattr(self, '_crawler'), "crawler already set"
self._crawler = crawler
def syntax(self):
"""
Command syntax (preferably one-line). Do not include command name.
"""
return ""
def short_desc(self):
"""
A short description of the command
"""
return ""
def long_desc(self):
"""A long description of the command. Return short description when not
available. It cannot contain newlines, since contents will be formatted
by optparser which removes newlines and wraps text.
"""
return self.short_desc()
def help(self):
"""An extensive help for the command. It will be shown when using the
"help" command. It can contain newlines, since not post-formatting will
be applied to its contents.
"""
return self.long_desc()
def add_options(self, parser):
"""
        Populate the option parser with options available for this command
"""
group = OptionGroup(parser, "Global Options")
group.add_option("--logfile", metavar="FILE",
help="log file. if omitted stderr will be used")
group.add_option("-L", "--loglevel", metavar="LEVEL", default=None,
help="log level (default: %s)" % self.settings['LOG_LEVEL'])
group.add_option("--nolog", action="store_true",
help="disable logging completely")
group.add_option("--profile", metavar="FILE", default=None,
help="write python cProfile stats to FILE")
group.add_option("--pidfile", metavar="FILE",
help="write process ID to FILE")
group.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE",
help="set/override setting (may be repeated)")
group.add_option("--pdb", action="store_true", help="enable pdb on failure")
parser.add_option_group(group)
def process_options(self, args, opts):
try:
self.settings.setdict(arglist_to_dict(opts.set),
priority='cmdline')
except ValueError:
raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)
if opts.logfile:
self.settings.set('LOG_ENABLED', True, priority='cmdline')
self.settings.set('LOG_FILE', opts.logfile, priority='cmdline')
if opts.loglevel:
self.settings.set('LOG_ENABLED', True, priority='cmdline')
self.settings.set('LOG_LEVEL', opts.loglevel, priority='cmdline')
if opts.nolog:
self.settings.set('LOG_ENABLED', False, priority='cmdline')
if opts.pidfile:
with open(opts.pidfile, "w") as f:
f.write(str(os.getpid()) + os.linesep)
if opts.pdb:
failure.startDebugMode()
def run(self, args, opts):
"""
Entry point for running commands
"""
raise NotImplementedError
| bsd-3-clause |
CaptFrank/EsxiController | server/utils/error/taskhandler.py | 1 | 1464 | """
taskhandler.py
==============
This is the task handler for the error handler.
:copyright: (c) 2015 by GammaRay.
:license: BSD, see LICENSE for more details.
Author: GammaRay
Version: 1.0
Date: 3/11/2015
"""
"""
=============================================
Imports
=============================================
"""
from server.utils.error.basehandler import *
"""
=============================================
Source
=============================================
"""
class TaskException(BaseHandler):
"""
    This class provides a base class for error handlers that report a
    problem with a REST API transaction back to the user.
"""
# The base error status code
status_code = 400
def __init__(self, message, status_code = None, payload = None):
"""
This is the base default constructor.
:param message: the message to print
:param status_code: the status code to send
:param payload: the payload to send
:return:
"""
# Override the Exception class
Exception.__init__(self)
# Set the message
self.message = message
# Set the status code
if status_code is not None:
self.status_code = status_code
# Set the payload
self.payload = payload
return | gpl-2.0 |
kaday/rose | t/rose-config/02-self.py | 1 | 7930 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import os.path
import rose.config
from StringIO import StringIO
import unittest
class TestConfigData(unittest.TestCase):
"""Test usage of the rose.config.ConfigNode object."""
def test_init(self):
"""Test empty Config object."""
conf = rose.config.ConfigNode()
self.assertFalse(conf is None)
self.assertEqual(conf.get([]), conf)
self.assertFalse(conf.get(["rubbish"]))
node = conf.get([])
self.assertEqual(node.value, {})
node = conf.get(["rubbish"])
self.assertTrue(node is None)
self.assertTrue(conf.unset(["rubbish"]) is None)
def test_set(self):
"""Test setting/unsetting value/ignored flag in a Config object."""
conf = rose.config.ConfigNode()
self.assertFalse(conf is None)
self.assertEqual(conf.set([], {}), conf)
conf.set(["", "top-option"], "rubbish")
node = conf.get(["", "top-option"])
self.assertEqual((node.value, node.state), ("rubbish", ""))
node = conf.get(["top-option"])
self.assertEqual((node.value, node.state), ("rubbish", ""))
conf.set(["rubbish"], {})
node = conf.get(["rubbish"])
self.assertEqual((node.value, node.state), ({}, ""))
conf.set(["rubbish", "item"], "value")
node = conf.get(["rubbish", "item"])
self.assertEqual((node.value, node.state), ("value", ""))
self.assertEqual(conf.get(["rubbish", "item"]).value, "value")
conf.get(["rubbish", "item"]).state = "!"
node = conf.get(["rubbish", "item"], no_ignore=True)
self.assertTrue(node is None)
self.assertEqual(conf.get(["rubbish", "item"]).value, "value")
conf.get(["rubbish", "item"]).state = ""
self.assertTrue(conf.get(["rubbish", "item"]) is not None)
self.assertEqual(conf.get(["rubbish", "item"]).value, "value")
node = conf.unset(["rubbish", "item"])
self.assertEqual((node.value, node.state), ("value", ""))
self.assertEqual(conf.unset(["rubbish", "item"]), None)
conf.set(["rubbish", "item"], "value", "!!")
node = conf.get(["rubbish", "item"])
self.assertEqual((node.value, node.state), ("value", "!!"))
self.assertTrue(conf.unset(["rubbish"]) is not None)
conf.set(["rubbish"], {})
node = conf.get(["rubbish"])
self.assertEqual((node.value, node.state), ({}, ""))
conf.set(["rubbish", "item"], "value")
node = conf.get(["rubbish", "item"])
self.assertEqual((node.value, node.state), ("value", ""))
conf.get(["rubbish"]).state = "!"
self.assertTrue(conf.get(["rubbish", "item"], True) is None)
def test_iter(self):
"""Test the iterator"""
conf = rose.config.ConfigNode()
conf.set(["", "food"], "glorious")
conf.set(["dinner", "starter"], "soup")
conf.set(["dinner", "dessert"], "custard")
self.assertEqual(list(iter(conf)), ["food", "dinner"])
end_node = conf.get(["", "food"])
self.assertEqual(list(iter(end_node)), [])
class TestConfigDump(unittest.TestCase):
"""Test usage of the rose.config.Dump object."""
def test_dump_empty(self):
"""Test dumping an empty configuration."""
conf = rose.config.ConfigNode({})
dumper = rose.config.ConfigDumper()
target = StringIO()
dumper.dump(conf, target)
self.assertEqual(target.getvalue(), "")
target.close()
def test_dump_normal(self):
"""Test normal dumping a configuration."""
conf = rose.config.ConfigNode({})
conf.set(["foo"], {})
conf.set(["foo", "bar"], "BAR BAR")
conf.set(["foo", "baz"], "BAZ\n BAZ")
conf.set(["egg"], {})
conf.set(["egg", "fried"], "true")
conf.set(["egg", "boiled"], "false")
conf.set(["egg", "scrambled"], "false", "!")
conf.set(["egg", "poached"], "true", "!!")
dumper = rose.config.ConfigDumper()
target = StringIO()
dumper.dump(conf, target)
self.assertEqual(target.getvalue(), """[egg]
boiled=false
fried=true
!!poached=true
!scrambled=false
[foo]
bar=BAR BAR
baz=BAZ
= BAZ
""")
target.close()
def test_dump_root(self):
"""Test dumping of a configuration with root settings."""
conf = rose.config.ConfigNode({}, comments=["hello"])
conf.set(["foo"], "foo", comments=["foo foo", "foo foo"])
conf.set(["bar"], "bar")
conf.set(["baz"], {})
conf.set(["baz", "egg"], "egg")
conf.set(["baz", "ham"], "ham")
dumper = rose.config.ConfigDumper()
target = StringIO()
dumper.dump(conf, target)
self.assertEqual(target.getvalue(), """#hello
bar=bar
#foo foo
#foo foo
foo=foo
[baz]
egg=egg
ham=ham
""")
target.close()
class TestConfigLoad(unittest.TestCase):
"""Test usage of the rose.config.Load object."""
def test_load_empty(self):
"""Test loading an empty configuration."""
conf = rose.config.ConfigNode({})
loader = rose.config.ConfigLoader()
loader.load(os.path.devnull, conf)
self.assertEqual((conf.value, conf.state), ({}, ""))
def test_load_basic(self):
"""Test basic loading a configuration."""
conf = rose.config.ConfigNode({})
source = StringIO("""# test
stuff=stuffing
#eggy
[egg]
boiled=false
fried=true
scrambled=false
[foo]
bar=BAR BAR
baz=BAZ
BAZ
[hello]
!name = fred
!!greet = hi
worlds=earth
= moon
= mars
[foo]
bar=BAR BAR BAR
""")
loader = rose.config.ConfigLoader()
loader.load(source, conf)
source.close()
self.assertEqual(conf.comments, [" test"])
for keys in [[], ["egg"], ["foo"]]:
node = conf.get(keys)
self.assertFalse(node is None)
self.assertEqual(node.state, "")
node = conf.get(["not-defined"])
self.assertTrue(node is None)
for keys, value in [(["egg", "boiled"], "false"),
(["egg", "fried"], "true"),
(["egg", "scrambled"], "false"),
(["foo", "bar"], "BAR BAR BAR"),
(["foo", "baz"], "BAZ\nBAZ"),
(["hello", "worlds"], "earth\n moon\n mars")]:
node = conf.get(keys)
self.assertEqual((node.value, node.state), (value, ""))
node = conf.get(["egg"])
self.assertEqual(node.comments, ["eggy"])
node = conf.get(["stuff"])
self.assertEqual(node.value, "stuffing")
node = conf.get(["hello", "name"], True)
self.assertTrue(node is None)
node = conf.get(["hello", "name"])
self.assertEqual(node.value, "fred")
self.assertEqual(node.state, "!")
node = conf.get(["hello", "greet"])
self.assertEqual(node.value, "hi")
self.assertEqual(node.state, "!!")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
dsfsdgsbngfggb/odoo | addons/stock_invoice_directly/__openerp__.py | 260 | 1618 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoice Picking Directly',
'version': '1.0',
'category' : 'Warehouse Management',
'description': """
Invoice Wizard for Delivery.
============================
When you send or deliver goods, this module automatically launch the invoicing
wizard if the delivery is to be invoiced.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['delivery', 'stock'],
'data': [],
'demo': [],
'test': ['test/stock_invoice_directly.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
SEL-Columbia/commcare-hq | corehq/apps/hqadmin/urls.py | 1 | 2341 | from django.conf.urls.defaults import *
from corehq.apps.reports.dispatcher import AdminReportDispatcher
from .views import FlagBrokenBuilds
urlpatterns = patterns('corehq.apps.hqadmin.views',
url(r'^$', 'default', name="default_admin_report"),
url(r'^export/global/$', 'global_report', name="export_global_report", kwargs=dict(as_export=True)),
url(r'^global/$', 'global_report', name="global_report"),
url(r'^system/$', 'system_info', name="system_info"),
url(r'^user_reports/$', 'mobile_user_reports', name='mobile_user_reports'),
url(r'^system/download_recent_changes', 'download_recent_changes', name="download_recent_changes"),
url(r'^system/system_ajax$', 'system_ajax', name="system_ajax"),
url(r'^system/db_comparisons', 'db_comparisons', name="db_comparisons"),
url(r'^users/$', 'active_users', name="active_users"),
url(r'^commcare_version/$', 'commcare_version_report', name='commcare_version_report'),
url(r'^domain_activity/$', 'domain_activity_report', name='domain_activity_report'),
url(r'^message_logs/$', 'message_log_report', name='message_log_report'),
url(r'^emails/$', 'emails', name='global_email_list'),
url(r'^submissions_errors/$', 'submissions_errors', name='global_submissions_errors'),
url(r'^domains/update/$', 'update_domains', name="domain_update"),
url(r'^mass_email/$', 'mass_email', name="mass_email"),
url(r'^domains/download/$', 'domain_list_download', name="domain_list_download"),
url(r'^noneulized_users/$', 'noneulized_users', name="noneulized_users"),
url(r'^commcare_settings/$', 'all_commcare_settings', name="all_commcare_settings"),
url(r'^broken_suite_files/$', 'find_broken_suite_files', name="find_broken_suite_files"),
url(r'^management_commands/$', 'management_commands', name="management_commands"),
url(r'^run_command/$', 'run_command', name="run_management_command"),
url(r'^phone/restore/$', 'admin_restore', name="admin_restore"),
url(r'^flag_broken_builds/$', FlagBrokenBuilds.as_view(), name="flag_broken_builds"),
url(r'^stats_data/$', 'stats_data', name="admin_stats_data"),
url(r'^loadtest/$', 'loadtest', name="loadtest_report"),
url(r'^reset_pillow_checkpoint/$', 'reset_pillow_checkpoint', name="reset_pillow_checkpoint"),
AdminReportDispatcher.url_pattern(),
)
| bsd-3-clause |
fedspendingtransparency/data-act-core | dataactcore/aws/s3UrlHandler.py | 1 | 4148 | from datetime import datetime
import boto
from boto import sts
import os
import inspect
import json
from dataactcore.config import CONFIG_BROKER
class s3UrlHandler:
"""
    This class acts as a wrapper for S3 URL signing.
"""
BASE_URL = "https://s3.amazonaws.com/"
ENABLE_S3 = True
URL_LIFETIME = 2000
STS_LIFETIME = 2000
S3_ROLE = ""
def __init__(self,name = None):
"""
        Creates the object for signing URLs
        arguments:
        name -- (String) Name of the S3 bucket
"""
if(name == None):
self.bucketRoute = CONFIG_BROKER['aws_bucket']
else:
self.bucketRoute = name
s3UrlHandler.S3_ROLE = CONFIG_BROKER['aws_role']
s3UrlHandler.REGION = CONFIG_BROKER['aws_region']
def _signUrl(self,path,fileName,method="PUT") :
"""
        Signs a URL for the given path, file name and HTTP method
        arguments:
        path -- (String) Folder path within the S3 bucket
        fileName -- (String) File name of file to be uploaded to S3.
        returns signed url (String)
"""
if(s3UrlHandler.ENABLE_S3) :
s3connection = boto.s3.connect_to_region(s3UrlHandler.REGION)
if(method=="PUT") :
return s3connection.generate_url(s3UrlHandler.URL_LIFETIME, method, self.bucketRoute, "/"+path+"/" +fileName,headers={'Content-Type': 'application/octet-stream'})
return s3connection.generate_url(s3UrlHandler.URL_LIFETIME, method, self.bucketRoute, "/"+path+"/" +fileName)
return s3UrlHandler.BASE_URL + "/"+self.bucketRoute +"/"+path+"/" +fileName
def getSignedUrl(self,path,fileName,method="PUT"):
"""
        Signs a URL for PUT or GET requests; PUT uploads get a timestamped file name
arguments:
fileName -- (String) File name of file to be uploaded to S3.
returns signed url (String)
"""
if(method=="PUT"):
            self.s3FileName = s3UrlHandler.getTimestampedFilename(fileName)
else:
self.s3FileName = fileName
return self._signUrl(path,self.s3FileName, method)
@staticmethod
def getTimestampedFilename(filename) :
"""
Gets a Timestamped file name to prevent conflicts on S3 Uploading
"""
seconds = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
return str(seconds)+"_"+filename
@staticmethod
def doesFileExist(filename):
""" Returns True if specified filename exists in the S3 bucket """
# Get key
try:
s3UrlHandler.REGION
except AttributeError as e:
s3UrlHandler.REGION = CONFIG_BROKER["aws_region"]
s3connection = boto.s3.connect_to_region(s3UrlHandler.REGION)
bucket = s3connection.get_bucket(CONFIG_BROKER['aws_bucket'])
key = bucket.get_key(filename)
if key:
return True
else:
return False
def getTemporaryCredentials(self,user):
"""
Gets token that allows for S3 Uploads for seconds set in STS_LIFETIME
"""
stsConnection = sts.connect_to_region(s3UrlHandler.REGION)
role = stsConnection.assume_role(s3UrlHandler.S3_ROLE,"FileUpload"+str(user),duration_seconds=s3UrlHandler.STS_LIFETIME)
credentials ={}
credentials["AccessKeyId"] = role.credentials.access_key
credentials["SecretAccessKey"] = role.credentials.secret_key
credentials["SessionToken"] = role.credentials.session_token
credentials["Expiration"] = role.credentials.expiration
return credentials
@staticmethod
def getFileSize(filename):
""" Returns file size in number of bytes for specified filename, or False if file doesn't exist """
# Get key
try:
s3UrlHandler.REGION
except AttributeError as e:
s3UrlHandler.REGION = CONFIG_BROKER["aws_region"]
s3connection = boto.s3.connect_to_region(s3UrlHandler.REGION)
bucket = s3connection.get_bucket(CONFIG_BROKER['aws_bucket'])
key = bucket.get_key(filename)
if(key == None):
return False
else:
return key.size
| cc0-1.0 |
Chandlercjy/OnePy | OnePy/sys_module/models/calendar.py | 1 | 2501 |
import arrow
from OnePy.sys_module.components.exceptions import BacktestFinished
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.utils.easy_func import get_day_ratio
class Calendar(OnePyEnvBase):
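    """
    Advances env.sys_date by the configured sys_frequency, skipping over
    non-trading periods for the chosen instrument ('A_shares' or 'Forex').
    """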
def __init__(self, instrument):
if instrument == 'A_shares':
self.is_trading_time = self._is_A_shares_trading_time
elif instrument == 'Forex':
self.is_trading_time = self._is_forex_trading_time
def _is_forex_trading_time(self, now: arrow.arrow.Arrow) -> bool:
weekday = now.isoweekday()
date = now.format('YYYY-MM-DD')
if weekday <= 4:
return True
elif weekday == 5:
            if now < arrow.get(f'{date} 22:00'):  # 21:00 during daylight saving time, 22:00 in winter time; keeping the later cutoff is sufficient.
return True
elif weekday == 6:
return False
elif weekday == 7:
if now >= arrow.get(f'{date} 21:00'):
return True
return False
def _is_A_shares_trading_time(self, now: arrow.arrow.Arrow) -> bool:
weekday = now.isoweekday()
date = now.format('YYYY-MM-DD')
if self.env.sys_frequency == 'D':
if weekday <= 5:
return True
else:
if weekday <= 5:
left_1 = arrow.get(f'{date} 09:30')
right_1 = arrow.get(f'{date} 11:30')
left_2 = arrow.get(f'{date} 13:00')
right_2 = arrow.get(f'{date} 15:00')
if left_1 <= now <= right_1 or left_2 <= now <= right_2:
return True
return False
def update_calendar(self):
if self.env.is_live_trading:
self.env.sys_date = arrow.utcnow().format('YYYY-MM-DD HH:mm:ss')
else:
self._check_todate()
ratio = get_day_ratio(self.env.sys_frequency)
new_sys_date = arrow.get(self.env.sys_date).shift(days=ratio)
self.env.sys_date = new_sys_date.format('YYYY-MM-DD HH:mm:ss')
while not self.is_trading_time(new_sys_date):
self._check_todate()
new_sys_date = arrow.get(self.env.sys_date).shift(days=ratio)
self.env.sys_date = new_sys_date.format('YYYY-MM-DD HH:mm:ss')
def _check_todate(self):
if arrow.get(self.env.sys_date) >= arrow.get(self.env.todate):
            # TODO: there is still at least one ticker whose time exceeds the end date
raise BacktestFinished
| mit |
CFIS-Octarine/octarine | src/ossos_tests/test_ossos/test_tools/test_coding.py | 2 | 1122 | __author__ = "David Rusk <[email protected]>"
import unittest
from hamcrest import assert_that, equal_to
from ossos import coding
class EncodingDecodingTest(unittest.TestCase):
def test_base36_encode_decode_1(self):
assert_that(coding.base36encode(1), equal_to("1"))
assert_that(coding.base36decode("1"), equal_to(1))
def test_base36_encode_10(self):
assert_that(coding.base36encode(10), equal_to("A"))
assert_that(coding.base36decode("A"), equal_to(10))
def test_base36_encode_100(self):
assert_that(coding.base36encode(100), equal_to("2S"))
assert_that(coding.base36decode("2S"), equal_to(100))
def test_base36_encode_10000(self):
assert_that(coding.base36encode(10000), equal_to("7PS"))
assert_that(coding.base36decode("7PS"), equal_to(10000))
def test_base36_encode_pad_short(self):
assert_that(coding.base36encode(1, pad_length=2), equal_to("01"))
def test_base36_encode_pad_long(self):
assert_that(coding.base36encode(10000, pad_length=2), equal_to("7PS"))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |