| column | type | range / values |
|---|---|---|
| repo_name | string | lengths 5–100 |
| path | string | lengths 4–299 |
| copies | string | 990 classes |
| size | string | lengths 4–7 |
| content | string | lengths 666–1.03M |
| license | string | 15 classes |
| hash | int64 | -9,223,351,895,964,839,000 to 9,223,297,778B |
| line_mean | float64 | 3.17–100 |
| line_max | int64 | 7–1k |
| alpha_frac | float64 | 0.25–0.98 |
| autogenerated | bool | 1 class |
dayatz/taiga-back | taiga/projects/tasks/migrations/0009_auto_20151104_1131.py | 2 | 1459 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import connection, migrations, models
def set_finished_date_for_tasks(apps, schema_editor):
# Updates the finished date from tasks according to the history_entries associated
# It takes the last history change updating the status of a task and if it's a closed
# one it updates the finished_date attribute
sql="""
WITH status_update AS(
WITH status_update AS(
WITH history_entries AS (
SELECT
diff #>>'{status, 1}' new_status_id,
regexp_split_to_array(key, ':') as split_key,
created_at as date
FROM history_historyentry
WHERE diff #>>'{status, 1}' != ''
)
SELECT
split_key[2] as object_id,
new_status_id::int,
MAX(date) as status_change_datetime
FROM history_entries
WHERE split_key[1] = 'tasks.task'
GROUP BY object_id, new_status_id, date
)
SELECT status_update.*
FROM status_update
INNER JOIN projects_taskstatus
ON projects_taskstatus.id = new_status_id AND projects_taskstatus.is_closed = True
)
UPDATE tasks_task
SET finished_date = status_update.status_change_datetime
FROM status_update
WHERE tasks_task.id = status_update.object_id::int
"""
cursor = connection.cursor()
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('tasks', '0008_remove_task_watchers'),
]
operations = [
migrations.RunPython(set_finished_date_for_tasks),
]
| agpl-3.0 | -1,495,348,254,073,087,500 | 28.18 | 90 | 0.696367 | false |
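The migration above only defines a forward step, so Django cannot unapply it. A minimal sketch of a reversible variant is shown below; it reuses the `set_finished_date_for_tasks()` function defined in the file above and assumes a Django version (1.8+) that provides `migrations.RunPython.noop`.

```python
# Sketch only: a reversible variant of the migration above.  It reuses the
# set_finished_date_for_tasks() function defined in this file and assumes
# Django >= 1.8, which provides migrations.RunPython.noop.
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('tasks', '0008_remove_task_watchers'),
    ]

    operations = [
        # noop lets the migration be unapplied without touching the data;
        # finished_date is derived and can simply be recomputed forward.
        migrations.RunPython(set_finished_date_for_tasks,
                             migrations.RunPython.noop),
    ]
```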
PaddlePaddle/models | PaddleCV/image_classification/legacy/dist_train/batch_merge.py | 1 | 2499 |
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import paddle.fluid as fluid
import numpy as np
def copyback_repeat_bn_params(main_prog):
repeat_vars = set()
for op in main_prog.global_block().ops:
if op.type == "batch_norm":
repeat_vars.add(op.input("Mean")[0])
repeat_vars.add(op.input("Variance")[0])
for vname in repeat_vars:
real_var = fluid.global_scope().find_var("%s.repeat.0" %
vname).get_tensor()
orig_var = fluid.global_scope().find_var(vname).get_tensor()
orig_var.set(np.array(real_var), fluid.CUDAPlace(0)) # test on GPU0
def append_bn_repeat_init_op(main_prog, startup_prog, num_repeats):
repeat_vars = set()
for op in main_prog.global_block().ops:
if op.type == "batch_norm":
repeat_vars.add(op.input("Mean")[0])
repeat_vars.add(op.input("Variance")[0])
for i in range(num_repeats):
for op in startup_prog.global_block().ops:
if op.type == "fill_constant":
for oname in op.output_arg_names:
if oname in repeat_vars:
var = startup_prog.global_block().var(oname)
repeat_var_name = "%s.repeat.%d" % (oname, i)
repeat_var = startup_prog.global_block().create_var(
name=repeat_var_name,
type=var.type,
dtype=var.dtype,
shape=var.shape,
persistable=var.persistable)
main_prog.global_block()._clone_variable(repeat_var)
startup_prog.global_block().append_op(
type="fill_constant",
inputs={},
outputs={"Out": repeat_var},
attrs=op.all_attrs())
| apache-2.0 | -5,866,482,817,361,182,000 | 42.842105 | 76 | 0.561825 | false |
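The two helpers above rely on a simple naming convention for the per-micro-batch copies of the batch-norm statistics. The following standalone snippet (plain Python, with made-up variable names) only illustrates the `"<name>.repeat.<i>"` pattern that `append_bn_repeat_init_op()` creates and whose `.repeat.0` copy `copyback_repeat_bn_params()` later writes back.

```python
# Illustration only (hypothetical variable names): the "<name>.repeat.<i>"
# clones created for each batch-norm statistic.
repeat_vars = ["bn_0.mean", "bn_0.variance"]
num_repeats = 2

clones = ["%s.repeat.%d" % (name, i)
          for name in repeat_vars
          for i in range(num_repeats)]
print(clones)
# ['bn_0.mean.repeat.0', 'bn_0.mean.repeat.1',
#  'bn_0.variance.repeat.0', 'bn_0.variance.repeat.1']
```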
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/build_survey_data/calibration_aliss.py | 4 | 6081 |
# -*- coding: utf-8 -*-
from __future__ import division
import itertools
import os
import pandas
import pkg_resources
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_survey_manager import default_config_files_directory as config_files_directory
elasticities_path = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location,
'openfisca_france_indirect_taxation',
'assets',
'aliss',
)
def build_clean_aliss_data_frame():
year = 2011
aliss_survey_collection = SurveyCollection.load(
collection = 'aliss', config_files_directory = config_files_directory
)
survey = aliss_survey_collection.get_survey('aliss_{}'.format(year))
aliss = survey.get_values(table = 'Base_ALISS_2011')
aliss['age'] = 99
aliss['revenus'] = 99
triplets = [
('1 : Jeune/Ais', 0, 3),
('2 : Jeune/MoyenSup', 0, 2),
('3 : Jeune/MoyenInf', 0, 1),
('4 : Jeune/Modeste', 0, 0),
('5 : Age Moyen/Ais', 1, 3),
('6 : Age Moyen/MoyenSup', 1, 2),
('7 : Age Moyen/MoyenInf', 1, 1),
('8 : Age Moyen/Modeste', 1, 0),
('9 : Age Sup/Ais', 2, 3),
('10 : Age Sup/MoyenSup', 2, 2),
('11 : Age Sup/MoyenInf', 2, 1),
('12 : Age Sup/Modeste', 2, 0),
('13 : Vieux/Ais', 3, 3),
('14 : Vieux/MoyenSup', 3, 2),
('15 : Vieux/MoyenInf', 3, 1),
('16 : Vieux/Modeste', 3, 0),
]
for household_type, age, revenus in triplets:
print household_type, age, revenus
selection = aliss.type.str.startswith(household_type)
aliss.loc[selection, 'age'] = age
aliss.loc[selection, 'revenus'] = revenus
assert aliss.age.isin(range(4)).all()
assert aliss.revenus.isin(range(4)).all()
del aliss['type']
return aliss
def compute_correction_coefficient():
# Calculer les cales
pass
def compute_kantar_elasticities(aliss):
nomf_by_dirty_nomf = {
'1 : Juices': 'Juic',
'2 : Alcohol': 'Alc',
'3 : Soft drinks': 'SD',
'4 : Bottled water': 'Wat',
'5 : Coffee and tea': 'Cof',
'6 : Fresh fruits and vegetables': 'FFV',
'7 : Spices': 'Spices',
'8 : Plant-based foods high in fats': 'PBF',
'9 : Plant-based dishes': 'PBD',
'10 : Plant-based foods high in sugar': 'PBS',
'11 : Starchy foods': 'Starch',
'12 : Processed fruits and vegetables': 'PFV',
'13 : Beef': 'Beef',
'14 : Other meats': 'OM',
'15 : Cooked meats': 'CM',
'16 : Animal-based foods high in fats': 'ABF',
'17 : Cheese': 'Cheese',
'18 : Fish and seafoods': 'Fish',
'19 : Dairy products': 'Dairy',
'20 : Prepared mixed meals': 'PrepM',
'21 : Prepared desserts': 'PrepD',
}
nomf_nomk = aliss.query('age == 0 & revenus == 0')[['nomf', 'nomk']]
(nomf_nomk.nomk.value_counts() == 1).all()
nomf_by_nomk = nomf_nomk.set_index('nomk').to_dict()['nomf']
nomks_by_nomf = dict(
(nomf_by_dirty_nomf.get(nomf), nomf_nomk.query('nomf == @nomf')['nomk'].unique())
for nomf in nomf_nomk.nomf.unique()
)
# budget shares
budget_share_path = os.path.join(elasticities_path, 'budget_share.csv')
if os.path.exists(budget_share_path):
kantar_budget_share = pandas.read_csv(budget_share_path)
else:
kantar_budget_share = pandas.DataFrame()
for age, revenus, nomf in itertools.product(aliss.age.unique(), aliss.revenus.unique(), aliss.nomf.unique()):
extract = aliss.query(
'nomf == @nomf & age == @age & revenus == @revenus'
)[
['age', 'revenus', 'nomk', 'dm_k', 'dm_f']
]
assert len(extract.dm_f.unique()) == 1
extract['budget_share_kf'] = extract.dm_k / extract.dm_f
extract['nomf'] = nomf_by_dirty_nomf.get(nomf)
kantar_budget_share = kantar_budget_share.append(extract)
kantar_budget_share.fillna(0, inplace = True)
kantar_budget_share.to_csv(budget_share_path)
csv_path_name = os.path.join(
elasticities_path,
'cross_price_elasticities.csv',
)
nomf_cross_price_elasticities = pandas.read_csv(csv_path_name)
nomks = aliss.nomk.unique()
nomk_cross_price_elasticity = pandas.DataFrame(
index = nomks,
columns = list(nomks) + ['age', 'revenus'],
)
for age, revenus in itertools.product(aliss.age.unique(), aliss.revenus.unique()):
nomf_cross_price_elasticity = nomf_cross_price_elasticities.query(
'age == @age & revenus == @revenus').set_index('product')
nomf_cross_price_elasticity.drop(['age', 'revenus'], axis = 1, inplace = True)
nomfs = nomf_cross_price_elasticity.index.unique()
for f, fprime in itertools.product(nomfs, nomfs):
elasticity_ffprime = nomf_cross_price_elasticity.loc[f, fprime]
elasticity_kkprime = pandas.DataFrame(
index = nomks_by_nomf[f],
columns = nomks_by_nomf[fprime],
)
nomks_for_fprime = nomks_by_nomf[fprime]
budget_share = kantar_budget_share.query(
'age == @age & revenus == @revenus & nomk in @nomks_for_fprime & nomf == @fprime'
)[['nomk', 'budget_share_kf']].set_index(('nomk'))
transposed_elasticity_kkprime = elasticity_kkprime.T
transposed_elasticity_kkprime.loc[nomks_for_fprime] = budget_share * elasticity_ffprime
elasticity_kkprime = transposed_elasticity_kkprime.T
elasticity_kkprime['age'] = age
elasticity_kkprime['revenus'] = revenus
nomk_cross_price_elasticity = nomk_cross_price_elasticity.combine_first(elasticity_kkprime)
return nomk_cross_price_elasticity
if __name__ == '__main__':
aliss = build_clean_aliss_data_frame()
kantar_elasticities = compute_kantar_elasticities(aliss)
| agpl-3.0 | -2,006,823,806,000,192,500 | 34.770588 | 117 | 0.582306 | false |
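The core of `compute_kantar_elasticities()` spreads a food-group cross-price elasticity over the products of the target group in proportion to their budget shares (`dm_k / dm_f`). A toy pandas sketch of that single scaling step, with invented numbers rather than real ALISS data:

```python
# Toy example of the scaling step above, with invented numbers: a group-level
# cross-price elasticity is split over the products k of the target group f'
# in proportion to each product's budget share within that group.
import pandas as pd

budget_share_kf = pd.Series({'cola': 0.6, 'lemonade': 0.4})  # shares within f'
elasticity_ffprime = -0.8                                    # group-level value

elasticity_kkprime = budget_share_kf * elasticity_ffprime
print(elasticity_kkprime)
# cola       -0.48
# lemonade   -0.32
# dtype: float64
```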
molobrakos/home-assistant | tests/helpers/test_template.py | 4 | 39409 |
"""Test Home Assistant template helper methods."""
import asyncio
from datetime import datetime
import unittest
import random
import math
import pytz
from unittest.mock import patch
from homeassistant.components import group
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import template
from homeassistant.util.unit_system import UnitSystem
from homeassistant.const import (
LENGTH_METERS,
TEMP_CELSIUS,
MASS_GRAMS,
PRESSURE_PA,
VOLUME_LITERS,
MATCH_ALL,
)
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
import pytest
class TestHelpersTemplate(unittest.TestCase):
"""Test the Template."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up the tests."""
self.hass = get_test_home_assistant()
self.hass.config.units = UnitSystem('custom', TEMP_CELSIUS,
LENGTH_METERS, VOLUME_LITERS,
MASS_GRAMS, PRESSURE_PA)
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down stuff we started."""
self.hass.stop()
def test_referring_states_by_entity_id(self):
"""Test referring states by entity id."""
self.hass.states.set('test.object', 'happy')
assert 'happy' == \
template.Template(
'{{ states.test.object.state }}', self.hass).render()
def test_iterating_all_states(self):
"""Test iterating all states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.temperature', 10)
assert '10happy' == \
template.Template(
'{% for state in states %}{{ state.state }}{% endfor %}',
self.hass).render()
def test_iterating_domain_states(self):
"""Test iterating domain states."""
self.hass.states.set('test.object', 'happy')
self.hass.states.set('sensor.back_door', 'open')
self.hass.states.set('sensor.temperature', 10)
assert 'open10' == \
template.Template("""
{% for state in states.sensor %}{{ state.state }}{% endfor %}
""", self.hass).render()
def test_float(self):
"""Test float."""
self.hass.states.set('sensor.temperature', '12')
assert '12.0' == \
template.Template(
'{{ float(states.sensor.temperature.state) }}',
self.hass).render()
assert 'True' == \
template.Template(
'{{ float(states.sensor.temperature.state) > 11 }}',
self.hass).render()
def test_rounding_value(self):
"""Test rounding value."""
self.hass.states.set('sensor.temperature', 12.78)
assert '12.8' == \
template.Template(
'{{ states.sensor.temperature.state | round(1) }}',
self.hass).render()
assert '128' == \
template.Template(
'{{ states.sensor.temperature.state | multiply(10) | round }}',
self.hass).render()
assert '12.7' == \
template.Template(
'{{ states.sensor.temperature.state | round(1, "floor") }}',
self.hass).render()
assert '12.8' == \
template.Template(
'{{ states.sensor.temperature.state | round(1, "ceil") }}',
self.hass).render()
def test_rounding_value_get_original_value_on_error(self):
"""Test rounding value get original value on error."""
assert 'None' == \
template.Template('{{ None | round }}', self.hass).render()
assert 'no_number' == \
template.Template(
'{{ "no_number" | round }}', self.hass).render()
def test_multiply(self):
"""Test multiply."""
tests = {
None: 'None',
10: '100',
'"abcd"': 'abcd'
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | multiply(10) | round }}' % inp,
self.hass).render()
def test_logarithm(self):
"""Test logarithm."""
tests = [
(4, 2, '2.0'),
(1000, 10, '3.0'),
(math.e, '', '1.0'),
('"invalid"', '_', 'invalid'),
(10, '"invalid"', '10.0'),
]
for value, base, expected in tests:
assert expected == \
template.Template(
'{{ %s | log(%s) | round(1) }}' % (value, base),
self.hass).render()
assert expected == \
template.Template(
'{{ log(%s, %s) | round(1) }}' % (value, base),
self.hass).render()
def test_sine(self):
"""Test sine."""
tests = [
(0, '0.0'),
(math.pi / 2, '1.0'),
(math.pi, '0.0'),
(math.pi * 1.5, '-1.0'),
(math.pi / 10, '0.309')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sin | round(3) }}' % value,
self.hass).render()
def test_cos(self):
"""Test cosine."""
tests = [
(0, '1.0'),
(math.pi / 2, '0.0'),
(math.pi, '-1.0'),
(math.pi * 1.5, '-0.0'),
(math.pi / 10, '0.951')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | cos | round(3) }}' % value,
self.hass).render()
def test_tan(self):
"""Test tangent."""
tests = [
(0, '0.0'),
(math.pi, '-0.0'),
(math.pi / 180 * 45, '1.0'),
(math.pi / 180 * 90, '1.633123935319537e+16'),
(math.pi / 180 * 135, '-1.0')
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | tan | round(3) }}' % value,
self.hass).render()
def test_sqrt(self):
"""Test square root."""
tests = [
(0, '0.0'),
(1, '1.0'),
(2, '1.414'),
(10, '3.162'),
(100, '10.0'),
]
for value, expected in tests:
assert expected == \
template.Template(
'{{ %s | sqrt | round(3) }}' % value,
self.hass).render()
def test_strptime(self):
"""Test the parse timestamp method."""
tests = [
('2016-10-19 15:22:05.588122 UTC',
'%Y-%m-%d %H:%M:%S.%f %Z', None),
('2016-10-19 15:22:05.588122+0100',
'%Y-%m-%d %H:%M:%S.%f%z', None),
('2016-10-19 15:22:05.588122',
'%Y-%m-%d %H:%M:%S.%f', None),
('2016-10-19', '%Y-%m-%d', None),
('2016', '%Y', None),
('15:22:05', '%H:%M:%S', None),
('1469119144', '%Y', '1469119144'),
('invalid', '%Y', 'invalid')
]
for inp, fmt, expected in tests:
if expected is None:
expected = datetime.strptime(inp, fmt)
temp = '{{ strptime(\'%s\', \'%s\') }}' % (inp, fmt)
assert str(expected) == \
template.Template(temp, self.hass).render()
def test_timestamp_custom(self):
"""Test the timestamps to custom filter."""
now = dt_util.utcnow()
tests = [
(None, None, None, 'None'),
(1469119144, None, True, '2016-07-21 16:39:04'),
(1469119144, '%Y', True, '2016'),
(1469119144, 'invalid', True, 'invalid'),
(dt_util.as_timestamp(now), None, False,
now.strftime('%Y-%m-%d %H:%M:%S'))
]
for inp, fmt, local, out in tests:
if fmt:
fil = 'timestamp_custom(\'{}\')'.format(fmt)
elif fmt and local:
fil = 'timestamp_custom(\'{0}\', {1})'.format(fmt, local)
else:
fil = 'timestamp_custom'
assert out == template.Template(
'{{ %s | %s }}' % (inp, fil), self.hass).render()
def test_timestamp_local(self):
"""Test the timestamps to local filter."""
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_local }}' % inp,
self.hass).render()
def test_min(self):
"""Test the min filter."""
assert '1' == \
template.Template('{{ [1, 2, 3] | min }}',
self.hass).render()
def test_max(self):
"""Test the max filter."""
assert '3' == \
template.Template('{{ [1, 2, 3] | max }}',
self.hass).render()
def test_base64_encode(self):
"""Test the base64_encode filter."""
self.assertEqual(
'aG9tZWFzc2lzdGFudA==',
template.Template('{{ "homeassistant" | base64_encode }}',
self.hass).render())
def test_base64_decode(self):
"""Test the base64_decode filter."""
self.assertEqual(
'homeassistant',
template.Template('{{ "aG9tZWFzc2lzdGFudA==" | base64_decode }}',
self.hass).render())
def test_ordinal(self):
"""Test the ordinal filter."""
tests = [
(1, '1st'),
(2, '2nd'),
(3, '3rd'),
(4, '4th'),
(5, '5th'),
]
for value, expected in tests:
self.assertEqual(
expected,
template.Template(
'{{ %s | ordinal }}' % value,
self.hass).render())
def test_timestamp_utc(self):
"""Test the timestamps to local filter."""
now = dt_util.utcnow()
tests = {
None: 'None',
1469119144: '2016-07-21 16:39:04',
dt_util.as_timestamp(now):
now.strftime('%Y-%m-%d %H:%M:%S')
}
for inp, out in tests.items():
assert out == \
template.Template('{{ %s | timestamp_utc }}' % inp,
self.hass).render()
def test_as_timestamp(self):
"""Test the as_timestamp function."""
assert "None" == \
template.Template(
'{{ as_timestamp("invalid") }}', self.hass).render()
self.hass.mock = None
assert "None" == \
template.Template('{{ as_timestamp(states.mock) }}',
self.hass).render()
tpl = '{{ as_timestamp(strptime("2024-02-03T09:10:24+0000", ' \
'"%Y-%m-%dT%H:%M:%S%z")) }}'
assert "1706951424.0" == \
template.Template(tpl, self.hass).render()
@patch.object(random, 'choice')
def test_random_every_time(self, test_choice):
"""Ensure the random filter runs every time, not just once."""
tpl = template.Template('{{ [1,2] | random }}', self.hass)
test_choice.return_value = 'foo'
assert 'foo' == tpl.render()
test_choice.return_value = 'bar'
assert 'bar' == tpl.render()
def test_passing_vars_as_keywords(self):
"""Test passing variables as keywords."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render(hello=127)
def test_passing_vars_as_vars(self):
"""Test passing variables as variables."""
assert '127' == \
template.Template('{{ hello }}', self.hass).render({'hello': 127})
def test_passing_vars_as_list(self):
"""Test passing variables as list."""
assert "['foo', 'bar']" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': ['foo', 'bar']})
def test_passing_vars_as_list_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello[1] }}',
self.hass),
{'hello': ['foo', 'bar']})
def test_passing_vars_as_dict_element(self):
"""Test passing variables as list."""
assert 'bar' == \
template.render_complex(template.Template('{{ hello.foo }}',
self.hass),
{'hello': {'foo': 'bar'}})
def test_passing_vars_as_dict(self):
"""Test passing variables as list."""
assert "{'foo': 'bar'}" == \
template.render_complex(template.Template('{{ hello }}',
self.hass), {'hello': {'foo': 'bar'}})
def test_render_with_possible_json_value_with_valid_json(self):
"""Render with possible JSON value with valid JSON."""
tpl = template.Template('{{ value_json.hello }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_with_invalid_json(self):
"""Render with possible JSON value with invalid JSON."""
tpl = template.Template('{{ value_json }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{ I AM NOT JSON }')
def test_render_with_possible_json_value_with_template_error_value(self):
"""Render with possible JSON value with template error value."""
tpl = template.Template('{{ non_existing.variable }}', self.hass)
assert '-' == \
tpl.render_with_possible_json_value('hello', '-')
def test_render_with_possible_json_value_with_missing_json_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.goodbye }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_valid_with_is_defined(self):
"""Render with possible JSON value with known JSON object."""
tpl = template.Template('{{ value_json.hello|is_defined }}', self.hass)
assert 'world' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '{"hello": "world"}' == \
tpl.render_with_possible_json_value('{"hello": "world"}')
def test_render_with_possible_json_value_undefined_json_error_value(self):
"""Render with possible JSON value with unknown JSON object."""
tpl = template.Template('{{ value_json.bye|is_defined }}', self.hass)
assert '' == \
tpl.render_with_possible_json_value('{"hello": "world"}', '')
def test_render_with_possible_json_value_non_string_value(self):
"""Render with possible JSON value with non-string value."""
tpl = template.Template("""
{{ strptime(value~'+0000', '%Y-%m-%d %H:%M:%S%z') }}
""", self.hass)
value = datetime(2019, 1, 18, 12, 13, 14)
expected = str(pytz.utc.localize(value))
assert expected == \
tpl.render_with_possible_json_value(value)
def test_raise_exception_on_error(self):
"""Test raising an exception on error."""
with pytest.raises(TemplateError):
template.Template('{{ invalid_syntax').ensure_valid()
def test_if_state_exists(self):
"""Test if state exists works."""
self.hass.states.set('test.object', 'available')
tpl = template.Template(
'{% if states.test.object %}exists{% else %}not exists{% endif %}',
self.hass)
assert 'exists' == tpl.render()
def test_is_state(self):
"""Test is_state method."""
self.hass.states.set('test.object', 'available')
tpl = template.Template("""
{% if is_state("test.object", "available") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state("test.noobject", "available") }}
""", self.hass)
assert 'False' == tpl.render()
def test_is_state_attr(self):
"""Test is_state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if is_state_attr("test.object", "mode", "on") %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ is_state_attr("test.noobject", "mode", "on") }}
""", self.hass)
assert 'False' == tpl.render()
def test_state_attr(self):
"""Test state_attr method."""
self.hass.states.set('test.object', 'available', {'mode': 'on'})
tpl = template.Template("""
{% if state_attr("test.object", "mode") == "on" %}yes{% else %}no{% endif %}
""", self.hass)
assert 'yes' == tpl.render()
tpl = template.Template("""
{{ state_attr("test.noobject", "mode") == None }}
""", self.hass)
assert 'True' == tpl.render()
def test_states_function(self):
"""Test using states as a function."""
self.hass.states.set('test.object', 'available')
tpl = template.Template('{{ states("test.object") }}', self.hass)
assert 'available' == tpl.render()
tpl2 = template.Template('{{ states("test.object2") }}', self.hass)
assert 'unknown' == tpl2.render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_now(self, mock_is_safe):
"""Test now method."""
now = dt_util.now()
with patch.dict(template.ENV.globals, {'now': lambda: now}):
assert now.isoformat() == \
template.Template('{{ now().isoformat() }}',
self.hass).render()
@patch('homeassistant.helpers.template.TemplateEnvironment.'
'is_safe_callable', return_value=True)
def test_utcnow(self, mock_is_safe):
"""Test utcnow method."""
now = dt_util.utcnow()
with patch.dict(template.ENV.globals, {'utcnow': lambda: now}):
assert now.isoformat() == \
template.Template('{{ utcnow().isoformat() }}',
self.hass).render()
def test_regex_match(self):
"""Test regex_match method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_match('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_match('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_match('home') }}
""", self.hass)
assert 'False' == tpl.render()
def test_regex_search(self):
"""Test regex_search method."""
tpl = template.Template(r"""
{{ '123-456-7890' | regex_search('(\\d{3})-(\\d{3})-(\\d{4})') }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'home assistant test' | regex_search('Home', True) }}
""", self.hass)
assert 'True' == tpl.render()
tpl = template.Template("""
{{ 'Another home assistant test' | regex_search('home') }}
""", self.hass)
assert 'True' == tpl.render()
def test_regex_replace(self):
"""Test regex_replace method."""
tpl = template.Template(r"""
{{ 'Hello World' | regex_replace('(Hello\\s)',) }}
""", self.hass)
assert 'World' == tpl.render()
def test_regex_findall_index(self):
"""Test regex_findall_index method."""
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 0) }}
""", self.hass)
assert 'JFK' == tpl.render()
tpl = template.Template("""
{{ 'Flight from JFK to LHR' | regex_findall_index('([A-Z]{3})', 1) }}
""", self.hass)
assert 'LHR' == tpl.render()
def test_bitwise_and(self):
"""Test bitwise_and method."""
tpl = template.Template("""
{{ 8 | bitwise_and(8) }}
""", self.hass)
assert str(8 & 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_and(2) }}
""", self.hass)
assert str(10 & 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_and(2) }}
""", self.hass)
assert str(8 & 2) == tpl.render()
def test_bitwise_or(self):
"""Test bitwise_or method."""
tpl = template.Template("""
{{ 8 | bitwise_or(8) }}
""", self.hass)
assert str(8 | 8) == tpl.render()
tpl = template.Template("""
{{ 10 | bitwise_or(2) }}
""", self.hass)
assert str(10 | 2) == tpl.render()
tpl = template.Template("""
{{ 8 | bitwise_or(2) }}
""", self.hass)
assert str(8 | 2) == tpl.render()
def test_distance_function_with_1_state(self):
"""Test distance function with 1 state."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
tpl = template.Template('{{ distance(states.test.object) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_2_states(self):
"""Test distance function with 2 states."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance(states.test.object, states.test.object_2) | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_coord(self):
"""Test distance function with 1 coord."""
tpl = template.Template(
'{{ distance("32.87336", "-117.22943") | round }}', self.hass)
assert '187' == \
tpl.render()
def test_distance_function_with_2_coords(self):
"""Test distance function with 2 coords."""
assert '187' == \
template.Template(
'{{ distance("32.87336", "-117.22943", %s, %s) | round }}'
% (self.hass.config.latitude, self.hass.config.longitude),
self.hass).render()
def test_distance_function_with_1_state_1_coord(self):
"""Test distance function with 1 state 1 coord."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("32.87336", "-117.22943", states.test.object_2) '
'| round }}', self.hass)
assert '187' == tpl.render()
tpl2 = template.Template(
'{{ distance(states.test.object_2, "32.87336", "-117.22943") '
'| round }}', self.hass)
assert '187' == tpl2.render()
def test_distance_function_return_None_if_invalid_state(self):
"""Test distance function return None if invalid state."""
self.hass.states.set('test.object_2', 'happy', {
'latitude': 10,
})
tpl = template.Template('{{ distance(states.test.object_2) | round }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_return_None_if_invalid_coord(self):
"""Test distance function return None if invalid coord."""
assert 'None' == \
template.Template(
'{{ distance("123", "abc") }}', self.hass).render()
assert 'None' == \
template.Template('{{ distance("123") }}', self.hass).render()
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template('{{ distance("123", states.test_object_2) }}',
self.hass)
assert 'None' == \
tpl.render()
def test_distance_function_with_2_entity_ids(self):
"""Test distance function with 2 entity ids."""
self.hass.states.set('test.object', 'happy', {
'latitude': 32.87336,
'longitude': -117.22943,
})
self.hass.states.set('test.object_2', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "test.object_2") | round }}',
self.hass)
assert '187' == tpl.render()
def test_distance_function_with_1_entity_1_coord(self):
"""Test distance function with 1 entity_id and 1 coord."""
self.hass.states.set('test.object', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
tpl = template.Template(
'{{ distance("test.object", "32.87336", "-117.22943") | round }}',
self.hass)
assert '187' == tpl.render()
def test_closest_function_home_vs_domain(self):
"""Test closest function home vs domain."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_test_domain.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain.object' == \
template.Template('{{ closest(states.test_domain).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_all_states(self):
"""Test closest function home vs all states."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain_2.and_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
assert 'test_domain_2.and_closer' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_entity_id(self):
"""Test closest function home vs group entity id."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest("group.location_group").entity_id }}',
self.hass).render()
def test_closest_function_home_vs_group_state(self):
"""Test closest function home vs group state."""
self.hass.states.set('test_domain.object', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('not_in_group.but_closer', 'happy', {
'latitude': self.hass.config.latitude,
'longitude': self.hass.config.longitude,
})
group.Group.create_group(
self.hass, 'location group', ['test_domain.object'])
assert 'test_domain.object' == \
template.Template(
'{{ closest(states.group.location_group).entity_id }}',
self.hass).render()
def test_closest_function_to_coord(self):
"""Test closest function to coord."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
tpl = template.Template(
'{{ closest("%s", %s, states.test_domain).entity_id }}'
% (self.hass.config.latitude + 0.3,
self.hass.config.longitude + 0.3), self.hass)
assert 'test_domain.closest_zone' == \
tpl.render()
def test_closest_function_to_entity_id(self):
"""Test closest function to entity id."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest("zone.far_away", '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_to_state(self):
"""Test closest function to state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
self.hass.states.set('test_domain.closest_zone', 'happy', {
'latitude': self.hass.config.latitude + 0.2,
'longitude': self.hass.config.longitude + 0.2,
})
self.hass.states.set('zone.far_away', 'zoning', {
'latitude': self.hass.config.latitude + 0.3,
'longitude': self.hass.config.longitude + 0.3,
})
assert 'test_domain.closest_zone' == \
template.Template(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}', self.hass).render()
def test_closest_function_invalid_state(self):
"""Test closest function invalid state."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
for state in ('states.zone.non_existing', '"zone.non_existing"'):
assert 'None' == \
template.Template('{{ closest(%s, states) }}' % state,
self.hass).render()
def test_closest_function_state_with_invalid_location(self):
"""Test closest function state with invalid location."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': 'invalid latitude',
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template(
'{{ closest(states.test_domain.closest_home, '
'states) }}', self.hass).render()
def test_closest_function_invalid_coordinates(self):
"""Test closest function invalid coordinates."""
self.hass.states.set('test_domain.closest_home', 'happy', {
'latitude': self.hass.config.latitude + 0.1,
'longitude': self.hass.config.longitude + 0.1,
})
assert 'None' == \
template.Template('{{ closest("invalid", "coord", states) }}',
self.hass).render()
def test_closest_function_no_location_states(self):
"""Test closest function without location states."""
assert '' == \
template.Template('{{ closest(states).entity_id }}',
self.hass).render()
def test_extract_entities_none_exclude_stuff(self):
"""Test extract entities function with none or exclude stuff."""
assert [] == template.extract_entities(None)
assert [] == template.extract_entities("mdi:water")
assert MATCH_ALL == \
template.extract_entities(
'{{ closest(states.zone.far_away, '
'states.test_domain).entity_id }}')
assert MATCH_ALL == \
template.extract_entities(
'{{ distance("123", states.test_object_2) }}')
def test_extract_entities_no_match_entities(self):
"""Test extract entities function with none entities stuff."""
assert MATCH_ALL == \
template.extract_entities(
"{{ value_json.tst | timestamp_custom('%Y' True) }}")
assert MATCH_ALL == \
template.extract_entities("""
{% for state in states.sensor %}
{{ state.entity_id }}={{ state.state }},d
{% endfor %}
""")
def test_extract_entities_match_entities(self):
"""Test extract entities function with entities stuff."""
assert ['device_tracker.phone_1'] == \
template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% else %}
Hercules is at {{ states('device_tracker.phone_1') }}.
{% endif %}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ as_timestamp(states.binary_sensor.garage_door.last_changed) }}
""")
assert ['binary_sensor.garage_door'] == \
template.extract_entities("""
{{ states("binary_sensor.garage_door") }}
""")
assert ['device_tracker.phone_2'] == \
template.extract_entities("""
{{ is_state_attr('device_tracker.phone_2', 'battery', 40) }}
""")
assert sorted([
'device_tracker.phone_1',
'device_tracker.phone_2',
]) == \
sorted(template.extract_entities("""
{% if is_state('device_tracker.phone_1', 'home') %}
Ha, Hercules is home!
{% elif states.device_tracker.phone_2.attributes.battery < 40 %}
Hercules you power goes done!.
{% endif %}
"""))
assert sorted([
'sensor.pick_humidity',
'sensor.pick_temperature',
]) == \
sorted(template.extract_entities("""
{{
states.sensor.pick_temperature.state ~ „°C (“ ~
states.sensor.pick_humidity.state ~ „ %“
}}
"""))
assert sorted([
'sensor.luftfeuchtigkeit_mean',
'input_number.luftfeuchtigkeit',
]) == \
sorted(template.extract_entities(
"{% if (states('sensor.luftfeuchtigkeit_mean') | int)"
" > (states('input_number.luftfeuchtigkeit') | int +1.5)"
" %}true{% endif %}"
))
def test_extract_entities_with_variables(self):
"""Test extract entities function with variables and entities stuff."""
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state('input_boolean.switch', 'off') }}", {})
assert ['trigger.entity_id'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}", {})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state(data, 'off') }}", {})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(data, 'off') }}",
{'data': 'input_boolean.switch'})
assert ['input_boolean.switch'] == \
template.extract_entities(
"{{ is_state(trigger.entity_id, 'off') }}",
{'trigger': {'entity_id': 'input_boolean.switch'}})
assert MATCH_ALL == \
template.extract_entities(
"{{ is_state('media_player.' ~ where , 'playing') }}",
{'where': 'livingroom'})
def test_jinja_namespace(self):
"""Test Jinja's namespace command can be used."""
test_template = template.Template(
(
"{% set ns = namespace(a_key='') %}"
"{% set ns.a_key = states.sensor.dummy.state %}"
"{{ ns.a_key }}"
),
self.hass
)
self.hass.states.set('sensor.dummy', 'a value')
assert 'a value' == test_template.render()
self.hass.states.set('sensor.dummy', 'another value')
assert 'another value' == test_template.render()
@asyncio.coroutine
def test_state_with_unit(hass):
"""Test the state_with_unit property helper."""
hass.states.async_set('sensor.test', '23', {
'unit_of_measurement': 'beers',
})
hass.states.async_set('sensor.test2', 'wow')
tpl = template.Template(
'{{ states.sensor.test.state_with_unit }}', hass)
assert tpl.async_render() == '23 beers'
tpl = template.Template(
'{{ states.sensor.test2.state_with_unit }}', hass)
assert tpl.async_render() == 'wow'
tpl = template.Template(
'{% for state in states %}{{ state.state_with_unit }} {% endfor %}',
hass)
assert tpl.async_render() == '23 beers wow'
tpl = template.Template('{{ states.sensor.non_existing.state_with_unit }}',
hass)
assert tpl.async_render() == ''
@asyncio.coroutine
def test_length_of_states(hass):
"""Test fetching the length of states."""
hass.states.async_set('sensor.test', '23')
hass.states.async_set('sensor.test2', 'wow')
hass.states.async_set('climate.test2', 'cooling')
tpl = template.Template('{{ states | length }}', hass)
assert tpl.async_render() == '3'
tpl = template.Template('{{ states.sensor | length }}', hass)
assert tpl.async_render() == '2'
| apache-2.0 | 3,563,766,284,263,300,000 | 35.549165 | 79 | 0.517944 | false |
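The tests above exercise Home Assistant's custom Jinja filters such as `multiply`. As a rough illustration only (this is not Home Assistant's actual implementation), a similar filter can be registered in a plain Jinja2 environment like this:

```python
# Rough standalone illustration, NOT Home Assistant's implementation: wiring a
# custom filter equivalent to `multiply` into a plain Jinja2 environment.
from jinja2 import Environment


def multiply(value, amount):
    try:
        return float(value) * amount
    except (ValueError, TypeError):
        # mirror the tests above: return the input unchanged on bad values
        return value


env = Environment()
env.filters['multiply'] = multiply

print(env.from_string('{{ "12.78" | multiply(10) | round }}').render())
# 128.0
```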
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/GL/INTEL/map_texture.py | 9 | 1716 |
'''OpenGL extension INTEL.map_texture
This module customises the behaviour of the
OpenGL.raw.GL.INTEL.map_texture to provide a more
Python-friendly API
Overview (from the spec)
Systems with integrated GPUs can share the same physical memory between CPU
and GPU. This feature, if exposed by API, can bring significant performance
benefits for graphics applications by reducing the complexity of
uploading/accessing texture contents. This extension enables CPU direct
access to the GPU memory holding textures.
The problem with texture memory directly exposed to clients is that
textures are often 'tiled'. Texels are kept in specific layout to improve
locality of reference and thus performance of texturing. This 'tiling'
is specific to particular hardware and would be thus difficult to use.
This extension allows to create textures with 'linear' layout which allows
for simplified access on user side (potentially sacrificing some
performance during texture sampling).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/INTEL/map_texture.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.INTEL.map_texture import *
from OpenGL.raw.GL.INTEL.map_texture import _EXTENSION_NAME
def glInitMapTextureINTEL():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
glMapTexture2DINTEL=wrapper.wrapper(glMapTexture2DINTEL).setInputArraySize(
'stride', 1
).setInputArraySize(
'layout', 1
)
### END AUTOGENERATED SECTION | gpl-3.0 | 1,124,484,426,113,575,600 | 38.930233 | 76 | 0.798368 | false |
ac1apeyre/CS290_GroupAssignment1 | convertShaders.py | 4 | 1796 |
#Purpose: To create Javascript multiline strings from all of the vertex
#and fragment shaders in this directory and to automatically put those strings
#at the end of Shaders.js. This eliminates security problems loading external
#files and allows users to execute code on their own computer without cross
#origin problems, while still maintaining some semblance of modularity by
#allowing the main shader editing to be outside of Javascript and in .C files
import os
STARTLINE = "///*****SHADER STRINGS START*****///\n"
ENDLINE = "///*****SHADER STRINGS END*****///\n"
(OUTPUT_BEGINNING, OUTPUT_SHADERSTRS, OUTPUT_END) = (0, 1, 2)
if __name__ == '__main__':
files = os.listdir(".")
files = [f for f in files if (f[-2:] == ".c" or f[-2:] == ".C")]
fin = open("Shaders.js")
lines = fin.readlines()
fin.close()
fout = open("Shaders.js", "w")
state = OUTPUT_BEGINNING
for l in lines:
if not state == OUTPUT_SHADERSTRS:
fout.write(l)
if l == STARTLINE and state == OUTPUT_BEGINNING:
state = OUTPUT_SHADERSTRS
#Now open up every shader and convert it into a multiline string
for f in files:
shdin = open(f)
fout.write("var %s = "%f[0:-2])
linesshd = [lshd.rstrip() for lshd in shdin.readlines()]
for i in range(len(linesshd)):
print linesshd[i]
fout.write("\"%s\\n\""%linesshd[i])
if i < len(linesshd)-1:
fout.write(" + \n")
fout.write(";\n\n")
shdin.close()
elif l == ENDLINE and state == OUTPUT_SHADERSTRS:
state = OUTPUT_END
fout.write(l)
fout.close()
| apache-2.0 | -8,371,898,065,905,836,000 | 39.818182 | 78 | 0.563474 | false |
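The script builds each JavaScript string by quoting every shader line and joining the pieces with `+`. A tiny self-contained demo of that string construction, using a hypothetical two-line `fragmentshader.c`:

```python
# Self-contained demo of the string construction above, using a hypothetical
# two-line shader file named fragmentshader.c.
shader_name = "fragmentshader"
lines = ["precision mediump float;", "void main() { }"]

parts = ['"%s\\n"' % line for line in lines]      # quote each shader line
js = "var %s = %s;" % (shader_name, " + \n".join(parts))
print(js)
# var fragmentshader = "precision mediump float;\n" +
# "void main() { }\n";
```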
mahak/neutron | neutron/tests/functional/db/test_models.py | 3 | 1254 |
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from neutron.tests.functional import base
class TestDBCreation(base.BaseLoggingTestCase):
"""Check database schema can be created without conflicts.
For each test case is created a SQLite memory database.
"""
def setUp(self):
super(TestDBCreation, self).setUp()
self.engine = sqlalchemy.create_engine('sqlite://')
def _test_creation(self, module):
metadata = module.get_metadata()
metadata.create_all(self.engine)
def test_head_creation(self):
from neutron.db.migration.models import head
self._test_creation(head)
| apache-2.0 | 4,078,523,603,800,438,000 | 32 | 78 | 0.708134 | false |
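For reference, `_test_creation()` boils down to the standard SQLAlchemy pattern of building a schema against a throwaway in-memory engine; a minimal sketch with a dummy table in place of Neutron's real model metadata:

```python
# Minimal illustration of the pattern used by _test_creation(), with a dummy
# table instead of Neutron's real model metadata.
import sqlalchemy

metadata = sqlalchemy.MetaData()
sqlalchemy.Table('example', metadata,
                 sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True))

engine = sqlalchemy.create_engine('sqlite://')
metadata.create_all(engine)   # raises if the schema cannot be created
```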
jhrozek/samba-ldb-mdb | buildtools/wafsamba/samba_pidl.py | 9 | 5486 |
# waf build tool for building IDL files with pidl
from TaskGen import before
import Build, os, sys, Logs
from samba_utils import *
def SAMBA_PIDL(bld, pname, source,
options='',
output_dir='.',
generate_tables=True):
'''Build a IDL file using pidl.
This will produce up to 13 output files depending on the options used'''
bname = source[0:-4]; # strip off the .idl suffix
bname = os.path.basename(bname)
name = "%s_%s" % (pname, bname.upper())
if not SET_TARGET_TYPE(bld, name, 'PIDL'):
return
bld.SET_BUILD_GROUP('build_source')
# the output files depend on the options used. Use this dictionary
# to map between the options and the resulting file names
options_map = { '--header' : '%s.h',
'--ndr-parser' : 'ndr_%s.c ndr_%s.h',
'--samba3-ndr-server' : 'srv_%s.c srv_%s.h',
'--samba3-ndr-client' : 'cli_%s.c cli_%s.h',
'--server' : 'ndr_%s_s.c',
'--client' : 'ndr_%s_c.c ndr_%s_c.h',
'--python' : 'py_%s.c',
'--tdr-parser' : 'tdr_%s.c tdr_%s.h',
'--dcom-proxy' : '%s_p.c',
'--com-header' : 'com_%s.h'
}
table_header_idx = None
out_files = []
options_list = TO_LIST(options)
for o in options_list:
if o in options_map:
ofiles = TO_LIST(options_map[o])
for f in ofiles:
out_files.append(os.path.join(output_dir, f % bname))
if f == 'ndr_%s.h':
# remember this one for the tables generation
table_header_idx = len(out_files) - 1
# depend on the full pidl sources
source = TO_LIST(source)
try:
pidl_src_nodes = bld.pidl_files_cache
except AttributeError:
bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False)
bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False))
pidl_src_nodes = bld.pidl_files_cache
# the cd .. is needed because pidl currently is sensitive to the directory it is run in
cpp = ""
cc = ""
if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "":
if isinstance(bld.CONFIG_GET("CPP"), list):
cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP"))
else:
cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP")
if cpp == "CPP=xlc_r":
cpp = ""
if bld.CONFIG_SET("CC"):
if isinstance(bld.CONFIG_GET("CC"), list):
cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC"))
else:
cc = 'CC="%s"' % bld.CONFIG_GET("CC")
t = bld(rule='cd .. && %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc),
ext_out = '.c',
before = 'cc',
update_outputs = True,
shell = True,
source = source,
target = out_files,
name = name,
samba_type = 'PIDL')
# prime the list of nodes we are dependent on with the cached pidl sources
t.allnodes = pidl_src_nodes
t.env.PIDL = os.path.join(bld.srcnode.abspath(), 'pidl/pidl')
t.env.OPTIONS = TO_LIST(options)
t.env.OUTPUTDIR = bld.bldnode.name + '/' + bld.path.find_dir(output_dir).bldpath(t.env)
if generate_tables and table_header_idx is not None:
pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS')
pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])]
t.more_includes = '#' + bld.path.relpath_gen(bld.srcnode)
Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL
def SAMBA_PIDL_LIST(bld, name, source,
options='',
output_dir='.',
generate_tables=True):
'''A wrapper for building a set of IDL files'''
for p in TO_LIST(source):
bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables)
Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST
#################################################################
# the rule for generating the NDR tables
from TaskGen import feature, before
@feature('collect')
@before('exec_rule')
def collect(self):
pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS')
for (name, hd) in pidl_headers.items():
y = self.bld.get_tgen_by_name(name)
self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name)
y.post()
for node in hd:
self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name)
self.source += " " + node.relpath_gen(self.path)
def SAMBA_PIDL_TABLES(bld, name, target):
'''generate the pidl NDR tables file'''
headers = bld.env.PIDL_HEADERS
bld.SET_BUILD_GROUP('main')
t = bld(
features = 'collect',
rule = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}',
ext_out = '.c',
before = 'cc',
update_outputs = True,
shell = True,
source = '../../librpc/tables.pl',
target = target,
name = name)
t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc')
Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES
| gpl-3.0 | -1,922,757,916,528,531,500 | 36.834483 | 136 | 0.530441 | false |
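`SAMBA_PIDL()` derives its output file list by expanding the `options_map` templates with the IDL base name. A pure-Python illustration of that expansion for a hypothetical `netlogon.idl`:

```python
# Pure-Python illustration of the option -> output-file expansion that
# SAMBA_PIDL() performs, for a hypothetical netlogon.idl.
options_map = {'--header': '%s.h',
               '--ndr-parser': 'ndr_%s.c ndr_%s.h',
               '--client': 'ndr_%s_c.c ndr_%s_c.h'}

bname = 'netlogon'                      # source file netlogon.idl
options = ['--header', '--ndr-parser']

out_files = [f % bname
             for o in options
             for f in options_map[o].split()]
print(out_files)
# ['netlogon.h', 'ndr_netlogon.c', 'ndr_netlogon.h']
```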
kiddinn/plaso | plaso/parsers/plugins.py | 2 | 4656 |
# -*- coding: utf-8 -*-
"""This file contains basic interface for plugins within Plaso.
This library serves a basis for all plugins in Plaso, whether that are
Windows registry plugins, SQLite plugins or any other parsing plugins.
This is provided as a separate file to make it easier to inherit in other
projects that may want to use the Plaso plugin system.
"""
class BasePlugin(object):
"""A plugin is a lightweight parser that makes use of a common data structure.
When a data structure is common among several artifacts or files a plugin
infrastructure can be written to make writing parsers simpler. The goal of a
plugin is have only a single parser that understands the data structure that
can call plugins that have specialized knowledge of certain structures.
An example of this is a SQLite database. A plugin can be written that has
knowledge of certain database, such as Chrome history, or Skype history, etc.
This can be done without needing to write a full fledged parser that needs
to re-implement the data structure knowledge. A single parser can be created
that calls the plugins to see if it knows that particular database.
Another example is Windows registry, there a single parser that can parse
the registry can be made and the job of a single plugin is to parse a
particular registry key. The parser can then read a registry key and compare
it to a list of available plugins to see if it can be parsed.
"""
# The name of the plugin. This is the name that is used in the registration
# and used for parser/plugin selection, so this needs to be concise and unique
# for all plugins/parsers, such as 'Chrome', 'Safari' or 'UserAssist'.
NAME = 'base_plugin'
# Data format supported by the parser plugin. This information is used by
# the parser manager to generate parser and plugin information.
DATA_FORMAT = ''
# The URLS should contain a list of URLs with additional information about
# the plugin, for instance some additional reading material. That can be
# a description of the data structure, or how to read the data that comes
# out of the parser, etc. So in essence this is a field to define pointers
# to additional resources to assist the practitioner reading the output of
# the plugin.
URLS = []
# TODO: remove.
@property
def plugin_name(self):
"""Return the name of the plugin."""
return self.NAME
# pylint: disable=unused-argument
def Process(self, parser_mediator, **kwargs):
"""Evaluates if this is the correct plugin and processes data accordingly.
The purpose of the process function is to evaluate if this particular
plugin is the correct one for the particular data structure at hand.
This function accepts one value to use for evaluation, that could be
a registry key, list of table names for a database or any other criteria
that can be used to evaluate if the plugin should be run or not.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
kwargs (dict[str, object]): Depending on the plugin they may require
different sets of arguments to be able to evaluate whether or not
this is the correct plugin.
Raises:
ValueError: when there are unused keyword arguments.
"""
if kwargs:
raise ValueError('Unused keyword arguments: {0:s}.'.format(
', '.join(kwargs.keys())))
def UpdateChainAndProcess(self, parser_mediator, **kwargs):
"""Wrapper for Process() to synchronize the parser chain.
This convenience method updates the parser chain object held by the
mediator, transfers control to the plugin-specific Process() method,
and updates the chain again once the processing is complete. It provides a
simpler parser API in most cases.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
parser_mediator.AppendToParserChain(self)
try:
self.Process(parser_mediator, **kwargs)
finally:
parser_mediator.PopFromParserChain()
class BasePluginCache(object):
"""A generic cache for parser plugins."""
def GetResults(self, attribute, default_value=None):
"""Retrieves a cached attribute.
Args:
attribute (str): name of the cached attribute.
default_value (Optional[object]): default value.
Returns:
object: value of the cached attribute or default value if the cache
does not contain the attribute.
"""
return getattr(self, attribute, default_value)
| apache-2.0 | -2,405,555,486,327,840,000 | 40.571429 | 80 | 0.7311 | false |
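A hedged sketch of how a concrete plugin would build on the interface above; the class name and the NAME and DATA_FORMAT values below are invented for illustration and assume `BasePlugin` from this module is in scope:

```python
# Invented example (names are hypothetical) showing how a concrete plugin
# builds on BasePlugin from this module.
class ExamplePlugin(BasePlugin):
    """Parses a hypothetical artifact."""

    NAME = 'example'
    DATA_FORMAT = 'Example artifact data'

    def Process(self, parser_mediator, data=None, **kwargs):
        # Let the base class reject any unexpected keyword arguments first.
        super(ExamplePlugin, self).Process(parser_mediator, **kwargs)
        if data is None:
            return
        # A real plugin would emit event data via parser_mediator here.
```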
tswast/google-cloud-python | language/setup.py | 2 | 2778 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
# Package metadata.
name = "google-cloud-language"
description = "Google Cloud Natural Language API client library"
version = "1.3.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 5 - Production/Stable"
dependencies = [
"google-api-core[grpc] >= 1.14.0, < 2.0.0dev",
'enum34;python_version<"3.4"',
]
extras = {}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
# Determine which namespaces are needed.
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
| apache-2.0 | -7,441,570,764,534,308,000 | 30.213483 | 85 | 0.681425 | false |
Excito/audiotools | audiotools/py_decoders/wavpack.py | 2 | 38340 |
#!/usr/bin/python
#Audio Tools, a module and set of tools for manipulating audio data
#Copyright (C) 2007-2012 Brian Langenberger
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools.bitstream import BitstreamReader
from audiotools.pcm import from_channels, from_list
from math import log
from hashlib import md5
def sub_blocks(reader, sub_blocks_size):
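    # generator: yields successive Sub_Block objects until the given number
    # of sub-block bytes has been consumed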
while (sub_blocks_size > 0):
sub_block = Sub_Block.read(reader)
yield sub_block
sub_blocks_size -= sub_block.total_size()
class WavPackDecoder:
def __init__(self, filename):
self.reader = BitstreamReader(open(filename, "rb"), 1)
#read initial block to populate
#sample_rate, bits_per_sample, channels, and channel_mask
self.reader.mark()
block_header = Block_Header.read(self.reader)
sub_blocks_size = block_header.block_size - 24
sub_blocks_data = self.reader.substream(sub_blocks_size)
if (block_header.sample_rate != 15):
self.sample_rate = [6000, 8000, 9600, 11025, 12000,
16000, 22050, 24000, 32000, 44100,
48000, 64000, 88200, 96000,
192000][block_header.sample_rate]
else:
sub_blocks_data.mark()
try:
for sub_block in sub_blocks(sub_blocks_data, sub_blocks_size):
if (((sub_block.metadata_function == 7) and
(sub_block.nondecoder_data == 1))):
self.sample_rate = sub_block.data.read(
sub_block.data_size() * 8)
break
else:
raise ValueError("invalid sample rate")
finally:
sub_blocks_data.rewind()
sub_blocks_data.unmark()
self.bits_per_sample = [8, 16, 24, 32][block_header.bits_per_sample]
if (block_header.initial_block and block_header.final_block):
if (((block_header.mono_output == 0) or
(block_header.false_stereo == 1))):
self.channels = 2
self.channel_mask = 0x3
else:
self.channels = 1
self.channel_mask = 0x4
else:
#look for channel mask sub block
sub_blocks_data.mark()
for sub_block in sub_blocks(sub_blocks_data, sub_blocks_size):
if (((sub_block.metadata_function == 13) and
(sub_block.nondecoder_data == 0))):
self.channels = sub_block.data.read(8)
self.channel_mask = sub_block.data.read(
(sub_block.data_size() - 1) * 8)
break
else:
#FIXME - handle case of no channel mask sub block
raise NotImplementedError()
sub_blocks_data.rewind()
sub_blocks_data.unmark()
self.reader.rewind()
self.reader.unmark()
self.pcm_finished = False
self.md5_checked = False
self.md5sum = md5()
def read(self, pcm_frames):
if (self.pcm_finished):
if (not self.md5_checked):
self.reader.mark()
try:
try:
header = Block_Header.read(self.reader)
sub_blocks_size = header.block_size - 24
sub_blocks_data = \
self.reader.substream(sub_blocks_size)
for sub_block in sub_blocks(sub_blocks_data,
sub_blocks_size):
if (((sub_block.metadata_function == 6) and
(sub_block.nondecoder_data == 1))):
if ((sub_block.data.read_bytes(16) !=
self.md5sum.digest())):
raise ValueError("invalid stream MD5 sum")
except (IOError, ValueError):
#no error if a block isn't found
pass
finally:
self.reader.rewind()
self.reader.unmark()
return from_list([], self.channels, self.bits_per_sample, True)
channels = []
while (True): # in place of a do-while loop
try:
block_header = Block_Header.read(self.reader)
except (ValueError, IOError):
self.pcm_finished = True
return from_list([], self.channels, self.bits_per_sample, True)
sub_blocks_size = block_header.block_size - 24
sub_blocks_data = self.reader.substream(sub_blocks_size)
channels.extend(read_block(block_header,
sub_blocks_size,
sub_blocks_data))
if (block_header.final_block == 1):
break
if ((block_header.block_index +
block_header.block_samples) >= block_header.total_samples):
self.pcm_finished = True
#combine channels of audio data into single block
block = from_channels([from_list(ch, 1, self.bits_per_sample, True)
for ch in channels])
#update MD5 sum
self.md5sum.update(block.to_bytes(False, self.bits_per_sample > 8))
#return single block of audio data
return block
def close(self):
self.reader.close()
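# Hedged usage sketch (not part of the original module); "example.wv" is a
# hypothetical file path used only for illustration:
#
#     decoder = WavPackDecoder("example.wv")
#     framelist = decoder.read(4096)   # returns the next decoded batch of PCM frames
#     decoder.close()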
class Block_Header:
def __init__(self,
block_id, block_size, version, track_number, index_number,
total_samples, block_index, block_samples, bits_per_sample,
mono_output, hybrid_mode, joint_stereo, channel_decorrelation,
hybrid_noise_shaping, floating_point_data,
extended_size_integers, hybrid_controls_bitrate,
hybrid_noise_balanced, initial_block, final_block,
left_shift_data, maximum_magnitude, sample_rate,
use_IIR, false_stereo, CRC):
if (block_id != "wvpk"):
raise ValueError("invalid WavPack block ID")
self.block_size = block_size
self.version = version
self.track_number = track_number
self.index_number = index_number
self.total_samples = total_samples
self.block_index = block_index
self.block_samples = block_samples
self.bits_per_sample = bits_per_sample
self.mono_output = mono_output
self.hybrid_mode = hybrid_mode
self.joint_stereo = joint_stereo
self.channel_decorrelation = channel_decorrelation
self.hybrid_noise_shaping = hybrid_noise_shaping
self.floating_point_data = floating_point_data
self.extended_size_integers = extended_size_integers
self.hybrid_controls_bitrate = hybrid_controls_bitrate
self.hybrid_noise_balanced = hybrid_noise_balanced
self.initial_block = initial_block
self.final_block = final_block
self.left_shift_data = left_shift_data
self.maximum_magnitude = maximum_magnitude
self.sample_rate = sample_rate
self.use_IIR = use_IIR
self.false_stereo = false_stereo
self.CRC = CRC
def __repr__(self):
return "Block_Header(%s)" % \
", ".join(["%s=%s" % (attr, getattr(self, attr))
for attr in
["block_size", "version", "track_number",
"index_number", "total_samples", "block_index",
"block_samples", "bits_per_sample", "mono_output",
"hybrid_mode", "joint_stereo",
"channel_decorrelation", "hybrid_noise_shaping",
"floating_point_data", "extended_size_integers",
"hybrid_controls_bitrate", "hybrid_noise_balanced",
"initial_block", "final_block", "left_shift_data",
"maximum_magnitude", "sample_rate",
"use_IIR", "false_stereo", "CRC"]])
@classmethod
def read(cls, reader):
return cls(*reader.parse("4b 32u 16u 8u 8u 32u 32u 32u" +
"2u 1u 1u 1u 1u 1u 1u 1u " +
"1u 1u 1u 1u 5u 5u 4u 2p 1u 1u 1p" +
"32u"))
class Sub_Block:
def __init__(self, metadata_function, nondecoder_data,
actual_size_1_less, large_block, sub_block_size,
data):
self.metadata_function = metadata_function
self.nondecoder_data = nondecoder_data
self.actual_size_1_less = actual_size_1_less
self.large_block = large_block
self.sub_block_size = sub_block_size
self.data = data
def __repr__(self):
return "Sub_Block(%s)" % \
", ".join(["%s=%s" % (attr, getattr(self, attr))
for attr in
["metadata_function", "nondecoder_data",
"actual_size_1_less", "large_block",
"sub_block_size", "data"]])
def total_size(self):
if (self.large_block):
return 1 + 3 + (self.sub_block_size * 2)
else:
return 1 + 1 + (self.sub_block_size * 2)
def data_size(self):
if (self.actual_size_1_less):
return self.sub_block_size * 2 - 1
else:
return self.sub_block_size * 2
@classmethod
def read(cls, reader):
(metadata_function,
nondecoder_data,
actual_size_1_less,
large_block) = reader.parse("5u 1u 1u 1u")
if (large_block == 0):
sub_block_size = reader.read(8)
else:
sub_block_size = reader.read(24)
if (actual_size_1_less == 0):
data = reader.substream(sub_block_size * 2)
else:
data = reader.substream(sub_block_size * 2 - 1)
reader.skip(8)
return cls(metadata_function,
nondecoder_data,
actual_size_1_less,
large_block,
sub_block_size,
data)
def read_block(block_header, sub_blocks_size, sub_blocks_data):
"""returns 1 or 2 channels of PCM data integers"""
decorrelation_terms_read = False
decorrelation_weights_read = False
decorrelation_samples_read = False
entropies_read = False
residuals_read = False
extended_integers_read = False
while (sub_blocks_size > 0):
(metadata_function,
nondecoder_data,
actual_size_1_less,
large_sub_block) = sub_blocks_data.parse("5u 1u 1u 1u")
if (large_sub_block == 0):
sub_block_size = sub_blocks_data.read(8)
else:
sub_block_size = sub_blocks_data.read(24)
if (actual_size_1_less == 0):
sub_block_data = sub_blocks_data.substream(sub_block_size * 2)
else:
sub_block_data = sub_blocks_data.substream(sub_block_size * 2 - 1)
sub_blocks_data.skip(8)
if (nondecoder_data == 0):
if (metadata_function == 2):
(decorrelation_terms,
decorrelation_deltas) = read_decorrelation_terms(
sub_block_size, actual_size_1_less, sub_block_data)
decorrelation_terms_read = True
if (metadata_function == 3):
if (not decorrelation_terms_read):
raise ValueError(
"weights sub block found before terms sub block")
decorrelation_weights = read_decorrelation_weights(
block_header, len(decorrelation_terms),
sub_block_size, actual_size_1_less, sub_block_data)
decorrelation_weights_read = True
if (metadata_function == 4):
if (not decorrelation_terms_read):
raise ValueError(
"samples sub block found before terms sub block")
if (actual_size_1_less):
raise ValueError(
"decorrelation samples must have an even byte count")
decorrelation_samples = read_decorrelation_samples(
block_header, decorrelation_terms,
sub_block_size, sub_block_data)
decorrelation_samples_read = True
if (metadata_function == 5):
entropies = read_entropy_variables(block_header,
sub_block_data)
entropies_read = True
if (metadata_function == 9):
(zero_bits,
one_bits,
duplicate_bits) = read_extended_integers(sub_block_data)
extended_integers_read = True
if (metadata_function == 10):
if (not entropies_read):
raise ValueError("bitstream sub block before " +
"entropy variables sub block")
residuals = read_bitstream(block_header, entropies,
sub_block_data)
residuals_read = True
if (large_sub_block == 0):
sub_blocks_size -= (2 + 2 * sub_block_size)
else:
sub_blocks_size -= (4 + 2 * sub_block_size)
if (decorrelation_terms_read):
if (not decorrelation_weights_read):
raise ValueError("decorrelation weights sub block not found")
if (not decorrelation_samples_read):
raise ValueError("decorrelation samples sub block not found")
if (not residuals_read):
raise ValueError("bitstream sub block not found")
if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
if (decorrelation_terms_read and len(decorrelation_terms) > 0):
decorrelated = decorrelate_channels(residuals,
decorrelation_terms,
decorrelation_deltas,
decorrelation_weights,
decorrelation_samples)
else:
decorrelated = residuals
if (block_header.joint_stereo == 1):
left_right = undo_joint_stereo(decorrelated)
else:
left_right = decorrelated
channels_crc = calculate_crc(left_right)
if (channels_crc != block_header.CRC):
raise ValueError("CRC mismatch (0x%8.8X != 0x%8.8X)" %
(channels_crc, block_header.CRC))
if (block_header.extended_size_integers == 1):
un_shifted = undo_extended_integers(zero_bits,
one_bits,
duplicate_bits,
left_right)
else:
un_shifted = left_right
return un_shifted
else:
if (decorrelation_terms_read and len(decorrelation_terms) > 0):
decorrelated = decorrelate_channels(residuals,
decorrelation_terms,
decorrelation_deltas,
decorrelation_weights,
decorrelation_samples)
else:
decorrelated = residuals
channels_crc = calculate_crc(decorrelated)
if (channels_crc != block_header.CRC):
raise ValueError("CRC mismatch (0x%8.8X != 0x%8.8X)" %
(channels_crc, block_header.CRC))
if (block_header.extended_size_integers == 1):
un_shifted = undo_extended_integers(zero_bits,
one_bits,
duplicate_bits,
decorrelated)
else:
un_shifted = decorrelated
if (block_header.false_stereo == 0):
return un_shifted
else:
return (un_shifted[0], un_shifted[0])
def read_decorrelation_terms(sub_block_size,
actual_size_1_less,
sub_block_data):
"""returns a list of decorrelation terms
and a list of decorrelation deltas per decorrelation pass
term[pass] , delta[pass]"""
if (actual_size_1_less == 0):
passes = sub_block_size * 2
else:
passes = sub_block_size * 2 - 1
if (passes > 16):
raise ValueError("invalid decorrelation passes count")
decorrelation_terms = []
decorrelation_deltas = []
for i in xrange(passes):
decorrelation_terms.append(sub_block_data.read(5) - 5)
if (not (((1 <= decorrelation_terms[-1]) and
(decorrelation_terms[-1] <= 18)) or
((-3 <= decorrelation_terms[-1]) and
(decorrelation_terms[-1] <= -1)))):
raise ValueError("invalid decorrelation term")
decorrelation_deltas.append(sub_block_data.read(3))
decorrelation_terms.reverse()
decorrelation_deltas.reverse()
return (decorrelation_terms, decorrelation_deltas)
def read_decorrelation_weights(block_header, decorrelation_terms_count,
sub_block_size, actual_size_1_less,
sub_block_data):
"""returns one tuple of decorrelation weights per decorrelation pass
the number of weights in each tuple equals the number of channels
weight[pass][channel]
"""
if (actual_size_1_less == 0):
weight_count = sub_block_size * 2
else:
weight_count = sub_block_size * 2 - 1
weight_values = []
for i in xrange(weight_count):
value_i = sub_block_data.read_signed(8)
if (value_i > 0):
weight_values.append((value_i * 2 ** 3) +
((value_i * 2 ** 3 + 2 ** 6) / 2 ** 7))
elif(value_i == 0):
weight_values.append(0)
else:
weight_values.append(value_i * 2 ** 3)
weights = []
if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
#two channels
if ((weight_count / 2) > decorrelation_terms_count):
raise ValueError("invalid number of decorrelation weights")
for i in xrange(weight_count / 2):
weights.append((weight_values[i * 2],
weight_values[i * 2 + 1]))
for i in xrange(weight_count / 2, decorrelation_terms_count):
weights.append((0, 0))
weights.reverse()
else:
#one channel
if (weight_count > decorrelation_terms_count):
raise ValueError("invalid number of decorrelation weights")
for i in xrange(weight_count):
weights.append((weight_values[i], ))
for i in xrange(weight_count, decorrelation_terms_count):
weights.append((0, 0))
weights.reverse()
return weights
def read_decorrelation_samples(block_header, decorrelation_terms,
sub_block_size, sub_block_data):
"""returns one tuple of decorrelation samples lists
per decorrelation pass
sample[pass][channel][s]"""
sub_block_bytes = sub_block_size * 2
samples = []
if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
#two channels
for term in reversed(decorrelation_terms):
if ((17 <= term) and (term <= 18)):
if (sub_block_bytes >= 8):
samples.append(([read_exp2(sub_block_data),
read_exp2(sub_block_data)],
[read_exp2(sub_block_data),
read_exp2(sub_block_data)]))
sub_block_bytes -= 8
else:
samples.append(([0, 0], [0, 0]))
sub_block_bytes = 0
elif ((1 <= term) and (term <= 8)):
term_samples = ([], [])
if (sub_block_bytes >= (term * 4)):
for s in xrange(term):
term_samples[0].append(read_exp2(sub_block_data))
term_samples[1].append(read_exp2(sub_block_data))
sub_block_bytes -= (term * 4)
else:
for s in xrange(term):
term_samples[0].append(0)
term_samples[1].append(0)
sub_block_bytes = 0
samples.append(term_samples)
elif ((-3 <= term) and (term <= -1)):
if (sub_block_bytes >= 4):
samples.append(([read_exp2(sub_block_data)],
[read_exp2(sub_block_data)]))
sub_block_bytes -= 4
else:
samples.append(([0], [0]))
sub_block_bytes = 0
else:
raise ValueError("invalid decorrelation term")
samples.reverse()
return samples
else:
#one channel
for term in reversed(decorrelation_terms):
if ((17 <= term) and (term <= 18)):
if (sub_block_bytes >= 4):
samples.append(([read_exp2(sub_block_data),
read_exp2(sub_block_data)],))
sub_block_bytes -= 4
else:
                    samples.append(([0, 0],))
sub_block_bytes = 0
elif ((1 <= term) and (term <= 8)):
term_samples = ([],)
if (sub_block_bytes >= (term * 2)):
for s in xrange(term):
term_samples[0].append(read_exp2(sub_block_data))
sub_block_bytes -= (term * 2)
else:
for s in xrange(term):
term_samples[0].append(0)
sub_block_bytes = 0
samples.append(term_samples)
else:
raise ValueError("invalid decorrelation term")
samples.reverse()
return samples
def read_entropy_variables(block_header, sub_block_data):
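    # reads the three entropy medians for channel 0 and, for true stereo
    # streams, for channel 1 as well; mono/false-stereo streams get zeros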
entropies = ([], [])
for i in xrange(3):
entropies[0].append(read_exp2(sub_block_data))
if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
for i in xrange(3):
entropies[1].append(read_exp2(sub_block_data))
else:
entropies[1].extend([0, 0, 0])
return entropies
def read_bitstream(block_header, entropies, sub_block_data):
if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
channel_count = 2
residuals = ([], [])
else:
channel_count = 1
residuals = ([], )
u = None
i = 0
while (i < (block_header.block_samples * channel_count)):
if ((u is None) and (entropies[0][0] < 2) and (entropies[1][0] < 2)):
#handle long run of 0 residuals
zeroes = read_egc(sub_block_data)
if (zeroes > 0):
for j in xrange(zeroes):
residuals[i % channel_count].append(0)
i += 1
entropies[0][0] = entropies[0][1] = entropies[0][2] = 0
entropies[1][0] = entropies[1][1] = entropies[1][2] = 0
if (i < (block_header.block_samples * channel_count)):
(residual, u) = read_residual(
sub_block_data,
u,
entropies[i % channel_count])
residuals[i % channel_count].append(residual)
i += 1
else:
(residual, u) = read_residual(
sub_block_data,
u,
entropies[i % channel_count])
residuals[i % channel_count].append(residual)
i += 1
return residuals
def read_egc(reader):
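    # Elias-gamma-style count: a unary prefix followed by (t - 1) extra bits,
    # used for long zero runs and for escaping large unary values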
t = reader.unary(0)
if (t > 0):
p = reader.read(t - 1)
return 2 ** (t - 1) + p
else:
return t
def read_residual(reader, last_u, entropies):
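    # decodes a single residual: the unary state selects which entropy-median
    # bracket the value falls in (updating the medians as a side effect), then
    # low-order bits and a sign bit complete it; returns (residual, new unary state)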
if (last_u is None):
u = reader.unary(0)
if (u == 16):
u += read_egc(reader)
m = u / 2
elif ((last_u % 2) == 1):
u = reader.unary(0)
if (u == 16):
u += read_egc(reader)
m = (u / 2) + 1
else:
u = None
m = 0
if (m == 0):
base = 0
add = entropies[0] >> 4
entropies[0] -= ((entropies[0] + 126) >> 7) * 2
elif (m == 1):
base = (entropies[0] >> 4) + 1
add = entropies[1] >> 4
entropies[0] += ((entropies[0] + 128) >> 7) * 5
entropies[1] -= ((entropies[1] + 62) >> 6) * 2
elif (m == 2):
base = ((entropies[0] >> 4) + 1) + ((entropies[1] >> 4) + 1)
add = entropies[2] >> 4
entropies[0] += ((entropies[0] + 128) >> 7) * 5
entropies[1] += ((entropies[1] + 64) >> 6) * 5
entropies[2] -= ((entropies[2] + 30) >> 5) * 2
else:
base = (((entropies[0] >> 4) + 1) +
((entropies[1] >> 4) + 1) +
(((entropies[2] >> 4) + 1) * (m - 2)))
add = entropies[2] >> 4
entropies[0] += ((entropies[0] + 128) >> 7) * 5
entropies[1] += ((entropies[1] + 64) >> 6) * 5
entropies[2] += ((entropies[2] + 32) >> 5) * 5
if (add == 0):
unsigned = base
else:
p = int(log(add) / log(2))
e = 2 ** (p + 1) - add - 1
r = reader.read(p)
if (r >= e):
b = reader.read(1)
unsigned = base + (r * 2) - e + b
else:
unsigned = base + r
sign = reader.read(1)
if (sign == 1):
return (-unsigned - 1, u)
else:
return (unsigned, u)
def undo_joint_stereo(samples):
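    # converts joint (mid/side) stereo samples back into (left, right) channels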
assert(len(samples) == 2)
assert(len(samples[0]) == len(samples[1]))
stereo = [[], []]
for (mid, side) in zip(*samples):
right = side - (mid >> 1)
left = mid + right
stereo[0].append(left)
stereo[1].append(right)
return stereo
def read_extended_integers(sub_block_data):
(sent_bits,
zero_bits,
one_bits,
duplicate_bits) = sub_block_data.parse("8u 8u 8u 8u")
return (zero_bits, one_bits, duplicate_bits)
def undo_extended_integers(zero_bits, one_bits, duplicate_bits,
channels):
un_shifted = []
for channel in channels:
if (zero_bits > 0):
un_shifted.append([s << zero_bits for s in channel])
elif (one_bits > 0):
ones = (1 << one_bits) - 1
un_shifted.append([(s << one_bits) + ones for s in channel])
elif (duplicate_bits > 0):
dupes = []
ones = (1 << duplicate_bits) - 1
for s in channel:
if ((s % 2) == 0):
dupes.append(s << duplicate_bits)
else:
dupes.append((s << duplicate_bits) + ones)
un_shifted.append(dupes)
else:
un_shifted.append(channel)
return tuple(un_shifted)
EXP2 = [0x100, 0x101, 0x101, 0x102, 0x103, 0x103, 0x104, 0x105,
0x106, 0x106, 0x107, 0x108, 0x108, 0x109, 0x10a, 0x10b,
0x10b, 0x10c, 0x10d, 0x10e, 0x10e, 0x10f, 0x110, 0x110,
0x111, 0x112, 0x113, 0x113, 0x114, 0x115, 0x116, 0x116,
0x117, 0x118, 0x119, 0x119, 0x11a, 0x11b, 0x11c, 0x11d,
0x11d, 0x11e, 0x11f, 0x120, 0x120, 0x121, 0x122, 0x123,
0x124, 0x124, 0x125, 0x126, 0x127, 0x128, 0x128, 0x129,
0x12a, 0x12b, 0x12c, 0x12c, 0x12d, 0x12e, 0x12f, 0x130,
0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x135, 0x136,
0x137, 0x138, 0x139, 0x13a, 0x13a, 0x13b, 0x13c, 0x13d,
0x13e, 0x13f, 0x140, 0x141, 0x141, 0x142, 0x143, 0x144,
0x145, 0x146, 0x147, 0x148, 0x148, 0x149, 0x14a, 0x14b,
0x14c, 0x14d, 0x14e, 0x14f, 0x150, 0x151, 0x151, 0x152,
0x153, 0x154, 0x155, 0x156, 0x157, 0x158, 0x159, 0x15a,
0x15b, 0x15c, 0x15d, 0x15e, 0x15e, 0x15f, 0x160, 0x161,
0x162, 0x163, 0x164, 0x165, 0x166, 0x167, 0x168, 0x169,
0x16a, 0x16b, 0x16c, 0x16d, 0x16e, 0x16f, 0x170, 0x171,
0x172, 0x173, 0x174, 0x175, 0x176, 0x177, 0x178, 0x179,
0x17a, 0x17b, 0x17c, 0x17d, 0x17e, 0x17f, 0x180, 0x181,
0x182, 0x183, 0x184, 0x185, 0x187, 0x188, 0x189, 0x18a,
0x18b, 0x18c, 0x18d, 0x18e, 0x18f, 0x190, 0x191, 0x192,
0x193, 0x195, 0x196, 0x197, 0x198, 0x199, 0x19a, 0x19b,
0x19c, 0x19d, 0x19f, 0x1a0, 0x1a1, 0x1a2, 0x1a3, 0x1a4,
0x1a5, 0x1a6, 0x1a8, 0x1a9, 0x1aa, 0x1ab, 0x1ac, 0x1ad,
0x1af, 0x1b0, 0x1b1, 0x1b2, 0x1b3, 0x1b4, 0x1b6, 0x1b7,
0x1b8, 0x1b9, 0x1ba, 0x1bc, 0x1bd, 0x1be, 0x1bf, 0x1c0,
0x1c2, 0x1c3, 0x1c4, 0x1c5, 0x1c6, 0x1c8, 0x1c9, 0x1ca,
0x1cb, 0x1cd, 0x1ce, 0x1cf, 0x1d0, 0x1d2, 0x1d3, 0x1d4,
0x1d6, 0x1d7, 0x1d8, 0x1d9, 0x1db, 0x1dc, 0x1dd, 0x1de,
0x1e0, 0x1e1, 0x1e2, 0x1e4, 0x1e5, 0x1e6, 0x1e8, 0x1e9,
0x1ea, 0x1ec, 0x1ed, 0x1ee, 0x1f0, 0x1f1, 0x1f2, 0x1f4,
0x1f5, 0x1f6, 0x1f8, 0x1f9, 0x1fa, 0x1fc, 0x1fd, 0x1ff]
def read_exp2(reader):
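    # decodes a signed 16-bit value stored in WavPack's log2 representation,
    # using the EXP2 lookup table for the fractional part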
value = reader.read_signed(16)
if ((-32768 <= value) and (value < -2304)):
return -(EXP2[-value & 0xFF] << ((-value >> 8) - 9))
elif ((-2304 <= value) and (value < 0)):
return -(EXP2[-value & 0xFF] >> (9 - (-value >> 8)))
elif ((0 <= value) and (value <= 2304)):
return EXP2[value & 0xFF] >> (9 - (value >> 8))
elif ((2304 < value) and (value <= 32767)):
return EXP2[value & 0xFF] << ((value >> 8) - 9)
def decorrelate_channels(residuals,
decorrelation_terms, decorrelation_deltas,
decorrelation_weights, decorrelation_samples):
"""returns a tuple of 1 or 2 lists of decorrelated channel data"""
if (len(residuals) == 2):
latest_pass = [r[:] for r in residuals]
for (term,
delta,
weights,
samples) in zip(decorrelation_terms,
decorrelation_deltas,
decorrelation_weights,
decorrelation_samples):
latest_pass = decorrelation_pass_2ch(latest_pass,
term,
delta,
weights,
samples)
return latest_pass
else:
latest_pass = residuals[0][:]
for (term,
delta,
weight,
samples) in zip(decorrelation_terms,
decorrelation_deltas,
decorrelation_weights,
decorrelation_samples):
latest_pass = decorrelation_pass_1ch(latest_pass,
term,
delta,
weight[0],
samples[0])
return (latest_pass, )
def decorrelation_pass_1ch(correlated_samples,
term, delta, weight, decorrelation_samples):
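    # one decorrelation pass over a single channel: terms 17/18 predict from
    # the two previous samples, terms 1-8 from the sample `term` positions back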
if (term == 18):
assert(len(decorrelation_samples) == 2)
decorrelated = decorrelation_samples[:]
decorrelated.reverse()
for i in xrange(len(correlated_samples)):
temp = (3 * decorrelated[i + 1] - decorrelated[i]) / 2
decorrelated.append(apply_weight(weight, temp) +
correlated_samples[i])
weight += update_weight(temp, correlated_samples[i], delta)
return decorrelated[2:]
elif (term == 17):
assert(len(decorrelation_samples) == 2)
decorrelated = decorrelation_samples[:]
decorrelated.reverse()
for i in xrange(len(correlated_samples)):
temp = 2 * decorrelated[i + 1] - decorrelated[i]
decorrelated.append(apply_weight(weight, temp) +
correlated_samples[i])
weight += update_weight(temp, correlated_samples[i], delta)
return decorrelated[2:]
elif ((1 <= term) and (term <= 8)):
assert(len(decorrelation_samples) == term)
decorrelated = decorrelation_samples[:]
for i in xrange(len(correlated_samples)):
decorrelated.append(apply_weight(weight, decorrelated[i]) +
correlated_samples[i])
weight += update_weight(decorrelated[i],
correlated_samples[i],
delta)
return decorrelated[term:]
else:
raise ValueError("unsupported term")
def decorrelation_pass_2ch(correlated,
term, delta, weights, decorrelation_samples):
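    # positive terms decorrelate each channel independently; negative terms
    # (-1..-3) are cross-channel passes whose weights are clamped to +/-1024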
assert(len(correlated) == 2)
assert(len(correlated[0]) == len(correlated[1]))
assert(len(weights) == 2)
if (((17 <= term) and (term <= 18)) or ((1 <= term) and (term <= 8))):
return (decorrelation_pass_1ch(correlated[0],
term, delta, weights[0],
decorrelation_samples[0]),
decorrelation_pass_1ch(correlated[1],
term, delta, weights[1],
decorrelation_samples[1]))
elif ((-3 <= term) and (term <= -1)):
assert(len(decorrelation_samples[0]) == 1)
decorrelated = ([decorrelation_samples[1][0]],
[decorrelation_samples[0][0]])
weights = list(weights)
if (term == -1):
for i in xrange(len(correlated[0])):
decorrelated[0].append(apply_weight(weights[0],
decorrelated[1][i]) +
correlated[0][i])
decorrelated[1].append(apply_weight(weights[1],
decorrelated[0][i + 1]) +
correlated[1][i])
weights[0] += update_weight(decorrelated[1][i],
correlated[0][i],
delta)
weights[1] += update_weight(decorrelated[0][i + 1],
correlated[1][i],
delta)
weights[0] = max(min(weights[0], 1024), -1024)
weights[1] = max(min(weights[1], 1024), -1024)
elif (term == -2):
for i in xrange(len(correlated[0])):
decorrelated[1].append(apply_weight(weights[1],
decorrelated[0][i]) +
correlated[1][i])
decorrelated[0].append(apply_weight(weights[0],
decorrelated[1][i + 1]) +
correlated[0][i])
weights[1] += update_weight(decorrelated[0][i],
correlated[1][i],
delta)
weights[0] += update_weight(decorrelated[1][i + 1],
correlated[0][i],
delta)
weights[1] = max(min(weights[1], 1024), -1024)
weights[0] = max(min(weights[0], 1024), -1024)
elif (term == -3):
for i in xrange(len(correlated[0])):
decorrelated[0].append(apply_weight(weights[0],
decorrelated[1][i]) +
correlated[0][i])
decorrelated[1].append(apply_weight(weights[1],
decorrelated[0][i]) +
correlated[1][i])
weights[0] += update_weight(decorrelated[1][i],
correlated[0][i],
delta)
weights[1] += update_weight(decorrelated[0][i],
correlated[1][i],
delta)
weights[0] = max(min(weights[0], 1024), -1024)
weights[1] = max(min(weights[1], 1024), -1024)
assert(len(decorrelated[0]) == len(decorrelated[1]))
return (decorrelated[0][1:], decorrelated[1][1:])
else:
raise ValueError("unsupported term")
def apply_weight(weight, sample):
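    # fixed-point multiply: the weight is expressed in 1/1024 units, with rounding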
return ((weight * sample) + 512) >> 10
def update_weight(source, result, delta):
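    # nudges the weight by +/-delta depending on whether the source and result
    # samples share the same sign (a zero sample leaves the weight unchanged)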
if ((source == 0) or (result == 0)):
return 0
elif ((source ^ result) >= 0):
return delta
else:
return -delta
def calculate_crc(samples):
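    # WavPack block checksum: crc = 3 * crc + sample over interleaved samples,
    # starting from 0xFFFFFFFF and reduced modulo 2**32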
crc = 0xFFFFFFFF
for frame in zip(*samples):
for s in frame:
crc = 3 * crc + s
if (crc >= 0):
return crc % 0x100000000
else:
return (2 ** 32 - (-crc)) % 0x100000000
| gpl-2.0 | 2,428,501,330,193,078,300 | 38.9375 | 79 | 0.497418 | false |
baklanovp/pystella | rezone.py | 1 | 2128 | #!/usr/bin/env python3
# #!/usr/bin/python3
import getopt
import os
import sys
from os.path import dirname
import matplotlib.pyplot as plt
from pystella.model.stella import Stella
__author__ = 'bakl'
ROOT_DIRECTORY = dirname(dirname(os.path.abspath(__file__)))
def usage():
print("\n Create hyd- abn-files from res-file.")
print("Usage:")
print(" rezone.py [params]")
print(" -i <model name>. Example: cat_R450_M15_Ni007_E7")
# print(" -n new zon number, default: 100")
print(" -p <model path(directory)>, default: ./")
print(" -s silence mode: no info, no plot")
print(" -t time moment, default: 10")
print(" -h print usage")
def main(name=False):
is_silence = False
t = 1 # days
path = os.getcwd()
try:
opts, args = getopt.getopt(sys.argv[1:], "hsp:i:t:")
except getopt.GetoptError as err:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if not name:
if len(opts) == 0:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-i':
path = ROOT_DIRECTORY
name = str(arg)
break
for opt, arg in opts:
if opt == '-s':
is_silence = True
continue
if opt == '-t':
t = float(arg)
continue
if opt == '-p':
path = os.path.expanduser(str(arg))
if not (os.path.isdir(path) and os.path.exists(path)):
print("No such directory: " + path)
sys.exit(2)
continue
elif opt == '-h':
usage()
sys.exit(2)
model = Stella(name, path=path)
if not model.is_res:
print("There are no %s in the directory: %s " % (name, path))
res = model.get_res()
block = res.read_at_time(time=t)
print("Len(block) = %i " % len(block))
if not is_silence:
plt.plot(block['M'], block['V8'])
plt.show()
# write_data(res, path, fname=name)
if __name__ == '__main__':
main()
| mit | 2,307,978,648,476,167,700 | 24.035294 | 79 | 0.522556 | false |
tonygalmiche/is_plastigray | is_inventaire.py | 1 | 44072 | # -*- coding: utf-8 -*-
from openerp import models,fields,api
from openerp.tools.translate import _
from openerp.exceptions import Warning
import datetime
import base64
import MySQLdb
import logging
_logger = logging.getLogger(__name__)
def _date_creation():
    return datetime.date.today() # today's date
class is_inventaire(models.Model):
_name='is.inventaire'
_order='name desc'
name = fields.Char("N°", readonly=True)
date_creation = fields.Date("Date de l'inventaire", required=True)
createur_id = fields.Many2one('res.users', 'Créé par', readonly=True)
commentaire = fields.Text('Commentaire')
line_ids = fields.One2many('is.inventaire.feuille' , 'inventaire_id', u"Lignes")
inventory_ids = fields.One2many('is.inventaire.inventory', 'inventaire_id', u"Inventaires Odoo")
ecart_ids = fields.One2many('is.inventaire.ecart' , 'inventaire_id', u"Ecarts")
anomalie_ids = fields.One2many('is.inventaire.anomalie' , 'inventaire_id', u"Anomalies")
state = fields.Selection([('creation', u'Création'),('cloture', u'Cloturé'),('traite', u'Traité')], u"État", readonly=True, select=True)
selection = fields.Boolean('Imprimer uniquement les écarts à controler', default=True)
_defaults = {
'date_creation': lambda *a: _date_creation(),
'createur_id': lambda obj, cr, uid, ctx=None: uid,
'state': 'creation',
}
@api.model
def create(self, vals):
        # Block creation if an inventory is already in progress
inventaires=self.env['is.inventaire'].search([ ['state', '=', 'creation'] ])
if len(inventaires)>1:
raise Warning(u"Un inventaire en cours existe déjà !")
data_obj = self.pool.get('ir.model.data')
sequence_ids = data_obj.search(self._cr, self._uid, [('name','=','is_inventaire_seq')])
if sequence_ids:
sequence_id = data_obj.browse(self._cr, self._uid, sequence_ids[0]).res_id
vals['name'] = self.pool.get('ir.sequence').get_id(self._cr, self._uid, sequence_id, 'id')
new_id = super(is_inventaire, self).create(vals)
return new_id
@api.multi
def get_products_ecarts(self):
for obj in self:
res=[]
products=[]
for line in obj.ecart_ids:
if line.selection:
if line.product_id not in products:
products.append(line.product_id)
return products
@api.multi
def get_emplacement_ecarts(self,product_id):
cr=self._cr
emplacements=[]
for obj in self:
SQL="""
select distinct sl.name,iil.lieu
from is_inventaire_line iil inner join stock_location sl on iil.location_id=sl.id
where
iil.product_id='"""+str(product_id)+"""' and
iil.inventaire_id="""+str(obj.id)+"""
"""
cr.execute(SQL)
res=cr.fetchall()
for row in res:
vals={
'magasin': row[0],
'lieu' : row[1],
}
emplacements.append(vals)
return emplacements
@api.multi
def creer_feuille(self,obj):
dummy, view_id = self.env['ir.model.data'].get_object_reference('is_plastigray', 'is_inventaire_feuille_form_view')
context=self._context
if context is None:
context = {}
ctx = context.copy()
ctx.update({'default_inventaire_id': obj.id})
return {
'name': "Feuille d'inventaire",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'is.inventaire.feuille',
'type': 'ir.actions.act_window',
'target': 'current',
'context':ctx,
}
@api.multi
def action_creer_feuille(self):
for obj in self:
return self.creer_feuille(obj)
@api.multi
def action_fin_inventaire(self):
for obj in self:
            # ** Compute the work-in-progress (encours) lines ******************
for line in obj.line_ids:
line.calculer_encours()
            # ** Determine whether lots were imported or must be computed ******
lots=self.env['is.inventaire.line'].search([ ['inventaire_id','=',obj.id], ['lot_id','!=',False] ])
nb=len(lots)
if nb>0:
self.action_fin_inventaire_avec_lot()
else:
self.action_fin_inventaire_sans_lot()
@api.multi
def action_fin_inventaire_sans_lot(self):
cr=self._cr
for obj in self:
self.action_calcul_ecart()
            # ** Delete the Odoo inventories linked to this import *************
for row in obj.inventory_ids:
row.inventory_id.unlink()
row.unlink()
#*******************************************************************
            # ** Find the list of stock locations ******************************
SQL="""
select distinct location_id
from is_inventaire_line
where inventaire_id='"""+str(obj.id)+"""'
"""
cr.execute(SQL)
res=cr.fetchall()
# ******************************************************************
for row in res:
location_id=row[0]
                # ** Create the Odoo inventory *********************************
location=self.env['stock.location'].browse(location_id)
vals={
'name': obj.name+'-'+location.name,
'location_id': location_id,
'state': 'confirm',
}
inventory=self.env['stock.inventory'].create(vals)
vals={
'inventaire_id': obj.id,
'inventory_id': inventory.id,
}
inventaire_inventory=self.env['is.inventaire.inventory'].create(vals)
                # ** Delete the temporary data *********************************
SQL="delete from is_inventaire_line_tmp"
cr.execute(SQL)
                # ** List the current stock quants *****************************
SQL="""
select sq.product_id, pt.uom_id, sq.lot_id, spl.create_date, sq.qty
from stock_quant sq inner join product_product pp on sq.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
left outer join stock_production_lot spl on sq.lot_id=spl.id
where sq.location_id='"""+str(location_id)+"""'
"""
cr.execute(SQL)
res2=cr.fetchall()
for row2 in res2:
vals={
'product_id' : row2[0],
'us_id' : row2[1],
'location_id': location_id,
'lot_id' : row2[2],
'date_lot' : row2[3] or datetime.datetime.now(),
'qt_us' : row2[4],
}
tmp=self.env['is.inventaire.line.tmp'].create(vals)
                # ** Process the discrepancies *********************************
ecarts=self.env['is.inventaire.ecart'].search([ ['inventaire_id','=',obj.id], ['location_id','=',location_id] ])
for ecart in ecarts:
                    # Positive discrepancy: lines must be added
if ecart.ecart>0:
                        # ** Find the latest lot for this product **************
lot=False
lots=self.env['stock.production.lot'].search([['product_id', '=', ecart.product_id.id]],limit=1,order='id desc')
for l in lots:
lot=l
                        # ** Create a lot named after the inventory ************
if lot==False:
vals={
'name': obj.name,
'product_id': ecart.product_id.id,
}
lot=self.env['stock.production.lot'].create(vals)
vals={
'product_id' : ecart.product_id.id,
'us_id' : ecart.product_id.product_tmpl_id.uom_id.id,
'location_id': location_id,
'lot_id' : lot.id,
'date_lot' : datetime.datetime.now(),
'qt_us' : ecart.ecart,
}
tmp=self.env['is.inventaire.line.tmp'].create(vals)
                    # Negative discrepancy: remove the quantities from the oldest lots
if ecart.ecart<0:
SQL="""
select id,product_id, lot_id, date_lot, qt_us
from is_inventaire_line_tmp
where location_id='"""+str(location_id)+"""' and
product_id='"""+str(ecart.product_id.id)+"""'
order by date_lot, qt_us
"""
cr.execute(SQL)
res2=cr.fetchall()
ecart=-ecart.ecart
for row2 in res2:
line=self.env['is.inventaire.line.tmp'].browse(row2[0])
qt=line.qt_us
if qt>=ecart:
qt_us=qt-ecart
ecart=0
else:
qt_us=0
ecart=ecart-qt
line.qt_us=qt_us
                # ** Create the inventory lines from the temporary table *******
SQL="""
select product_id, us_id, lot_id, sum(qt_us)
from is_inventaire_line_tmp
where location_id='"""+str(location_id)+"""'
group by product_id, us_id, lot_id
"""
cr.execute(SQL)
res2=cr.fetchall()
for row2 in res2:
qty=row2[3]
vals={
'inventory_id' : inventory.id,
'location_id' : location_id,
'product_id' : row2[0],
'product_uom_id': row2[1],
'prod_lot_id' : row2[2],
'product_qty' : qty,
}
line_id=self.env['stock.inventory.line'].create(vals)
for feuille in obj.line_ids:
feuille.state="cloture"
for line in feuille.line_ids:
line.state="cloture"
obj.state="cloture"
@api.multi
def action_fin_inventaire_avec_lot(self):
cr=self._cr
for obj in self:
            # ** Delete the Odoo inventories linked to this import *************
for row in obj.inventory_ids:
row.inventory_id.unlink()
row.unlink()
#*******************************************************************
            # ** Find the list of stock locations ******************************
SQL="""
select distinct location_id
from is_inventaire_line
where inventaire_id='"""+str(obj.id)+"""'
"""
cr.execute(SQL)
res=cr.fetchall()
# ******************************************************************
for row in res:
                # ** Create the Odoo inventory *********************************
location_id=row[0]
location=self.env['stock.location'].browse(location_id)
vals={
'name': obj.name+'-'+location.name,
'location_id': location_id,
'state': 'confirm',
}
inventory=self.env['stock.inventory'].create(vals)
vals={
'inventaire_id': obj.id,
'inventory_id': inventory.id,
}
new_id=self.env['is.inventaire.inventory'].create(vals)
                # ** Delete the temporary data *********************************
SQL="delete from is_inventaire_line_tmp"
cr.execute(SQL)
                # ** List the entries from all count sheets ********************
SQL="""
select product_id, us_id, lot_id, sum(qt_us_calc)
from is_inventaire_line
where inventaire_id='"""+str(obj.id)+"""' and
location_id='"""+str(location_id)+"""' and
encours!=True
group by product_id, us_id, lot_id
"""
cr.execute(SQL)
res2=cr.fetchall()
for row2 in res2:
vals={
'product_id' : row2[0],
'us_id' : row2[1],
'location_id': location_id,
'lot_id' : row2[2],
'qt_us' : row2[3],
}
tmp=self.env['is.inventaire.line.tmp'].create(vals)
                # ** List the current stock quants in order to reset them to 0 *
SQL="""
select sq.product_id, pt.uom_id, sq.lot_id, sum(sq.qty)
from stock_quant sq inner join product_product pp on sq.product_id=pp.id
inner join product_template pt on pp.product_tmpl_id=pt.id
where sq.location_id='"""+str(location_id)+"""'
group by sq.product_id, pt.uom_id, sq.lot_id
"""
cr.execute(SQL)
res2=cr.fetchall()
for row2 in res2:
vals={
'product_id' : row2[0],
'us_id' : row2[1],
'location_id': location_id,
'lot_id' : row2[2],
'qt_us' : 0,
}
tmp=self.env['is.inventaire.line.tmp'].create(vals)
                # ** Create the inventory lines from the temporary table *******
SQL="""
select product_id, us_id, lot_id, sum(qt_us)
from is_inventaire_line_tmp
where location_id='"""+str(location_id)+"""'
group by product_id, us_id, lot_id
"""
cr.execute(SQL)
res2=cr.fetchall()
for row2 in res2:
qty=row2[3]
if qty<0:
qty=0
vals={
'inventory_id' : inventory.id,
'location_id' : location_id,
'product_id' : row2[0],
'product_uom_id': row2[1],
'prod_lot_id' : row2[2],
'product_qty' : qty,
}
line_id=self.env['stock.inventory.line'].create(vals)
self.action_calcul_ecart()
for feuille in obj.line_ids:
feuille.state="cloture"
for line in feuille.line_ids:
line.state="cloture"
obj.state="cloture"
@api.multi
def action_calcul_ecart(self):
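        # rebuilds the discrepancy (ecart) lines: for every counted location,
        # compares the current Odoo stock with the counted quantities per
        # product and stores the gap together with its valuation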
cr=self._cr
for obj in self:
for row in obj.ecart_ids:
row.unlink()
            # ** Find the list of stock locations ******************************
SQL="""
select distinct location_id
from is_inventaire_line
where inventaire_id='"""+str(obj.id)+"""'
"""
cr.execute(SQL)
res=cr.fetchall()
# ******************************************************************
for row in res:
location_id=row[0]
SQL="""
select
pt.is_code,
pt.name,
pt.uom_id,
( select sum(sq.qty)
from stock_quant sq
where sq.location_id='"""+str(location_id)+"""' and
sq.product_id=pp.id
),
( select sum(qt_us_calc)
from is_inventaire_line iil
where iil.location_id='"""+str(location_id)+"""' and
iil.product_id=pp.id and
iil.inventaire_id='"""+str(obj.id)+"""' and
iil.encours!=True
),
pp.id
from product_product pp inner join product_template pt on pp.product_tmpl_id=pt.id
where pp.id>0
"""
cr.execute(SQL)
res2=cr.fetchall()
ct=0
nb=len(res2)
for row2 in res2:
ct=ct+1
#_logger.info('action_calcul_ecart : location_id='+str(location_id)+' : '+str(ct)+'/'+str(nb)+' : '+str(row2[0]))
qt_odoo = row2[3] or 0
qt_inventaire = row2[4] or 0
ecart = qt_inventaire-qt_odoo
product_id = row2[5]
product=self.env['product.product'].browse(product_id)
if product and product.is_category_id.a_inventorier:
if ecart!=0:
                            #** Find the list of places (lieux) ****************
SQL="""
select iil.lieu
from is_inventaire_line iil
where iil.location_id='"""+str(location_id)+"""' and
iil.product_id="""+str(product_id)+""" and
iil.inventaire_id='"""+str(obj.id)+"""' and
iil.encours!=True
"""
cr.execute(SQL)
res3=cr.fetchall()
lieux=[]
for row3 in res3:
lieu=row3[0] or '#N/A'
if lieu not in lieux:
lieux.append(lieu)
lieux='\n'.join(lieux)
#***************************************************
                            #** Look up the updated cost (cout_act_total) ******
SQL="""
select cout_act_total
from is_cout
where name="""+str(product_id)+"""
"""
cr.execute(SQL)
res4=cr.fetchall()
cout_actualise = 0
valorisation_ecart = 0
for row4 in res4:
if row4[0]:
cout_actualise = row4[0]
valorisation_ecart = cout_actualise * ecart
#***************************************************
vals={
'inventaire_id' : obj.id,
'location_id' : location_id,
'product_id' : row2[5],
'code' : row2[0],
'designation' : row2[1],
'us_id' : row2[2],
'qt_odoo' : qt_odoo,
'qt_inventaire' : qt_inventaire,
'lieu' : lieux,
'ecart' : ecart,
'cout_actualise' : cout_actualise,
'valorisation_ecart': valorisation_ecart,
}
tmp=self.env['is.inventaire.ecart'].create(vals)
@api.multi
def action_valide_inventaire(self):
for obj in self:
for row in obj.inventory_ids:
row.inventory_id.action_done()
for feuille in obj.line_ids:
feuille.state="traite"
for line in feuille.line_ids:
line.state="traite"
obj.state="traite"
@api.multi
def action_lignes_inventaire(self):
for obj in self:
return {
'name': u'Lignes inventaire '+obj.name,
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.inventaire.line',
'domain': [
('inventaire_id','=',obj.id),
],
'type': 'ir.actions.act_window',
}
@api.multi
def action_ecarts_inventaire(self):
for obj in self:
return {
'name': u'Ecarts inventaire '+obj.name,
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.inventaire.ecart',
'domain': [
('inventaire_id','=',obj.id),
],
'type': 'ir.actions.act_window',
}
@api.multi
def action_anomalies_import_inventaire(self):
cr=self._cr
for obj in self:
SQL="""
select
sl.id,
iie.product_id,
iie.code,
iie.designation,
iie.qt_odoo::Numeric(16,2),
iie.qt_inventaire::Numeric(16,2),
iie.ecart::Numeric(16,2),
(
select sum(sil1.theoretical_qty)::Numeric(16,2)
from stock_inventory_line sil1
where
sil1.location_id=iie.location_id and
sil1.product_id=iie.product_id and
sil1.inventory_id=iii.inventory_id
) theoretical_qty,
(
select sum(sil2.product_qty)::Numeric(16,2)
from stock_inventory_line sil2
where
sil2.location_id=iie.location_id and
sil2.product_id=iie.product_id and
sil2.inventory_id=iii.inventory_id
) product_qty,
(
select (sum(sil3.product_qty)-sum(sil3.theoretical_qty))::Numeric(16,2)
from stock_inventory_line sil3
where
sil3.location_id=iie.location_id and
sil3.product_id=iie.product_id and
sil3.inventory_id=iii.inventory_id
) ecart_odoo,
(iie.ecart-(
select (sum(sil3.product_qty)-sum(sil3.theoretical_qty))::Numeric(16,2)
from stock_inventory_line sil3
where
sil3.location_id=iie.location_id and
sil3.product_id=iie.product_id and
sil3.inventory_id=iii.inventory_id
))::Numeric(16,2) anomalie
from is_inventaire_ecart iie inner join stock_location sl on iie.location_id=sl.id
inner join is_inventaire_inventory iii on iie.inventaire_id=iii.inventaire_id
inner join stock_inventory si on iii.inventory_id=si.id and iie.location_id=si.location_id
where iie.inventaire_id="""+str(obj.id)+""" and
(
select (sum(sil3.product_qty)-sum(sil3.theoretical_qty))::Numeric(16,2)
from stock_inventory_line sil3
where
sil3.location_id=iie.location_id and
sil3.product_id=iie.product_id and
sil3.inventory_id=iii.inventory_id
) <> iie.ecart::Numeric(16,2)
"""
cr.execute(SQL)
res=cr.fetchall()
obj.anomalie_ids.unlink()
for row in res:
vals={
'inventaire_id' : obj.id,
'location_id' : row[0],
'product_id' : row[1],
'code' : row[2],
'designation' : row[3],
'qt_odoo' : row[4],
'qt_inventaire' : row[5],
'ecart' : row[6],
'theoretical_qty': row[7],
'product_qty' : row[8],
'ecart_odoo' : row[9],
'anomalie' : row[10],
}
self.env['is.inventaire.anomalie'].create(vals)
return {
'name': u'Anomalies inventaire '+obj.name,
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'is.inventaire.anomalie',
'domain': [
('inventaire_id','=',obj.id),
],
'type': 'ir.actions.act_window',
}
class is_inventaire_feuille(models.Model):
_name='is.inventaire.feuille'
_order='name'
inventaire_id = fields.Many2one('is.inventaire', 'Inventaire', required=True, ondelete='cascade', readonly=True)
name = fields.Char('Numéro de feuille', required=True)
date_creation = fields.Date("Date de création" , readonly=True)
createur_id = fields.Many2one('res.users', 'Créé par', readonly=True)
fichier = fields.Binary('Fichier à importer', help=u"Le fichier doit-être au format CSV. Séparateur virgule avec ces colonnes : article, lot, emplacement, statut, stotck A et stock Q")
line_ids = fields.One2many('is.inventaire.line', 'feuille_id', u"Lignes")
anomalies = fields.Text('Anomalies')
state = fields.Selection([('creation', u'Création'),('cloture', u'Cloturé'),('traite', u'Traité')], u"État", readonly=True, select=True)
sequence=1
_defaults = {
'createur_id': lambda obj, cr, uid, ctx=None: uid,
'date_creation': lambda *a: _date_creation(),
'state': 'creation',
}
def calculer_encours(self):
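        # explodes the lines flagged as work-in-progress (encours) into their
        # BOM components so that they can be counted as raw materials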
for obj in self:
            # Delete the previously computed component lines
for line in obj.line_ids:
if line.composant_encours:
line.unlink()
            # Renumber the lines
self.sequence=1
for line in obj.line_ids:
line.sequence=self.sequence
self.sequence=self.sequence+1
            # Create the component lines
self.sequence=10000
for line in obj.line_ids:
if line.encours:
product_tmpl_id=line.product_id.product_tmpl_id.id
self.eclate_nomenclature(obj,product_tmpl_id,line.qt_us,line.location_id.id)
self.sequence=self.sequence+1
@api.multi
def eclate_nomenclature(self,obj,product_tmpl_id,qt_us,location_id):
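        # recursive BOM explosion: phantom lines recurse into their own BOM,
        # the other lines create component count lines scaled by qt_us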
nomenclatures=self.env['mrp.bom'].search([ ['product_tmpl_id','=',product_tmpl_id],['is_sous_traitance','!=',True] ])
if len(nomenclatures)>0:
code=self.env['product.template'].browse(product_tmpl_id).is_code
nomenclature=nomenclatures[0]
for bom_line in nomenclature.bom_line_ids:
qt=bom_line.product_qty*qt_us
if bom_line.type=='phantom':
product_tmpl_id=bom_line.product_id.product_tmpl_id.id
self.eclate_nomenclature(obj,product_tmpl_id,qt,location_id)
else:
vals={
'feuille_id' : obj.id,
'sequence' : self.sequence,
'product_id' : bom_line.product_id.id,
'encours' : False,
'composant_encours' : True,
'location_id' : location_id,
'qt_us' : qt,
'lieu' : code
}
tmp=self.env['is.inventaire.line'].create(vals)
self.sequence=self.sequence+1
@api.multi
def action_acceder_feuille(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('is_plastigray', 'is_inventaire_feuille_form_view')
for obj in self:
return {
'name': "Feuille d'inventaire",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'is.inventaire.feuille',
'type': 'ir.actions.act_window',
'res_id': obj.id,
'domain': '[]',
}
@api.multi
def action_creer_feuille(self):
for obj in self:
return obj.inventaire_id.creer_feuille(obj.inventaire_id)
    # TODO: this function is no longer active, but it is kept as an example
@api.multi
def action_import_fichier(self):
for obj in self:
for row in obj.line_ids:
row.unlink()
csvfile=base64.decodestring(obj.fichier)
csvfile=csvfile.split("\n")
tab=[]
sequence=0
for row in csvfile:
lig=row.split(",")
ligne=[]
for cel in lig:
if cel.startswith('"'):
cel = cel[1:]
if cel.endswith('"'):
cel = cel[0:-1]
ligne.append(cel)
tab.append(ligne)
                # Find the product
products=self.env['product.product'].search([['is_code', '=', ligne[0]]])
product_id=0
for product in products:
product_id=product.id
                # Find the location
location_id=0
statut=""
if len(ligne)>4:
                    # If status = Q, look for another location
emplacement = ligne[2]
statut = ligne[3][0:1]
if statut=="Q":
emplacement="Q0"
locations=self.env['stock.location'].search([ ['usage','=','internal'],['name','like',emplacement] ])
for location in locations:
location_id=location.id
                # Quantity
qt=0
if len(ligne)>6:
if statut=="Q":
val=ligne[6]
else:
val=ligne[4]
val=val.replace(" ", "")
try:
qt=float(val)
except ValueError:
continue
if product_id and location_id:
sequence=sequence+1
vals={
'feuille_id': obj.id,
'sequence': sequence,
'product_id':product_id,
'location_id':location_id,
'qt_us': qt,
'lot': ligne[1],
}
self.env['is.inventaire.line'].create(vals)
@api.multi
def action_import_prodstar(self):
uid=self._uid
user=self.env['res.users'].browse(uid)
soc=user.company_id.partner_id.is_code
pwd=user.company_id.is_mysql_pwd
for obj in self:
for row in obj.line_ids:
row.unlink()
try:
db = MySQLdb.connect(host="dynacase", user="root", passwd=pwd, db="Plastigray")
except MySQLdb.OperationalError, msg:
raise Warning(u"La connexion à Prodstar a échouée ! \n"+str(msg[1]))
cur = db.cursor()
SQL="""
SELECT PA0003, PF0023, PF0059, PF0102, sum(PF0104), sum(PF0113)
FROM FP2STO inner join FP2ART on PF0001=PA0001 AND PF0003=PA0003
WHERE PF0001="""+str(soc)+""" AND (PA0184<70 OR PA0184=80)
GROUP BY PA0003, PF0023, PF0059, PF0102
"""
cur.execute(SQL)
sequence=0
anomalies=[]
for row in cur.fetchall():
                # Find the product
products=self.env['product.product'].search([['is_code', '=', row[0]]])
product=False
for p in products:
product=p
if product==False:
anomalies.append("Article "+str(row[0])+" inexistant !")
else:
                    #** Find the lot *******************************************
lot=False
lots=self.env['stock.production.lot'].search([['name', '=', row[1]],['product_id', '=', product.id]])
for l in lots:
lot=l
                    #** Create the lot if it does not exist ********************
if lot==False:
vals={
'name': str(row[1]),
'product_id': product.id,
}
lot=self.env['stock.production.lot'].create(vals)
                    # Find the location
location_id=0
statut=""
                    # If status = Q, look for another location
emplacement = row[2]
statut = row[3][0:1]
if statut=="Q":
emplacement=row[3]
if emplacement=="Q":
emplacement="Q0"
locations=self.env['stock.location'].search([ ['usage','=','internal'],['name','=',emplacement] ])
location=False
for l in locations:
location=l
if location==False:
anomalies.append("Emplacement "+str(emplacement)+" inexistant !")
if statut=="Q":
qt=round(row[5],2)
else:
qt=round(row[4],2)
if product and location:
sequence=sequence+1
vals={
'feuille_id': obj.id,
'sequence': sequence,
'product_id':product.id,
'location_id':location.id,
'qt_us': qt,
'lot_id': lot.id,
}
self.env['is.inventaire.line'].create(vals)
obj.anomalies='\n'.join(anomalies)
db.close()
class is_inventaire_line(models.Model):
_name='is.inventaire.line'
_order='feuille_id,sequence,id'
inventaire_id = fields.Many2one('is.inventaire', 'Inventaire', store=True, compute='_compute')
feuille_id = fields.Many2one('is.inventaire.feuille', 'Feuille Inventaire', required=True, ondelete='cascade')
sequence = fields.Integer('Séquence')
product_id = fields.Many2one('product.product', 'Article' , required=True,select=1)
encours = fields.Boolean('Encours')
composant_encours = fields.Boolean('Composant', help='Composant encours')
us_id = fields.Many2one('product.uom','US', store=True, compute='_compute')
uc = fields.Char('UC', store=True, compute='_compute')
uc_us = fields.Integer('US par UC', store=True, compute='_compute')
location_id = fields.Many2one('stock.location', 'Emplacement', required=True,select=1)
qt_us = fields.Float("Qt US saisie")
qt_uc = fields.Float("Qt UC saisie")
qt_us_calc = fields.Float('Qt US', store=True, compute='_compute')
lieu = fields.Char('Lieu')
lot_id = fields.Many2one('stock.production.lot','Lot')
state = fields.Selection([('creation', u'Création'),('cloture', u'Cloturé'),('traite', u'Traité')], u"État", readonly=True, select=True)
_defaults = {
'sequence': 10,
'state': 'creation',
}
@api.multi
def get_emplacement(self, obj):
emplacement_name = ''
if obj.location_id.location_id:
emplacement_name = str(obj.location_id.location_id.name) + '/' + str(obj.location_id.name)
return emplacement_name
@api.depends('product_id','qt_us','qt_uc')
def _compute(self):
for obj in self:
obj.inventaire_id=obj.feuille_id.inventaire_id.id
if obj.product_id:
obj.us_id=obj.product_id.uom_id.id
obj.uc_us=1
if len(obj.product_id.packaging_ids):
packaging=obj.product_id.packaging_ids[0]
obj.uc=packaging.ul.name
obj.uc_us=packaging.qty
if obj.qt_uc!=0:
obj.qt_us_calc=obj.qt_uc*obj.uc_us
else:
obj.qt_us_calc=obj.qt_us
@api.multi
def onchange_product_id(self,product_id):
v={}
valeur=self.env['is.mem.var'].get(self._uid,'location_id')
v['location_id'] = int(valeur)
return {'value': v}
@api.multi
def onchange_location_id(self,product_id,location_id,qt_us,qt_uc,lieu):
v={}
v['product_id'] = product_id
v['location_id'] = location_id
v['qt_us'] = qt_us
v['qt_uc'] = qt_uc
v['lieu'] = lieu
if location_id:
self.env['is.mem.var'].set(self._uid, 'location_id', location_id)
return {'value': v}
class is_inventaire_line_tmp(models.Model):
_name='is.inventaire.line.tmp'
_order='product_id'
product_id = fields.Many2one('product.product', 'Article' , required=True)
us_id = fields.Many2one('product.uom','US')
location_id = fields.Many2one('stock.location', 'Emplacement', required=True)
qt_us = fields.Float("Qt US")
lot_id = fields.Many2one('stock.production.lot','Lot')
date_lot = fields.Datetime('Date création lot')
class is_inventaire_inventory(models.Model):
_name='is.inventaire.inventory'
_order='inventaire_id'
inventaire_id = fields.Many2one('is.inventaire', 'Inventaire', required=True, ondelete='cascade', readonly=True)
inventory_id = fields.Many2one('stock.inventory', 'Inventaire')
@api.multi
def action_acceder_inventaire(self):
dummy, view_id = self.env['ir.model.data'].get_object_reference('stock', 'view_inventory_form')
for obj in self:
return {
'name': "Inventaire",
'view_mode': 'form',
'view_id': view_id,
'view_type': 'form',
'res_model': 'stock.inventory',
'type': 'ir.actions.act_window',
'res_id': obj.inventory_id.id,
'domain': '[]',
}
class is_inventaire_ecart(models.Model):
_name='is.inventaire.ecart'
_order='inventaire_id,code,location_id'
inventaire_id = fields.Many2one('is.inventaire', u'Inventaire', select=True)
location_id = fields.Many2one('stock.location', u'Magasin', required=True, select=True)
product_id = fields.Many2one('product.product', u'Article' , required=True, select=True)
code = fields.Char(u"Article")
designation = fields.Char(u"Désignation")
us_id = fields.Many2one('product.uom',u'US')
qt_odoo = fields.Float(u"Qt Odoo")
qt_inventaire = fields.Float(u"Qt Inventaire")
ecart = fields.Float(u"Ecart" , digits=(12, 2), help="Qt Inventaire - Qt Odoo")
cout_actualise = fields.Float(u"Coût actualisé" , digits=(12, 4))
valorisation_ecart = fields.Float(u"Valorisation écart", digits=(12, 0))
lieu = fields.Text(u'Emplacement')
selection = fields.Boolean(u'Ecart à controler')
@api.multi
def get_feuilles(self):
for obj in self:
filtre=[
('inventaire_id','=', obj.inventaire_id.id),
('product_id' ,'=', obj.product_id.id),
('location_id' ,'=', obj.location_id.id),
]
lines = self.env['is.inventaire.line'].search(filtre)
feuilles=[]
for line in lines:
feuille=line.feuille_id.name
if feuille not in feuilles:
feuilles.append(feuille)
return u', '.join(feuilles)
class is_inventaire_anomalie(models.Model):
_name='is.inventaire.anomalie'
_order='inventaire_id,location_id,code'
inventaire_id = fields.Many2one('is.inventaire', 'Inventaire', select=True)
location_id = fields.Many2one('stock.location', 'Emplacement', required=True, select=True)
product_id = fields.Many2one('product.product', 'Article' , required=True, select=True)
code = fields.Char("Article")
designation = fields.Char("Désignation")
qt_odoo = fields.Float("Qt Odoo" , digits=(12, 2), help="Qt dans Odoo dans le programme 'Import inventaire'")
qt_inventaire = fields.Float("Qt saisie inventaire" , digits=(12, 2), help="Qt saisie (compté) dans le programme 'Import inventaire'")
ecart = fields.Float("Ecart" , digits=(12, 2), help="Ecart calculé par le programme 'Import inventaire'")
theoretical_qty = fields.Float('Qt Odoo fiche inventaire', digits=(12, 2), help="Qt dans Odoo dans la fiche d'inventaire")
product_qty = fields.Float("Qt fiche inventaire" , digits=(12, 2), help="Qt à mettre à jour (nouvelle quantité) dans la fiche d'inventaire")
ecart_odoo = fields.Float("Ecart Odoo" , digits=(12, 2), help="Ecart de la fiche d'inventaire")
anomalie = fields.Float("Anomalie" , digits=(12, 2), help="Différence entre l'écart du programme 'Import inventaire' et l’écart calculé par Odoo dans la fiche d'inventaire")
| mit | -8,118,558,261,304,154,000 | 41.345525 | 199 | 0.443417 | false |
ibab/tensorprob | tests/distributions/test_combinators.py | 1 | 3595 | from __future__ import division
import numpy as np
from tensorprob import Model, Parameter, Normal, Exponential, Mix2
def test_mix2_fit():
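    # fits a two-component mixture (bounded Normal + Exponential) to blinded
    # toy data and checks the fitted parameters against the generating values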
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1)
a = Parameter(lower=0)
f = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, np.inf)])
X12 = Mix2(f, X1, X2, bounds=[(6, 17), (18, 36)])
model.observed(X12)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
f: 0.3,
})
# Generate some data to fit
np.random.seed(42)
exp_data = np.random.exponential(10, 200000)
exp_data = exp_data[(exp_data < 8) | (10 < exp_data)]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
assert result.success
assert abs(model.state[mu] - 19) < 5e-3
assert abs(model.state[sigma] - 2) < 5e-3
assert abs(model.state[a] - 0.1) < 5e-4
assert abs(model.state[f] - (len(norm_data)/len(data))) < 5e-4
def test_mix2_fit_with_mix2_input():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X12 = Mix2(f_1, X1, X2, bounds=[(6, 17), (18, 36)])
X3 = Exponential(b)
X123 = Mix2(f_2, X12, X3, bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
| mit | 7,315,858,475,312,611,000 | 29.726496 | 91 | 0.532962 | false |
Svolcano/python_exercise | dianhua/test/bill_data_fusion_test.py | 1 | 1842 | # -*- coding: utf-8 -*-
import pymongo
import sys
sys.path.append('../')
from worker.bill_data_fusion import data_fusion
DEV_MONGO_CONFIG = {
'host': '172.18.19.219',
'port': 27017,
'db': 'crs'
}
class Dev_MongodbConnection(object):
"""
    MongoDB connection class
"""
def __init__(self):
self.conn = None
self.set_dev_db_conn()
def set_dev_db_conn(self):
self.client = pymongo.MongoClient(DEV_MONGO_CONFIG['host'], DEV_MONGO_CONFIG['port'], connect=False)
self.conn = self.client[DEV_MONGO_CONFIG['db']]
def insert_call(self,data):
self.conn['call_log'].insert_many(data)
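# Illustrative usage sketch (the variable name `fused_bills` is assumed, not defined
# in this script): write fused bill documents back into the `call_log` collection.
#   conn = Dev_MongodbConnection()
#   conn.insert_call(fused_bills)   # fused_bills: list of per-month bill dicts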
if __name__ == '__main__':
dev_conn = Dev_MongodbConnection()
tel='13070194201'
    print 'Start test'
    # Perform data fusion
ret = dev_conn.conn['phone_bill'].find({'tel':tel},{'_id':0})
for log in ret:
missing_log_list=['201801', '201802']
call_log=[]
print 'missing_log_list={}'.format(missing_log_list)
log=log['phone_bill']
# print log
for bill in log:
# print bill
if bill['bill_month'] in missing_log_list:
continue
call_log.append(bill)
# print call_log
para_dict={'tel':tel,'final_bill_logs':call_log,
'missing_month_list':missing_log_list}
call_log, missing_log_list, \
cache_hit_month_list,fusion_cost_time=data_fusion(**para_dict)
    print 'Test finished'
print 'missing_log_list={}'.format(missing_log_list)
print 'cache_hit_month_list={}'.format(cache_hit_month_list)
print 'fusion_cost_time={}'.format(fusion_cost_time)
#dev_conn.insert_call(call_log)
# for x in call_log:
# print call_log
| mit | -3,320,518,550,378,024,000 | 26.25 | 108 | 0.551991 | false |
JoshRodd/mockdevices | asa_config.py | 1 | 13358 | #!/usr/bin/env python3
from jinja2 import Environment
baseline_asa_conf_file_text = '''\
: Saved
:
: Serial Number: 9AJDFJGV165
: Hardware: ASAv, 2048 MB RAM, CPU Pentium II 2000 MHz
:
ASA Version 9.5(2)204
!
terminal width 511
hostname {{ hostname }}
enable password 2KFQnbNIdI.2KYOU encrypted
xlate per-session deny tcp any4 any4
xlate per-session deny tcp any4 any6
xlate per-session deny tcp any6 any4
xlate per-session deny tcp any6 any6
xlate per-session deny udp any4 any4 eq domain
xlate per-session deny udp any4 any6 eq domain
xlate per-session deny udp any6 any4 eq domain
xlate per-session deny udp any6 any6 eq domain
passwd 2KFQnbNIdI.2KYOU encrypted
names
!
interface GigabitEthernet0/0
description to ** MPLS Uplink **
duplex full
nameif outside
security-level 0
ip address {{ wan_address }}
!
interface GigabitEthernet0/1
description to ** Internal MPI Network **
duplex full
nameif MPI
security-level 100
ip address {{ mpi_address }}
!
interface GigabitEthernet0/2
description ** Internal MPE Network **
duplex full
nameif MPE
security-level 90
ip address {{ mpe_address }}
!
interface GigabitEthernet0/3
description to ** Internal Users **
nameif users
no security-level
ip address {{ users_address }}
!
interface Management0/0
shutdown
description to ** MGMT **
duplex full
management-only
nameif mgmt
security-level 100
ip address {{ management_address }}
!
ftp mode passive
same-security-traffic permit inter-interface
object-group network INTERNAL_USERS
network-object {{ users_network }}
object-group network ENCLAVE_USER_SERVICES
network-object 10.100.70.160 255.254.255.252
object-group network INTERNET_SERVERS
network-object 71.129.45.34 255.255.255.255
object-group network ENCLAVE_MAIL_SERVERS
network-object 10.100.51.0 255.255.255.0
network-object 10.100.53.0 255.255.255.0
network-object 10.100.55.0 255.255.255.0
network-object 10.100.57.0 255.255.255.0
network-object 10.100.59.0 255.255.255.0
object-group network ENCLAVE_ORDERING_SERVERS
network-object 10.100.52.0 255.255.255.0
network-object 10.100.54.0 255.255.255.0
network-object 10.100.56.0 255.255.255.0
network-object 10.100.58.0 255.255.255.0
network-object 10.100.60.0 255.255.255.0
object-group network INTERNAL_MPE_SERVERS
network-object {{ mpe_network }}
object-group network INTERNAL_MPI_SERVERS
network-object {{ mpi_network }}
object-group network MPE_SERVERS
network-object 10.0.0.128 255.0.0.192
object-group network MPI_SERVERS
network-object 10.0.0.192 255.0.0.192
object-group service MPI_SERVICES
service-object tcp destination range 1098 1099
service-object udp destination range 1098 1099
service-object tcp destination range ftp telnet
service-object ah
service-object 97
object-group service MPE_SERVICES
service-object tcp destination eq https
service-object tcp destination eq 8443
object-group service INTERNET_SERVICES
service-object tcp destination eq 8443
service-object udp destination eq 25
object-group service USER_SERVICES
service-object tcp destination eq 8080
object-group service SCW_12345_svc_AR1
service-object tcp destination range 1000 10000
service-object udp destination eq domain
service-object udp destination range 1000 10000
service-object esp
access-list outside-in remark ### outside-in ACL
access-list outside-in extended permit icmp any any
access-list outside-in extended permit object-group INTERNET_SERVICES object-group INTERNET_SERVERS object-group INTERNAL_MPE_SERVERS
access-list outside-in extended permit object-group INTERNET_SERVICES object-group INTERNET_SERVERS object-group INTERNAL_MPI_SERVERS
access-list outside-in extended permit udp host 10.101.70.14 object-group INTERNAL_USERS eq 139
access-list outside-in extended permit tcp 10.101.72.10 255.255.255.254 object-group INTERNAL_USERS eq 445
access-list outside-in extended permit object-group MPI_SERVICES object-group MPI_SERVERS object-group INTERNAL_MPI_SERVERS
access-list outside-in extended permit object-group MPE_SERVICES object-group MPE_SERVERS object-group INTERNAL_MPE_SERVERS
access-list MPE-in remark ### MPE-in ACL
access-list MPE-in extended permit icmp any any
access-list MPE-in extended permit object-group MPE_SERVICES object-group INTERNAL_MPE_SERVERS object-group MPE_SERVERS
access-list MPE-in extended permit object-group MPE_SERVICES object-group INTERNAL_MPI_SERVERS object-group MPE_SERVERS
access-list MPE-in extended permit object-group MPE_SERVICES object-group INTERNAL_MPI_SERVERS object-group ENCLAVE_MAIL_SERVERS
access-list MPI-in remark ### MPI-in ACL
access-list MPI-in extended permit icmp any any
access-list MPI-in extended permit object-group MPI_SERVICES object-group INTERNAL_MPI_SERVERS object-group MPI_SERVERS
access-list users-in remark ### users-in ACL
access-list users-in extended permit object-group MPI_SERVICES object-group INTERNAL_USERS object-group ENCLAVE_ORDERING_SERVERS
access-list users-in extended permit object-group MPI_SERVICES object-group INTERNAL_USERS object-group MPE_SERVERS
access-list users-in extended permit object-group USER_SERVICES object-group INTERNAL_USERS object-group INTERNET_SERVERS
access-list users-in extended permit object-group USER_SERVICES object-group INTERNAL_USERS object-group ENCLAVE_USER_SERVICES
pager lines 23
logging enable
logging timestamp
logging monitor debugging
logging asdm informational
logging facility 23
logging host mgmt 10.20.50.103
logging message 106023 level notifications
mtu outside 1500
mtu MPI 1500
mtu MPE 1500
mtu mgmt 1500
no failover
icmp unreachable rate-limit 1 burst-size 1
no asdm history enable
arp timeout 14400
no arp permit-nonconnected
access-group outside-in in interface outside
access-group MPI-in in interface MPI
access-group MPE-in in interface MPE
access-group users-in in interface users
route outside 0.0.0.0 0.0.0.0 {{ wan_peer }} 1
timeout xlate 3:00:00
timeout pat-xlate 0:00:30
timeout conn 1:00:00 half-closed 0:10:00 udp 0:02:00 sctp 0:02:00 icmp 0:00:02
timeout sunrpc 0:10:00 h323 0:05:00 h225 1:00:00 mgcp 0:05:00 mgcp-pat 0:05:00
timeout sip 0:30:00 sip_media 0:02:00 sip-invite 0:03:00 sip-disconnect 0:02:00
timeout sip-provisional-media 0:02:00 uauth 0:05:00 absolute
timeout tcp-proxy-reassembly 0:01:00
timeout floating-conn 0:00:00
user-identity default-domain LOCAL
aaa authentication ssh console LOCAL
http server enable
http 0.0.0.0 0.0.0.0 mgmt
http 0.0.0.0 0.0.0.0 outside
http 0.0.0.0 0.0.0.0 MPI
http 0.0.0.0 0.0.0.0 MPE
no snmp-server location
no snmp-server contact
crypto ipsec security-association pmtu-aging infinite
crypto ca trustpoint _SmartCallHome_ServerCA
no validation-usage
crl configure
crypto ca trustpool policy
auto-import
crypto ca certificate chain _SmartCallHome_ServerCA
certificate ca 6ecc7aa5a7032009b8cebcf4e952d491
308205ec 308204d4 a0030201 0202106e cc7aa5a7 032009b8 cebcf4e9 52d49130
0d06092a 864886f7 0d010105 05003081 ca310b30 09060355 04061302 55533117
30150603 55040a13 0e566572 69536967 6e2c2049 6e632e31 1f301d06 0355040b
13165665 72695369 676e2054 72757374 204e6574 776f726b 313a3038 06035504
0b133128 63292032 30303620 56657269 5369676e 2c20496e 632e202d 20466f72
20617574 686f7269 7a656420 75736520 6f6e6c79 31453043 06035504 03133c56
65726953 69676e20 436c6173 73203320 5075626c 69632050 72696d61 72792043
65727469 66696361 74696f6e 20417574 686f7269 7479202d 20473530 1e170d31
30303230 38303030 3030305a 170d3230 30323037 32333539 35395a30 81b5310b
30090603 55040613 02555331 17301506 0355040a 130e5665 72695369 676e2c20
496e632e 311f301d 06035504 0b131656 65726953 69676e20 54727573 74204e65
74776f72 6b313b30 39060355 040b1332 5465726d 73206f66 20757365 20617420
68747470 733a2f2f 7777772e 76657269 7369676e 2e636f6d 2f727061 20286329
3130312f 302d0603 55040313 26566572 69536967 6e20436c 61737320 33205365
63757265 20536572 76657220 4341202d 20473330 82012230 0d06092a 864886f7
0d010101 05000382 010f0030 82010a02 82010100 b187841f c20c45f5 bcab2597
a7ada23e 9cbaf6c1 39b88bca c2ac56c6 e5bb658e 444f4dce 6fed094a d4af4e10
9c688b2e 957b899b 13cae234 34c1f35b f3497b62 83488174 d188786c 0253f9bc
7f432657 5833833b 330a17b0 d04e9124 ad867d64 12dc744a 34a11d0a ea961d0b
15fca34b 3bce6388 d0f82d0c 948610ca b69a3dca eb379c00 48358629 5078e845
63cd1941 4ff595ec 7b98d4c4 71b350be 28b38fa0 b9539cf5 ca2c23a9 fd1406e8
18b49ae8 3c6e81fd e4cd3536 b351d369 ec12ba56 6e6f9b57 c58b14e7 0ec79ced
4a546ac9 4dc5bf11 b1ae1c67 81cb4455 33997f24 9b3f5345 7f861af3 3cfa6d7f
81f5b84a d3f58537 1cb5a6d0 09e4187b 384efa0f 02030100 01a38201 df308201
db303406 082b0601 05050701 01042830 26302406 082b0601 05050730 01861868
7474703a 2f2f6f63 73702e76 65726973 69676e2e 636f6d30 12060355 1d130101
ff040830 060101ff 02010030 70060355 1d200469 30673065 060b6086 480186f8
45010717 03305630 2806082b 06010505 07020116 1c687474 70733a2f 2f777777
2e766572 69736967 6e2e636f 6d2f6370 73302a06 082b0601 05050702 02301e1a
1c687474 70733a2f 2f777777 2e766572 69736967 6e2e636f 6d2f7270 61303406
03551d1f 042d302b 3029a027 a0258623 68747470 3a2f2f63 726c2e76 65726973
69676e2e 636f6d2f 70636133 2d67352e 63726c30 0e060355 1d0f0101 ff040403
02010630 6d06082b 06010505 07010c04 61305fa1 5da05b30 59305730 55160969
6d616765 2f676966 3021301f 30070605 2b0e0302 1a04148f e5d31a86 ac8d8e6b
c3cf806a d448182c 7b192e30 25162368 7474703a 2f2f6c6f 676f2e76 65726973
69676e2e 636f6d2f 76736c6f 676f2e67 69663028 0603551d 11042130 1fa41d30
1b311930 17060355 04031310 56657269 5369676e 4d504b49 2d322d36 301d0603
551d0e04 1604140d 445c1653 44c1827e 1d20ab25 f40163d8 be79a530 1f060355
1d230418 30168014 7fd365a7 c2ddecbb f03009f3 4339fa02 af333133 300d0609
2a864886 f70d0101 05050003 82010100 0c8324ef ddc30cd9 589cfe36 b6eb8a80
4bd1a3f7 9df3cc53 ef829ea3 a1e697c1 589d756c e01d1b4c fad1c12d 05c0ea6e
b2227055 d9203340 3307c265 83fa8f43 379bea0e 9a6c70ee f69c803b d937f47a
6decd018 7d494aca 99c71928 a2bed877 24f78526 866d8705 404167d1 273aeddc
481d22cd 0b0b8bbc f4b17bfd b499a8e9 762ae11a 2d876e74 d388dd1e 22c6df16
b62b8214 0a945cf2 50ecafce ff62370d ad65d306 4153ed02 14c8b558 28a1ace0
5becb37f 954afb03 c8ad26db e6667812 4ad99f42 fbe198e6 42839b8f 8f6724e8
6119b5dd cdb50b26 058ec36e c4c875b8 46cfe218 065ea9ae a8819a47 16de0c28
6c2527b9 deb78458 c61f381e a4c4cb66
quit
telnet timeout 15
ssh stricthostkeycheck
ssh 0.0.0.0 0.0.0.0 mgmt
ssh timeout 5
ssh version 2
ssh key-exchange group dh-group14-sha1
console timeout 0
threat-detection basic-threat
threat-detection statistics access-list
no threat-detection statistics tcp-intercept
dynamic-access-policy-record DfltAccessPolicy
username cisco password 3USUcOPFUiMCO4Jk encrypted privilege 15
!
class-map inspection_default
match default-inspection-traffic
!
!
policy-map type inspect dns preset_dns_map
parameters
message-length maximum client auto
message-length maximum 512
policy-map type inspect dns migrated_dns_map_1
parameters
message-length maximum client auto
message-length maximum 512
policy-map global_policy
class inspection_default
inspect ip-options
inspect netbios
inspect rtsp
inspect sunrpc
inspect tftp
inspect xdmcp
inspect ftp
inspect h323 h225
inspect h323 ras
inspect rsh
inspect esmtp
inspect sqlnet
inspect sip
inspect skinny
inspect icmp
inspect http
inspect dns migrated_dns_map_1
!
service-policy global_policy global
prompt hostname context
no call-home reporting anonymous
call-home
profile CiscoTAC-1
no active
destination address http https://tools.cisco.com/its/service/oddce/services/DDCEService
destination address email [email protected]
profile License
destination address http https://tools.cisco.com/its/service/oddce/services/DDCEService
destination transport-method http
Cryptochecksum:89fc0e7a75db639607400231a59c9051
: end
'''
def asa_config(hostname=None, wan_network=None, wan_address=None, mpi_address=None, mpe_address=None,
users_address=None, management_address=None,
users_network=None, mpe_network=None, mpi_network=None, wan_peer=None,
management_network=None,
conf_file='baseline_asa.conf'):
try:
with open(conf_file) as cfg:
filecontent = cfg.read()
assert filecontent == baseline_asa_conf_file_text
except:
filecontent = baseline_asa_conf_file_text
return Environment(newline_sequence = '\n').from_string(filecontent).render( {
'hostname': hostname or 'asa-site1-9-0-0-0',
'wan_network': wan_network or '9.0.0.0 255.255.255.252',
'wan_address': wan_address or '9.0.0.1 255.255.255.252',
'wan_peer': wan_peer or '9.0.0.2 255.255.248.0',
'mpi_network': mpi_network or '9.128.0.0 255.255.255.128',
'mpi_address': mpi_address or '9.128.0.1 255.255.255.128',
'mpe_network': mpe_network or '9.128.0.128 255.255.255.128',
'mpe_address': mpe_address or '9.128.0.129 255.255.255.128',
'users_network': users_network or '10.100.0.0 255.255.255.0',
'users_address': users_address or '10.100.0.1 255.255.255.0',
'management_network': management_network or '172.16.0.0 255.255.254.0',
'management_address': management_address or '172.16.1.99 255.255.254.0'
} )
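# Minimal usage sketch: render a config for a hypothetical second site. The
# hostname and addresses below are made-up sample values, not tool defaults.
def example_asa_config():
    return asa_config(
        hostname='asa-site2-9-0-0-4',
        wan_address='9.0.0.5 255.255.255.252',
        wan_peer='9.0.0.6 255.255.255.252',
    )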
| mit | 564,070,875,583,957,500 | 41.951768 | 133 | 0.793008 | false |
romanz/python-keepkey | tests/test_msg_signidentity.py | 1 | 4304 | from __future__ import print_function
import unittest
import common
import binascii
import hashlib
import struct
from keepkeylib.client import CallException
import keepkeylib.types_pb2 as proto_types
def check_path(identity):
m = hashlib.sha256()
m.update(struct.pack("<I", identity.index))
uri = ''
if identity.proto: uri += identity.proto + '://'
if identity.user: uri += identity.user + '@'
if identity.host: uri += identity.host
if identity.port: uri += ':' + identity.port
if identity.path: uri += identity.path
m.update(uri)
print('hash:', m.hexdigest())
(a, b, c, d, _, _, _, _) = struct.unpack('<8I', m.digest())
address_n = [0x80000000 | 13, 0x80000000 | a, 0x80000000 | b, 0x80000000 | c, 0x80000000 | d]
print('path:', 'm/' + '/'.join([str(x) for x in address_n]))
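# Illustrative helper (not called by the tests): reproduce the derivation for the
# first documented case below, https://[email protected]/login with index=0.
def demo_check_path():
    identity = proto_types.IdentityType(proto='https', user='satoshi',
                                        host='bitcoin.org', port='', path='/login', index=0)
    check_path(identity)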
class TestMsgSignidentity(common.KeepKeyTest):
def test_sign(self):
self.setup_mnemonic_nopin_nopassphrase()
hidden = binascii.unhexlify('cd8552569d6e4509266ef137584d1e62c7579b5b8ed69bbafa4b864c6521e7c2')
visual = '2015-03-23 17:39:22'
# URI : https://[email protected]/login
# hash : d0e2389d4c8394a9f3e32de01104bf6e8db2d9e2bb0905d60fffa5a18fd696db
# path : m/2147483661/2637750992/2845082444/3761103859/4005495825
identity = proto_types.IdentityType(proto='https', user='satoshi', host='bitcoin.org', port='', path='/login', index=0)
sig = self.client.sign_identity(identity, hidden, visual)
self.assertEqual(sig.address, '17F17smBTX9VTZA9Mj8LM5QGYNZnmziCjL')
self.assertEqual(binascii.hexlify(sig.public_key), '023a472219ad3327b07c18273717bb3a40b39b743756bf287fbd5fa9d263237f45')
self.assertEqual(binascii.hexlify(sig.signature), '20f2d1a42d08c3a362be49275c3ffeeaa415fc040971985548b9f910812237bb41770bf2c8d488428799fbb7e52c11f1a3404011375e4080e077e0e42ab7a5ba02')
# URI : ftp://[email protected]:2323/pub
# hash : 79a6b53831c6ff224fb283587adc4ebae8fb0d734734a46c876838f52dff53f3
# path : m/2147483661/3098912377/2734671409/3632509519/3125730426
identity = proto_types.IdentityType(proto='ftp', user='satoshi', host='bitcoin.org', port='2323', path='/pub', index=3)
sig = self.client.sign_identity(identity, hidden, visual)
self.assertEqual(sig.address, '1KAr6r5qF2kADL8bAaRQBjGKYEGxn9WrbS')
self.assertEqual(binascii.hexlify(sig.public_key), '0266cf12d2ba381c5fd797da0d64f59c07a6f1b034ad276cca6bf2729e92b20d9c')
self.assertEqual(binascii.hexlify(sig.signature), '20bbd12dc657d534fc0f7e40186e22c447e0866a016f654f380adffa9a84e9faf412a1bb0ae908296537838cf91145e77da08681c63d07b7dca40728b9e6cb17cf')
# URI : ssh://[email protected]
# hash : 5fa612f558a1a3b1fb7f010b2ea0a25cb02520a0ffa202ce74a92fc6145da5f3
# path : m/2147483661/4111640159/2980290904/2332131323/3701645358
identity = proto_types.IdentityType(proto='ssh', user='satoshi', host='bitcoin.org', port='', path='', index=47)
sig = self.client.sign_identity(identity, hidden, visual, ecdsa_curve_name='nist256p1')
self.assertEqual(sig.address, '')
self.assertEqual(binascii.hexlify(sig.public_key), '0373f21a3da3d0e96fc2189f81dd826658c3d76b2d55bd1da349bc6c3573b13ae4')
self.assertEqual(binascii.hexlify(sig.signature), '005122cebabb852cdd32103b602662afa88e54c0c0c1b38d7099c64dcd49efe908288114e66ed2d8c82f23a70b769a4db723173ec53840c08aafb840d3f09a18d3')
# URI : ssh://[email protected]
# hash : 5fa612f558a1a3b1fb7f010b2ea0a25cb02520a0ffa202ce74a92fc6145da5f3
# path : m/2147483661/4111640159/2980290904/2332131323/3701645358
identity = proto_types.IdentityType(proto='ssh', user='satoshi', host='bitcoin.org', port='', path='', index=47)
sig = self.client.sign_identity(identity, hidden, visual, ecdsa_curve_name='ed25519')
self.assertEqual(sig.address, '')
self.assertEqual(binascii.hexlify(sig.public_key), '000fac2a491e0f5b871dc48288a4cae551bac5cb0ed19df0764d6e721ec5fade18')
self.assertEqual(binascii.hexlify(sig.signature), '00f05e5085e666429de397c70a081932654369619c0bd2a6579ea6c1ef2af112ef79998d6c862a16b932d44b1ac1b83c8cbcd0fbda228274fde9e0d0ca6e9cb709')
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 | -5,067,317,723,074,652,000 | 57.958904 | 191 | 0.740474 | false |
fraricci/pymatgen | pymatgen/analysis/aflow_prototypes.py | 2 | 4003 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from monty.serialization import loadfn
from pymatgen.analysis.structure_matcher import StructureMatcher
"""
This module uses data from the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this module, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
"""
module_dir = os.path.dirname(os.path.abspath(__file__))
AFLOW_PROTOTYPE_LIBRARY = loadfn(
    os.path.join(module_dir, "aflow_prototypes.json"))
class AflowPrototypeMatcher:
"""
This class will match structures to their crystal prototypes, and will
attempt to group species together to match structures derived from
prototypes (e.g. an A_xB_1-x_C from a binary prototype), and will
give these the names the "-like" suffix.
This class uses data from the AFLOW LIBRARY OF CRYSTALLOGRAPHIC PROTOTYPES.
If using this class, please cite their publication appropriately:
Mehl, M. J., Hicks, D., Toher, C., Levy, O., Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
"""
def __init__(self, initial_ltol=0.2, initial_stol=0.3, initial_angle_tol=5):
"""
Tolerances as defined in StructureMatcher. Tolerances will be
gradually decreased until only a single match is found (if possible).
Args:
initial_ltol: fractional length tolerance
initial_stol: site tolerance
initial_angle_tol: angle tolerance
"""
self.initial_ltol = initial_ltol
self.initial_stol = initial_stol
self.initial_angle_tol = initial_angle_tol
@staticmethod
def _match_prototype(structure_matcher, structure):
tags = []
for d in AFLOW_PROTOTYPE_LIBRARY:
p = d['snl'].structure
match = structure_matcher.fit_anonymous(p, structure)
if match:
tags.append(d)
return tags
def _match_single_prototype(self, structure):
sm = StructureMatcher(ltol=self.initial_ltol,
stol=self.initial_stol,
angle_tol=self.initial_angle_tol)
tags = self._match_prototype(sm, structure)
while len(tags) > 1:
sm.ltol *= 0.8
sm.stol *= 0.8
sm.angle_tol *= 0.8
tags = self._match_prototype(sm, structure)
if sm.ltol < 0.01:
break
return tags
def get_prototypes(self, structure):
"""
Get prototype(s) structures for a given
input structure. If you use this method in
your work, please cite the appropriate
AFLOW publication:
Mehl, M. J., Hicks, D., Toher, C., Levy, O.,
Hanson, R. M., Hart, G., & Curtarolo, S. (2017).
The AFLOW library of crystallographic prototypes: part 1.
Computational Materials Science, 136, S1-S828.
http://doi.org/10.1016/j.commatsci.2017.01.017
Args:
structure: structure to match
Returns (list): A list of dicts with keys
'snl' for the matched prototype and 'tags',
a dict of tags ('mineral', 'strukturbericht'
and 'aflow') of that prototype. This should
be a list containing just a single entry,
but it is possible a material can match
multiple prototypes.
"""
tags = self._match_single_prototype(structure)
if len(tags) == 0:
return None
else:
return tags
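# Minimal usage sketch (assumes `structure` is a pymatgen Structure instance):
#   matcher = AflowPrototypeMatcher()
#   matches = matcher.get_prototypes(structure)
#   if matches:
#       tags = matches[0]['tags']  # e.g. 'mineral', 'strukturbericht', 'aflow' labels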
| mit | -4,214,893,756,256,269,000 | 35.390909 | 97 | 0.623782 | false |
Hakuba/youtube-dl | youtube_dl/extractor/mitele.py | 21 | 3018 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urlparse,
)
from ..utils import (
encode_dict,
get_element_by_attribute,
int_or_none,
)
class MiTeleIE(InfoExtractor):
IE_DESC = 'mitele.es'
_VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
'md5': '0ff1a13aebb35d9bc14081ff633dd324',
'info_dict': {
'id': '0NF1jJnxS1Wu3pHrmvFyw2',
'display_id': 'programa-144',
'ext': 'flv',
'title': 'Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'thumbnail': 're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
config_url = self._search_regex(
r'data-config\s*=\s*"([^"]+)"', webpage, 'data config url')
config_url = compat_urlparse.urljoin(url, config_url)
config = self._download_json(
config_url, display_id, 'Downloading config JSON')
mmc = self._download_json(
config['services']['mmc'], display_id, 'Downloading mmc JSON')
formats = []
for location in mmc['locations']:
gat = self._proto_relative_url(location.get('gat'), 'http:')
bas = location.get('bas')
loc = location.get('loc')
ogn = location.get('ogn')
if None in (gat, bas, loc, ogn):
continue
token_data = {
'bas': bas,
'icd': loc,
'ogn': ogn,
'sta': '0',
}
media = self._download_json(
'%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
display_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:
continue
formats.extend(self._extract_f4m_formats(
file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
display_id, f4m_id=loc))
title = self._search_regex(
r'class="Destacado-text"[^>]*>\s*<strong>([^<]+)</strong>', webpage, 'title')
video_id = self._search_regex(
r'data-media-id\s*=\s*"([^"]+)"', webpage,
'data media id', default=None) or display_id
thumbnail = config.get('poster', {}).get('imageUrl')
duration = int_or_none(mmc.get('duration'))
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': get_element_by_attribute('class', 'text', webpage),
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| unlicense | -6,856,082,474,153,087,000 | 32.910112 | 89 | 0.510272 | false |
rsepassi/tensor2tensor | tensor2tensor/data_generators/inspect.py | 1 | 3424 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Inspect a TFRecord file of tensorflow.Example and show tokenizations.
python data_generators/inspect.py \
--logtostderr \
--print_targets \
--subword_text_encoder_filename=$DATA_DIR/vocab.endefr.8192 \
--input_filename=$DATA_DIR/wmt_ende_tokens_8k-train-00000-of-00100
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.data_generators import text_encoder
import tensorflow as tf
tf.flags.DEFINE_string("subword_text_encoder_filename", "",
"SubwordTextEncoder vocabulary file")
tf.flags.DEFINE_string("token_text_encoder_filename", "",
"TokenTextEncoder vocabulary file")
tf.flags.DEFINE_bool("byte_text_encoder", False, "use a ByteTextEncoder")
tf.flags.DEFINE_string("input_filename", "", "input filename")
tf.flags.DEFINE_bool("print_inputs", False, "Print decoded inputs to stdout")
tf.flags.DEFINE_bool("print_targets", False, "Print decoded targets to stdout")
tf.flags.DEFINE_bool("print_all", False, "Print all fields")
FLAGS = tf.flags.FLAGS
def main(_):
"""Convert a file to examples."""
if FLAGS.subword_text_encoder_filename:
encoder = text_encoder.SubwordTextEncoder(
FLAGS.subword_text_encoder_filename)
elif FLAGS.token_text_encoder_filename:
encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename)
elif FLAGS.byte_text_encoder:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = None
reader = tf.python_io.tf_record_iterator(FLAGS.input_filename)
total_sequences = 0
total_input_tokens = 0
total_target_tokens = 0
max_input_length = 0
max_target_length = 0
for record in reader:
x = tf.train.Example()
x.ParseFromString(record)
inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value]
targets = [int(i) for i in x.features.feature["targets"].int64_list.value]
if FLAGS.print_inputs:
print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs)
if FLAGS.print_targets:
print("TARGETS:\n" + encoder.decode(targets) if encoder else targets)
total_input_tokens += len(inputs)
total_target_tokens += len(targets)
total_sequences += 1
max_input_length = max(max_input_length, len(inputs))
max_target_length = max(max_target_length, len(targets))
if FLAGS.print_all:
for k, v in x.features.feature.iteritems():
print("%s: %s" % (k, v.int64_list.value))
print("total_sequences: %d" % total_sequences)
print("total_input_tokens: %d" % total_input_tokens)
print("total_target_tokens: %d" % total_target_tokens)
print("max_input_length: %d" % max_input_length)
print("max_target_length: %d" % max_target_length)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | -6,508,975,020,971,683,000 | 36.626374 | 79 | 0.70736 | false |
jiwanlimbu/aura | keystone/auth/plugins/password.py | 3 | 1675 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.auth import plugins as auth_plugins
from keystone.auth.plugins import base
from keystone.common import dependency
from keystone import exception
from keystone.i18n import _
METHOD_NAME = 'password'
@dependency.requires('identity_api')
class Password(base.AuthMethodHandler):
def authenticate(self, request, auth_payload):
"""Try to authenticate against the identity backend."""
response_data = {}
user_info = auth_plugins.UserAuthInfo.create(auth_payload, METHOD_NAME)
try:
self.identity_api.authenticate(
request,
user_id=user_info.user_id,
password=user_info.password)
except AssertionError:
# authentication failed because of invalid username or password
msg = _('Invalid username or password')
raise exception.Unauthorized(msg)
response_data['user_id'] = user_info.user_id
return base.AuthHandlerResponse(status=True, response_body=None,
response_data=response_data)
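# For reference, the v3 "password" method payload this plugin consumes looks like
# (illustrative values; parsing is done by auth_plugins.UserAuthInfo.create):
#   {"user": {"id": "423f19a4ac1e4f48bbb4180756e6eb6c", "password": "secret"}}
# or a user "name" plus "domain" instead of the "id".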
| apache-2.0 | -6,764,065,640,510,353,000 | 35.413043 | 79 | 0.69194 | false |
szecsi/Gears | GearsPy/Ide.py | 1 | 3617 | from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QApplication, QLabel, QDialog, QWidget, QGridLayout, QPushButton, QSplitter, QHBoxLayout, QSlider
from PyQt5.Qsci import QsciScintilla, QsciScintillaBase, QsciLexerPython, QsciAPIs
from Editor import Editor
from Preview import Preview
from SequenceLoader import *
class Ide(QWidget):
def __init__(self, sequencePath, browser, errline=0, parent=None):
super().__init__(parent)
self.sequencePath = sequencePath
self.browser = browser
self.playSpeed = 1
hbox = QHBoxLayout(self)
self.splitter = QSplitter()
self.editor = Editor(sequencePath, errline, None)
self.splitter.addWidget(self.editor)
self.rpanel = QWidget(None)
grid = QGridLayout()
self.reloadButton = QPushButton('Save script and load sequence', self.rpanel)
self.reloadButton.clicked.connect(self.reload)
grid.addWidget(self.reloadButton, 1, 2, 1, 8)
self.discardButton = QPushButton('Discard changes', self.rpanel)
self.discardButton.clicked.connect(self.discard)
grid.addWidget(self.discardButton, 5, 2, 1, 8)
self.preview = Preview(self.rpanel, self.editor)
grid.addWidget(self.preview, 2, 2, 1, 8)
self.seeker = QSlider(Qt.Horizontal, self.rpanel)
self.seeker.setTickPosition(QSlider.TicksBelow)
self.seeker.setMinimum(0)
seq = gears.getSequence()
if seq :
self.seeker.setMaximum(seq.getDuration())
self.seeker.sliderPressed.connect(self.seekerPressed)
self.seeker.sliderReleased.connect(self.seekerReleased)
self.seeker.valueChanged.connect(self.seekerChanged)
self.seeking = False
grid.addWidget(self.seeker, 3, 2, 1, 8)
self.pauseButton = QPushButton('\u275a\u275a', self.rpanel)
self.pauseButton.clicked.connect( self.pause )
grid.addWidget(self.pauseButton, 4, 2, 1, 1)
self.play1button = QPushButton('\u25b6', self.rpanel)
self.play1button.clicked.connect(self.play1)
grid.addWidget(self.play1button, 4, 3, 1, 1)
self.play2button = QPushButton('\u25b6\u25b6', self.rpanel)
self.play2button.clicked.connect(self.play2)
grid.addWidget(self.play2button, 4, 4, 1, 1)
self.rpanel.setLayout(grid)
self.splitter.addWidget(self.rpanel)
hbox.addWidget(self.splitter)
self.setLayout(hbox)
self.setGeometry(100, 100, 1600, 900)
self.timer = QTimer(self)
self.timer.setInterval(16)
self.timer.timeout.connect(self.onTimer)
self.timer.start()
def onTimer(self):
self.seeker.setValue(self.preview.sFrame)
if not self.seeking :
self.preview.sFrame += self.playSpeed
self.preview.update()
def reload(self, e):
self.editor.save()
self.close()
if loadSequence(self.sequencePath, self.browser, False):
self.browser.launcherWindow.start(self.browser.browserWindow, self.sequencePath)
QApplication.instance().processEvents()
self.browser.browserWindow.hide()
def discard(self, e):
self.close()
def seekerPressed(self):
self.seeking = True
def seekerReleased(self):
self.seeking = False
def seekerChanged(self):
self.preview.sFrame = self.seeker.value()
def pause(self, e):
self.playSpeed = 0
def play1(self, e):
self.playSpeed = 1
def play2(self, e):
self.playSpeed = 2 | gpl-2.0 | 1,888,139,975,741,167,600 | 31.872727 | 125 | 0.652006 | false |
yinquan529/platform-external-chromium_org | tools/json_schema_compiler/idl_schema.py | 23 | 15958 | #! /usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import json
import os.path
import re
import sys
from json_parse import OrderedDict
# This file is a peer to json_schema.py. Each of these files understands a
# certain format describing APIs (either JSON or IDL), reads files written
# in that format into memory, and emits them as a Python array of objects
# corresponding to those APIs, where the objects are formatted in a way that
# the JSON schema compiler understands. compiler.py drives both idl_schema.py
# and json_schema.py.
# idl_parser expects to be able to import certain files in its directory,
# so let's set things up the way it wants.
_idl_generators_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'ppapi', 'generators')
if _idl_generators_path in sys.path:
  import idl_parser
else:
  sys.path.insert(0, _idl_generators_path)
  try:
    import idl_parser
  finally:
    sys.path.pop(0)
def ProcessComment(comment):
'''
Convert a comment into a parent comment and a list of parameter comments.
Function comments are of the form:
Function documentation. May contain HTML and multiple lines.
|arg1_name|: Description of arg1. Use <var>argument</var> to refer
to other arguments.
|arg2_name|: Description of arg2...
Newlines are removed, and leading and trailing whitespace is stripped.
Args:
comment: The string from a Comment node.
Returns: A tuple that looks like:
(
"The processed comment, minus all |parameter| mentions.",
{
'parameter_name_1': "The comment that followed |parameter_name_1|:",
...
}
)
'''
# Find all the parameter comments of the form '|name|: comment'.
parameter_starts = list(re.finditer(r' *\|([^|]*)\| *: *', comment))
  # Get the parent comment (everything before the first parameter comment).
first_parameter_location = (parameter_starts[0].start()
if parameter_starts else len(comment))
parent_comment = comment[:first_parameter_location]
# We replace \n\n with <br/><br/> here and below, because the documentation
# needs to know where the newlines should be, and this is easier than
# escaping \n.
parent_comment = (parent_comment.strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
params = OrderedDict()
for (cur_param, next_param) in itertools.izip_longest(parameter_starts,
parameter_starts[1:]):
param_name = cur_param.group(1)
# A parameter's comment goes from the end of its introduction to the
# beginning of the next parameter's introduction.
param_comment_start = cur_param.end()
param_comment_end = next_param.start() if next_param else len(comment)
params[param_name] = (comment[param_comment_start:param_comment_end
].strip().replace('\n\n', '<br/><br/>')
.replace('\n', ''))
return (parent_comment, params)
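# Rough illustration with a hypothetical comment string:
#   ProcessComment('Does a thing.\n\n|tabId|: The target tab.\n|callback|: Called when done.')
# returns approximately:
#   ('Does a thing.',
#    OrderedDict([('tabId', 'The target tab.'), ('callback', 'Called when done.')]))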
class Callspec(object):
'''
Given a Callspec node representing an IDL function declaration, converts into
a tuple:
(name, list of function parameters, return type)
'''
def __init__(self, callspec_node, comment):
self.node = callspec_node
self.comment = comment
def process(self, callbacks):
parameters = []
return_type = None
if self.node.GetProperty('TYPEREF') not in ('void', None):
return_type = Typeref(self.node.GetProperty('TYPEREF'),
self.node.parent,
{'name': self.node.GetName()}).process(callbacks)
# The IDL parser doesn't allow specifying return types as optional.
# Instead we infer any object return values to be optional.
# TODO(asargent): fix the IDL parser to support optional return types.
if return_type.get('type') == 'object' or '$ref' in return_type:
return_type['optional'] = True
for node in self.node.GetChildren():
parameter = Param(node).process(callbacks)
if parameter['name'] in self.comment:
parameter['description'] = self.comment[parameter['name']]
parameters.append(parameter)
return (self.node.GetName(), parameters, return_type)
class Param(object):
'''
Given a Param node representing a function parameter, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self, param_node):
self.node = param_node
def process(self, callbacks):
return Typeref(self.node.GetProperty('TYPEREF'),
self.node,
{'name': self.node.GetName()}).process(callbacks)
class Dictionary(object):
'''
Given an IDL Dictionary node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, dictionary_node):
self.node = dictionary_node
def process(self, callbacks):
properties = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Member':
k, v = Member(node).process(callbacks)
properties[k] = v
result = {'id': self.node.GetName(),
'properties': properties,
'type': 'object'}
if self.node.GetProperty('inline_doc'):
result['inline_doc'] = True
elif self.node.GetProperty('noinline_doc'):
result['noinline_doc'] = True
return result
class Member(object):
'''
Given an IDL dictionary or interface member, converts into a name/value pair
where the value is a Python dictionary that the JSON schema compiler expects
to see.
'''
def __init__(self, member_node):
self.node = member_node
def process(self, callbacks):
properties = OrderedDict()
name = self.node.GetName()
for property_name in ('OPTIONAL', 'nodoc', 'nocompile', 'nodart'):
if self.node.GetProperty(property_name):
properties[property_name.lower()] = True
for option_name, sanitizer in [
('maxListeners', int),
('supportsFilters', lambda s: s == 'true'),
('supportsListeners', lambda s: s == 'true'),
('supportsRules', lambda s: s == 'true')]:
if self.node.GetProperty(option_name):
if 'options' not in properties:
properties['options'] = {}
properties['options'][option_name] = sanitizer(self.node.GetProperty(
option_name))
is_function = False
parameter_comments = OrderedDict()
for node in self.node.GetChildren():
if node.cls == 'Comment':
(parent_comment, parameter_comments) = ProcessComment(node.GetName())
properties['description'] = parent_comment
elif node.cls == 'Callspec':
is_function = True
name, parameters, return_type = (Callspec(node, parameter_comments)
.process(callbacks))
properties['parameters'] = parameters
if return_type is not None:
properties['returns'] = return_type
properties['name'] = name
if is_function:
properties['type'] = 'function'
else:
properties = Typeref(self.node.GetProperty('TYPEREF'),
self.node, properties).process(callbacks)
enum_values = self.node.GetProperty('legalValues')
if enum_values:
if properties['type'] == 'integer':
enum_values = map(int, enum_values)
elif properties['type'] == 'double':
enum_values = map(float, enum_values)
properties['enum'] = enum_values
return name, properties
class Typeref(object):
'''
Given a TYPEREF property representing the type of dictionary member or
function parameter, converts into a Python dictionary that the JSON schema
compiler expects to see.
'''
def __init__(self, typeref, parent, additional_properties=OrderedDict()):
self.typeref = typeref
self.parent = parent
self.additional_properties = additional_properties
def process(self, callbacks):
properties = self.additional_properties
result = properties
if self.parent.GetProperty('OPTIONAL'):
properties['optional'] = True
# The IDL parser denotes array types by adding a child 'Array' node onto
# the Param node in the Callspec.
for sibling in self.parent.GetChildren():
if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
properties['type'] = 'array'
properties['items'] = OrderedDict()
properties = properties['items']
break
if self.typeref == 'DOMString':
properties['type'] = 'string'
elif self.typeref == 'boolean':
properties['type'] = 'boolean'
elif self.typeref == 'double':
properties['type'] = 'number'
elif self.typeref == 'long':
properties['type'] = 'integer'
elif self.typeref == 'any':
properties['type'] = 'any'
elif self.typeref == 'object':
properties['type'] = 'object'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
instance_of = self.parent.GetProperty('instanceOf')
if instance_of:
properties['isInstanceOf'] = instance_of
elif self.typeref == 'ArrayBuffer':
properties['type'] = 'binary'
properties['isInstanceOf'] = 'ArrayBuffer'
elif self.typeref == 'FileEntry':
properties['type'] = 'object'
properties['isInstanceOf'] = 'FileEntry'
if 'additionalProperties' not in properties:
properties['additionalProperties'] = OrderedDict()
properties['additionalProperties']['type'] = 'any'
elif self.typeref is None:
properties['type'] = 'function'
else:
if self.typeref in callbacks:
# Do not override name and description if they are already specified.
name = properties.get('name', None)
description = properties.get('description', None)
properties.update(callbacks[self.typeref])
if description is not None:
properties['description'] = description
if name is not None:
properties['name'] = name
else:
properties['$ref'] = self.typeref
return result
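# Rough illustration of the mapping (hypothetical IDL snippets): a parameter declared
# as `DOMString title` becomes {'name': 'title', 'type': 'string'}, while `long[] ids`
# becomes {'name': 'ids', 'type': 'array', 'items': {'type': 'integer'}}.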
class Enum(object):
'''
Given an IDL Enum node, converts into a Python dictionary that the JSON
schema compiler expects to see.
'''
def __init__(self, enum_node):
self.node = enum_node
self.description = ''
def process(self, callbacks):
enum = []
for node in self.node.GetChildren():
if node.cls == 'EnumItem':
enum_value = {'name': node.GetName()}
for child in node.GetChildren():
if child.cls == 'Comment':
enum_value['description'] = ProcessComment(child.GetName())[0]
else:
raise ValueError('Did not process %s %s' % (child.cls, child))
enum.append(enum_value)
elif node.cls == 'Comment':
self.description = ProcessComment(node.GetName())[0]
else:
sys.exit('Did not process %s %s' % (node.cls, node))
result = {'id' : self.node.GetName(),
'description': self.description,
'type': 'string',
'enum': enum}
for property_name in ('inline_doc', 'noinline_doc', 'nodoc'):
if self.node.GetProperty(property_name):
result[property_name] = True
return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
namespace_node,
description,
nodoc=False,
internal=False,
platforms=None,
compiler_options=None):
self.namespace = namespace_node
self.nodoc = nodoc
self.internal = internal
self.platforms = platforms
self.compiler_options = compiler_options
self.events = []
self.functions = []
self.types = []
self.callbacks = OrderedDict()
self.description = description
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Enum':
self.types.append(Enum(node).process(self.callbacks))
else:
sys.exit('Did not process %s %s' % (node.cls, node))
if self.compiler_options is not None:
compiler_options = self.compiler_options
else:
compiler_options = {}
return {'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'internal': self.internal,
'events': self.events,
'platforms': self.platforms,
'compiler_options': compiler_options}
def process_interface(self, node):
members = []
for member in node.GetChildren():
if member.cls == 'Member':
name, properties = Member(member).process(self.callbacks)
members.append(properties)
return members
class IDLSchema(object):
'''
Given a list of IDLNodes and IDLAttributes, converts into a Python list
of api_defs that the JSON schema compiler expects to see.
'''
def __init__(self, idl):
self.idl = idl
def process(self):
namespaces = []
nodoc = False
internal = False
description = None
platforms = None
compiler_options = None
for node in self.idl:
if node.cls == 'Namespace':
if not description:
# TODO(kalman): Go back to throwing an error here.
print('%s must have a namespace-level comment. This will '
'appear on the API summary page.' % node.GetName())
description = ''
namespace = Namespace(node, description, nodoc, internal,
platforms=platforms,
compiler_options=compiler_options)
namespaces.append(namespace.process())
nodoc = False
internal = False
platforms = None
compiler_options = None
elif node.cls == 'Copyright':
continue
elif node.cls == 'Comment':
description = node.GetName()
elif node.cls == 'ExtAttribute':
if node.name == 'nodoc':
nodoc = bool(node.value)
elif node.name == 'internal':
internal = bool(node.value)
elif node.name == 'platforms':
platforms = list(node.value)
elif node.name == 'implemented_in':
compiler_options = {'implemented_in': node.value}
else:
continue
else:
sys.exit('Did not process %s %s' % (node.cls, node))
return namespaces
def Load(filename):
'''
Given the filename of an IDL file, parses it and returns an equivalent
Python dictionary in a format that the JSON schema compiler expects to see.
'''
f = open(filename, 'r')
contents = f.read()
f.close()
idl = idl_parser.IDLParser().ParseData(contents, filename)
idl_schema = IDLSchema(idl)
return idl_schema.process()
def Main():
'''
Dump a json serialization of parse result for the IDL files whose names
were passed in on the command line.
'''
for filename in sys.argv[1:]:
schema = Load(filename)
print json.dumps(schema, indent=2)
if __name__ == '__main__':
Main()
| bsd-3-clause | 359,429,400,023,107,400 | 34.072527 | 80 | 0.626018 | false |
petrlosa/ella | ella/articles/south_migrations/0001_initial.py | 6 | 7112 |
from south.db import db
from django.db import models
from django.utils.translation import ugettext_lazy as _
import datetime
from south.v2 import SchemaMigration
from ella.core.models import Publishable
class Migration(SchemaMigration):
depends_on = (
("core", "0002_initial_publishable"),
)
def forwards(self, orm):
# Adding model 'Article'
db.create_table('articles_article', (
('publishable_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=Publishable, unique=True, primary_key=True)),
('upper_title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('articles', ['Article'])
# Adding model 'ArticleContents'
db.create_table('articles_articlecontents', (
('id', models.AutoField(primary_key=True)),
('article', models.ForeignKey(orm['articles.Article'], verbose_name=_('Article'))),
('title', models.CharField(_('Title'), max_length=200, blank=True)),
('content', models.TextField(_('Content'))),
))
db.send_create_signal('articles', ['ArticleContents'])
# Adding model 'InfoBox'
db.create_table('articles_infobox', (
('id', models.AutoField(primary_key=True)),
('title', models.CharField(_('Title'), max_length=255)),
('created', models.DateTimeField(_('Created'), default=datetime.datetime.now, editable=False)),
('updated', models.DateTimeField(_('Updated'), null=True, blank=True)),
('content', models.TextField(_('Content'))),
))
db.send_create_signal('articles', ['InfoBox'])
# Adding ManyToManyField 'Article.authors'
db.create_table('articles_article_authors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm.Article, null=False)),
('author', models.ForeignKey(orm['core.Author'], null=False))
))
def backwards(self, orm):
# Deleting model 'ArticleContents'
db.delete_table('articles_articlecontents')
# Deleting model 'InfoBox'
db.delete_table('articles_infobox')
# Deleting model 'Article'
db.delete_table('articles_article')
# Dropping ManyToManyField 'Article.authors'
db.delete_table('articles_article_authors')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.category': {
'Meta': {'ordering': "('site','tree_path',)", 'unique_together': "(('site','tree_path'),)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'core.publishable': {
'Meta': {'object_name': 'Publishable'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Author']", 'symmetrical': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Category']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['photos.Photo']", 'null': 'True', 'blank': 'True'}),
'publish_from': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(3000, 1, 1, 0, 0, 0, 2)', 'db_index': 'True'}),
'publish_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Source']", 'null': 'True', 'blank': 'True'}),
'static': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'articles.articlecontents': {
'article': ('models.ForeignKey', ["orm['articles.Article']"], {'verbose_name': "_('Article')"}),
'content': ('models.TextField', ["_('Content')"], {}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'title': ('models.CharField', ["_('Title')"], {'max_length': '200', 'blank': 'True'})
},
'articles.infobox': {
'Meta': {'ordering': "('-created',)"},
'content': ('models.TextField', ["_('Content')"], {}),
'created': ('models.DateTimeField', ["_('Created')"], {'default': 'datetime.datetime.now', 'editable': 'False'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'title': ('models.CharField', ["_('Title')"], {'max_length': '255'}),
'updated': ('models.DateTimeField', ["_('Updated')"], {'null': 'True', 'blank': 'True'})
},
'photos.photo': {
'Meta': {'ordering': "('-created',)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'articles.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.Publishable']},
'content': ('django.db.models.fields.TextField', [], {'default': "''"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'publishable_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Publishable']", 'unique': 'True', 'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'upper_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.source': {
'Meta': {'ordering': "('name',)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
},
'core.author': {
'Meta': {'ordering': "('name','slug',)"},
'_stub': True,
'id': ('models.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['articles']
| bsd-3-clause | -2,924,197,103,444,478,500 | 51.681481 | 161 | 0.546682 | false |
pinax/django-waitinglist | waitinglist/tests.py | 1 | 4566 | from django.test import TestCase
from .forms import SurveyForm
from .models import (
Survey,
SurveyQuestion,
WaitingListEntry
)
class SurveyTests(TestCase):
def setUp(self):
self.survey = Survey.objects.create(
label="My Test Survey"
)
self.entry = WaitingListEntry.objects.create(email="[email protected]")
self.ice_cream_question = self.survey.questions.create(
question="What is your favorite ice cream flavor?",
kind=SurveyQuestion.TEXT_FIELD,
help_text="(e.g. Vanilla, Strawberry, Chocolate)",
required=True
)
self.summer_question = self.survey.questions.create(
question="What did you do last summer?",
kind=SurveyQuestion.TEXT_AREA,
required=False
)
self.season_question = self.survey.questions.create(
question="What is your favorite season?",
kind=SurveyQuestion.RADIO_CHOICES,
required=True
)
self.spring = self.season_question.choices.create(
label="Spring"
)
self.summer = self.season_question.choices.create(
label="Summer"
)
self.fall = self.season_question.choices.create(
label="Fall"
)
self.winter = self.season_question.choices.create(
label="Winter"
)
self.city_question = self.survey.questions.create(
question="Select all the cities you have visited",
kind=SurveyQuestion.CHECKBOX_FIELD,
required=True
)
self.boston = self.city_question.choices.create(
label="Boston"
)
self.denver = self.city_question.choices.create(
label="Denver"
)
self.nashville = self.city_question.choices.create(
label="Nashville"
)
self.danville = self.city_question.choices.create(
label="Danville"
)
self.golf_question = self.survey.questions.create(
question="Do you like golf?",
kind=SurveyQuestion.BOOLEAN_FIELD,
required=True
)
def test_create_second_survey(self):
Survey.objects.create(label="Another test survey")
self.assertEquals(Survey.objects.count(), 2)
self.assertEquals(Survey.objects.filter(active=False).count(), 1)
self.assertEquals(Survey.objects.filter(active=True).count(), 1)
def test_survey_form_creation(self):
form = SurveyForm(survey=self.survey)
self.assertTrue(len(form.fields), 5)
def test_survey_form_invalid(self):
form = SurveyForm(
data={
self.ice_cream_question.name: "Strawberry"
},
survey=self.survey
)
self.assertFalse(form.is_valid())
def test_survey_form_valid(self):
form = SurveyForm(
data={
self.ice_cream_question.name: "Strawberry",
self.summer_question.name: "Swam in the lake",
self.season_question.name: self.summer.pk,
self.city_question.name: [self.nashville.pk],
self.golf_question.name: True
},
survey=self.survey
)
self.assertTrue(form.is_valid())
def test_survey_form_save(self):
form = SurveyForm(
data={
self.ice_cream_question.name: "Strawberry",
self.summer_question.name: "Swam in the lake",
self.season_question.name: self.summer.pk,
self.city_question.name: [self.nashville.pk, self.boston.pk],
self.golf_question.name: True
},
survey=self.survey
)
self.assertTrue(form.is_valid())
form.save(self.entry.surveyinstance)
answers = self.entry.surveyinstance.answers.all()
self.assertEquals(answers.count(), 5)
self.assertEquals(answers.get(question=self.ice_cream_question).value, "Strawberry")
self.assertEquals(answers.get(question=self.summer_question).value, "Swam in the lake")
self.assertEquals(answers.get(question=self.season_question).value, self.summer.label)
self.assertTrue(
self.nashville.label in answers.get(question=self.city_question).value
)
self.assertTrue(
self.boston.label in answers.get(question=self.city_question).value
)
self.assertTrue(answers.get(question=self.golf_question).value_boolean)
| mit | 6,137,663,377,484,723,000 | 35.528 | 95 | 0.593955 | false |
huizhuzhao/rfscore | bin/csar.py | 2 | 4569 | import os
import sys
import csv
import glob
from optparse import OptionParser
from rfscore.config import config, logger
from rfscore.ob import get_molecule, extract_ligand
from rfscore.credo import contacts
def parse_options():
    '''Parse and validate command-line options for the descriptor calculation.
    Example: python csar.py --descriptor credo --binsize 1.0 --output csar.csv
    '''
# PARSE COMMAND LINE
usage = "%prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("--debug",
action = "store_true",
dest = "debug",
default = False,
help = 'Set logging level to debug and print more verbose output.')
parser.add_option("-B", "--binsize",
dest = "binsize",
type = float,
default = 0.0,
help = "Bin size (in Angstrom) to use for binning contacts based on inter-atomic distance.")
parser.add_option("-F", "--format",
dest = "format",
default = 'csv',
help = "Format to use for writing the SIFt of the protein-ligand complex.")
parser.add_option("-O", "--output",
dest = "output",
default = None,
help = "File to which the data will be written (default=STDOUT).")
parser.add_option("-D", "--descriptor",
dest = "descriptor",
default = 'elements',
help = "Descriptor to use. Valid descriptors are 'credo', 'elements' and 'sybyl'.")
# GET COMMAND LINE OPTIONS
(options, args) = parser.parse_args()
if options.descriptor not in ('elements', 'credo', 'sybyl'):
logger.fatal("Invalid descriptor: {0}.".format(options.descriptor))
parser.print_help()
sys.exit(1)
return options
def main():
    """Compute one descriptor row per CSAR entry and write them to CSV/stdout."""
options = parse_options()
# this option will produce more verbose output
if options.debug: logger.setLevel(logging.DEBUG)
csarconf = config['csar']
if options.output: fh = open(options.output,'wb')
else: fh = sys.stdout
# choose how the ouptput data will be written
if options.format == 'csv':
writer = csv.writer(fh, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL)
HEADER = True
# iterate through all numbered directories
for directory in os.listdir(csarconf['directory']):
entrydir = os.path.join(csarconf['directory'], directory)
# parse kd.dat to get the pKd
kddat_path = os.path.join(entrydir, 'kd.dat')
# exit if kd.dat is missing
if not os.path.isfile(kddat_path):
logger.fatal("CSAR directory {} does not contain kd.dat file."
.format(directory))
sys.exit(1)
entry, pdb, pkd = open(kddat_path).read().strip().replace(' ','').split(',')
protein_path = glob.glob(os.path.join(entrydir, '*_complex.mol2')).pop()
protein = get_molecule(str(protein_path))
ligand = extract_ligand(protein.OBMol)
# calculate descriptor based on the sum of interacting element pairs
if options.descriptor == 'elements':
# calculate element pair descriptor for this complex
descriptor, labels = contacts.element_descriptor(protein, ligand,
binsize=options.binsize)
# calculate descriptor based on the sum of interacting element pairs
elif options.descriptor == 'sybyl':
# calculate element pair descriptor for this complex
descriptor, labels = contacts.sybyl_atom_type_descriptor(protein, ligand,
binsize=options.binsize)
# calculate descriptor using structural interaction fingerprints
elif options.descriptor == 'credo':
# get the protein-ligand structural interaction fingerprint
descriptor, labels = contacts.sift_descriptor(protein, ligand,
binsize=options.binsize)
if HEADER:
# UPDATE COLUMN LABELS
labels.insert(0,'pKd/pKi')
labels.append('pdb')
writer.writerow(labels)
HEADER = False
if options.format == 'csv':
# FIRST COLUMN OF OUTPUT ROW
row = [pkd] + descriptor.tolist() + [pdb]
writer.writerow(row)
if __name__ == '__main__':
    main()
| mit | -724,087,612,189,265,800 | 33.353383 | 117 | 0.546071 | false |
D-K-E/cltk | src/cltk/lemmatize/processes.py | 4 | 5044 | """
Processes for lemmatization.
"""
from copy import deepcopy
from dataclasses import dataclass
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Doc, Process
from cltk.lemmatize.ang import OldEnglishDictionaryLemmatizer
from cltk.lemmatize.fro import OldFrenchDictionaryLemmatizer
from cltk.lemmatize.grc import GreekBackoffLemmatizer
from cltk.lemmatize.lat import LatinBackoffLemmatizer
@dataclass
class LemmatizationProcess(Process):
"""To be inherited for each language's lemmatization declarations.
Example: ``LemmatizationProcess`` -> ``LatinLemmatizationProcess``
>>> from cltk.lemmatize.processes import LemmatizationProcess
>>> from cltk.core.data_types import Process
>>> issubclass(LemmatizationProcess, Process)
True
"""
def run(self, input_doc: Doc) -> Doc:
lemmatizer = self.algorithm
output_doc = deepcopy(input_doc)
for word in output_doc.words:
word.lemma = lemmatizer(word.string)
return output_doc
class GreekLemmatizationProcess(LemmatizationProcess):
"""The default Ancient Greek lemmatization algorithm.
>>> from cltk.core.data_types import Process, Pipeline
>>> from cltk.tokenizers import MultilingualTokenizationProcess
>>> from cltk.languages.utils import get_lang
>>> from cltk.languages.example_texts import get_example_text
>>> from cltk.nlp import NLP
>>> pipe = Pipeline(description="A custom Greek pipeline", \
processes=[MultilingualTokenizationProcess, GreekLemmatizationProcess], \
language=get_lang("grc"))
>>> nlp = NLP(language='grc', custom_pipeline=pipe, suppress_banner=True)
>>> nlp(get_example_text("grc")).lemmata[30:40]
['ἔλεγον.', 'καίτοι', 'ἀληθές', 'γε', 'ὡς', 'ἔπος', 'εἰπεῖν', 'οὐδὲν', 'εἰρήκασιν.', 'μάλιστα']
"""
description = "Lemmatization process for Ancient Greek"
@cachedproperty
def algorithm(self):
return GreekBackoffLemmatizer()
class LatinLemmatizationProcess(LemmatizationProcess):
"""The default Latin lemmatization algorithm.
>>> from cltk.core.data_types import Process, Pipeline
>>> from cltk.tokenizers import LatinTokenizationProcess
>>> from cltk.languages.utils import get_lang
>>> from cltk.languages.example_texts import get_example_text
>>> from cltk.nlp import NLP
>>> pipe = Pipeline(description="A custom Latin pipeline", \
processes=[LatinTokenizationProcess, LatinLemmatizationProcess], \
language=get_lang("lat"))
>>> nlp = NLP(language='lat', custom_pipeline=pipe, suppress_banner=True)
>>> nlp(get_example_text("lat")).lemmata[30:40]
['institutis', ',', 'legibus', 'inter', 'se', 'differunt', '.', 'Gallos', 'ab', 'Aquitanis']
"""
description = "Lemmatization process for Latin"
@cachedproperty
def algorithm(self):
return LatinBackoffLemmatizer()
@dataclass
class OldEnglishLemmatizationProcess(LemmatizationProcess):
"""The default Old English lemmatization algorithm.
>>> from cltk.core.data_types import Process, Pipeline
>>> from cltk.tokenizers import MultilingualTokenizationProcess
>>> from cltk.languages.utils import get_lang
>>> from cltk.languages.example_texts import get_example_text
>>> from cltk.nlp import NLP
>>> pipe = Pipeline(description="A custom Old English pipeline", \
processes=[MultilingualTokenizationProcess, OldEnglishLemmatizationProcess], \
language=get_lang("ang"))
>>> nlp = NLP(language='ang', custom_pipeline=pipe, suppress_banner=True)
>>> nlp(get_example_text("ang")).lemmata[30:40]
['siððan', 'ær', 'weorþan', 'feasceaft', 'findan', ',', 'he', 'se', 'frofre', 'gebidan']
"""
description = "Lemmatization process for Old English"
@cachedproperty
def algorithm(self):
return OldEnglishDictionaryLemmatizer()
@dataclass
class OldFrenchLemmatizationProcess(LemmatizationProcess):
"""The default Old French lemmatization algorithm.
>>> from cltk.core.data_types import Process, Pipeline
>>> from cltk.tokenizers import MultilingualTokenizationProcess
>>> from cltk.languages.utils import get_lang
>>> from cltk.languages.example_texts import get_example_text
>>> from cltk.nlp import NLP
>>> pipe = Pipeline(description="A custom Old French pipeline", \
processes=[MultilingualTokenizationProcess, OldFrenchLemmatizationProcess], \
language=get_lang("fro"))
>>> nlp = NLP(language='fro', custom_pipeline=pipe, suppress_banner=True)
>>> nlp(get_example_text("fro")).lemmata[30:40]
['avenir', 'jadis', 'en', 'bretaingne', 'avoir', '.I.', 'molt', 'riche', 'chevalier', 'PUNK']
"""
description = "Lemmatization process for Old French"
@cachedproperty
def algorithm(self):
return OldFrenchDictionaryLemmatizer()
| mit | 1,679,960,883,668,530,400 | 36.292308 | 99 | 0.689835 | false |
hachreak/invenio-ext | invenio_ext/restful/__init__.py | 3 | 11926 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Initialization and configuration for *Flask-Restful*."""
import re
import six
import warnings
from datetime import date
from dateutil import parser
from dateutil.tz import tzlocal, tzutc
from flask import request, session
from flask.ext import restful
from flask_restful import fields
from flask_registry import ModuleAutoDiscoveryRegistry
from functools import wraps
from cerberus import Validator
error_codes = dict(
validation_error=10,
)
"""
Available error codes for REST API.
"""
# errors and codes when validating JSON data concerning restful APIs
validation_errors = dict(
INCORRECT_TYPE=dict(
error_code=1,
error_mesg="An Attribute has incorrect type according to schema"
),
MISSING_FROM_USER_INPUT=dict(
error_code=2,
error_mesg="Input is missing a required field"
),
NON_EXISTING_TO_SCHEMA=dict(
error_code=3,
error_mesg="Input contains a field that does not exist in schema"
),
NO_UTC_ISO_FORMAT=dict(
error_code=4,
error_mesg=("Input contains datetime attribute "
"that is not in utc iso format")
),
DATETIME_PARSE_ERROR=dict(
error_code=5,
error_mesg="Input contains datetime attribute that cannot be parsed"
),
VALUE_OUT_OF_BOUNDS=dict(
error_code=6,
error_mesg="Input contains an attribute with an out of bounds value"
),
INCORRECT_ELEMENT_TYPE_IN_DATASTRUCTURE=dict(
error_code=7,
error_mesg="Elements in data structure have incorrect type"),
)
class RESTValidator(Validator):
"""Validator for restful Api."""
def _validate_utciso(self, utciso, field, value):
"""Validate UTC ISO format."""
try:
dt = parser.parse(value)
if dt.tzinfo != tzutc():
self._error(field, "not in utc iso format")
except Exception:
self._error(field, "cannot parse date-time")
def get_errors(self):
"""Transform cerberus validator errors to a list of dictionaries.
Example::
{
"code": c,
"message": "a message",
"field": f
}
"""
found_errors = []
all_errors = self.errors
for key in all_errors:
if isinstance(all_errors[key], str):
msg_error = all_errors[key]
if re.match(
"must be of (string|integer|float|boolean|list) type",
msg_error
):
error_to_append = dict(
code=validation_errors['INCORRECT_TYPE']['error_code'],
message=(
validation_errors['INCORRECT_TYPE']['error_mesg'] +
": " + "'" + key + "' " + msg_error
),
field=key
)
found_errors.append(error_to_append)
elif msg_error == "unknown field":
error_to_append = dict(
code=(validation_errors['NON_EXISTING_TO_SCHEMA']
['error_code']),
message=(validation_errors['NON_EXISTING_TO_SCHEMA']
['error_mesg']),
field=key
)
found_errors.append(error_to_append)
elif msg_error == "required field":
error_to_append = dict(
code=(validation_errors['MISSING_FROM_USER_INPUT']
['error_code']),
message=(validation_errors['MISSING_FROM_USER_INPUT']
['error_mesg']),
field=key
)
found_errors.append(error_to_append)
elif msg_error == "not in utc iso format":
error_to_append = dict(
code=(validation_errors['NO_UTC_ISO_FORMAT']
['error_code']),
message=(validation_errors['NO_UTC_ISO_FORMAT']
['error_mesg']),
field=key
)
found_errors.append(error_to_append)
elif msg_error == "cannot parse date-time":
error_to_append = dict(
code=(validation_errors['DATETIME_PARSE_ERROR']
['error_code']),
message=(validation_errors['DATETIME_PARSE_ERROR']
['error_mesg']),
field=key
)
found_errors.append(error_to_append)
elif msg_error.startswith("unallowed value"):
error_to_append = dict(
code=(validation_errors['VALUE_OUT_OF_BOUNDS']
['error_code']),
message=(validation_errors['VALUE_OUT_OF_BOUNDS']
['error_mesg'] +
" : " + msg_error),
field=key
)
found_errors.append(error_to_append)
elif isinstance(all_errors[key], dict):
error_dict = all_errors[key]
for entry in error_dict:
if re.match(
"must be of (string|integer|float|boolean|list) type",
error_dict[entry]
):
error_to_append = dict(
code=(validation_errors
['INCORRECT_ELEMENT_TYPE_IN_DATASTRUCTURE']
['error_code']),
message=(
validation_errors
['INCORRECT_ELEMENT_TYPE_IN_DATASTRUCTURE']
['error_mesg'] +
" : " + error_dict[entry]),
field=key
)
found_errors.append(error_to_append)
break
return found_errors
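# A minimal usage sketch (not part of the original module): the schema below is
# hypothetical and only shows how RESTValidator reports problems through the
# ``code``/``message``/``field`` dictionaries built by ``get_errors()``.
def _example_rest_validation():
    """Validate a hypothetical payload and return the normalised error list."""
    schema = {
        'title': {'type': 'string', 'required': True},
        'created': {'type': 'string', 'utciso': True},
        'count': {'type': 'integer'},
    }
    validator = RESTValidator(schema)
    validator.validate({'created': 'not-a-date', 'count': 'five', 'extra': 1})
    # Each entry looks like {'code': ..., 'message': ..., 'field': ...}.
    return validator.get_errors()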
#
# Marshal fields
#
class ISODate(fields.Raw):
"""Format a datetime object in ISO format."""
def format(self, dt):
"""Format a datetime object in ISO format."""
try:
if isinstance(dt, date):
return six.text_type(dt.isoformat())
else:
return six.text_type(dt)
except AttributeError as ae:
raise fields.MarshallingException(ae)
class UTCISODateTime(fields.DateTime):
"""Format a datetime object in ISO format.
Convert to UTC if necessary.
"""
def format(self, dt):
"""Format a datetime object in ISO format.
Convert to UTC if necessary.
"""
try:
if not dt.tzinfo:
dt = dt.replace(tzinfo=tzlocal())
return six.text_type(dt.astimezone(tzutc()).isoformat())
except AttributeError as ae:
raise fields.MarshallingException(ae)
class UTCISODateTimeString(fields.DateTime):
"""Format a string which represents a datetime in ISO format.
Convert to UTC if necessary.
"""
def format(self, value):
"""Format a string which represents a datetime in ISO format.
Convert to UTC if necessary.
"""
try:
dt = parser.parse(value)
if not dt.tzinfo:
dt = dt.replace(tzinfo=tzlocal())
return six.text_type(dt.astimezone(tzutc()).isoformat())
except AttributeError as ae:
raise fields.MarshallingException(ae)
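# Sketch (not part of the original module): how the marshal fields above are
# typically combined into a flask-restful ``fields`` mapping, e.g. for use with
# ``flask_restful.marshal``.  The key names here are hypothetical.
_EXAMPLE_DATETIME_FIELDS = {
    'release_date': ISODate,           # date object -> ISO 8601 date string
    'created': UTCISODateTime,         # datetime object -> UTC ISO 8601 string
    'modified': UTCISODateTimeString,  # ISO-ish string -> UTC ISO 8601 string
}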
#
# Decorators
#
def require_api_auth(*scopes):
"""Decorator to require API authentication using either API key or OAuth.
Note, API key usage will be deprecated. Personal OAuth access tokens
provide the same features as API keys.
:param scopes: List of required OAuth scopes.
"""
# Decorators specified in ``method_decorators`` in Flask-Restful's
# attribute is applied to a bound instance method, where as if you
# decorate the class method is applied to an unbound instance. If you
# are not accessing *args or **kwargs this doesn't matter. If you are
# you can check if the method is bound using the following line:
# is_bound = hasattr(f, '__self__') and f.__self__
def wrapper(f):
"""Wrap function with oauth require decorator."""
from invenio_oauth2server.provider import oauth2
f_oauth_required = oauth2.require_oauth()(f)
@wraps(f)
def decorated(*args, **kwargs):
"""OAuth 2.0 Authentication."""
resp = f_oauth_required(*args, **kwargs)
session.clear()
return resp
return decorated
return wrapper
def require_oauth_scopes(*scopes):
"""Decorator to require a list of OAuth scopes.
Decorator must be preceded by a ``require_api_auth()`` decorator.
    Note, API key authentication bypasses this check.
"""
required_scopes = set(scopes)
def wrapper(f):
@wraps(f)
def decorated(*args, **kwargs):
# Variable requests.oauth is only defined for oauth requests (see
# require_api_auth() above).
if hasattr(request, 'oauth') and request.oauth is not None:
token_scopes = set(request.oauth.access_token.scopes)
if not required_scopes.issubset(token_scopes):
restful.abort(403)
return f(*args, **kwargs)
return decorated
return wrapper
def require_header(header, value):
"""Decorator to test if proper content-type is provided."""
def decorator(f):
@wraps(f)
def inner(*args, **kwargs):
if header == 'Content-Type':
test_value = request.headers.get(header, '').split(';')[0]
else:
test_value = request.headers.get(header, '')
if (callable(value) and not value(test_value)) or \
test_value != value:
msg = value if not callable(value) else value.__doc__
restful.abort(
415,
message="Expected %s: %s" % (header, msg),
status=415,
)
return f(*args, **kwargs)
return inner
return decorator
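# Illustrative only (not part of the original module): how the decorators above
# are typically combined on a Flask-Restful resource.  The class is created in
# a small factory so no OAuth machinery is touched at import time; the scope
# name and the returned payload are hypothetical.
def _example_protected_resource():
    """Build a sketch resource protected by the decorators defined above."""
    class ExampleResource(restful.Resource):
        @require_api_auth()
        @require_oauth_scopes('example:write')
        @require_header('Content-Type', 'application/json')
        def post(self):
            return {'status': 'ok'}
    return ExampleResource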
def setup_app(app):
"""Setup api extension."""
api = restful.Api(app=app)
app.extensions['restful'] = api
class RestfulRegistry(ModuleAutoDiscoveryRegistry):
setup_func_name = 'setup_app'
def register(self, module, *args, **kwargs):
return super(RestfulRegistry, self).register(module, app, api,
*args, **kwargs)
app.extensions['registry']['restful'] = RestfulRegistry(
'restful', app=app, with_setup=True
)
| gpl-2.0 | 2,646,861,535,427,532,300 | 33.568116 | 79 | 0.533121 | false |
heromod/migrid | mig/shared/functionality/isjobactive.py | 1 | 3383 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# isjobactive - Check if sandbox job is still active
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Check whether a sandbox job is still active; if not, report the sandbox exe stop command."""
import os
import shared.returnvalues as returnvalues
from shared.functional import validate_input
from shared.init import initialize_main_variables
from shared.resadm import get_sandbox_exe_stop_command
def signature():
"""Signature of the main function"""
defaults = {'iosessionid': [None], 'sandboxkey': [None],
'exe_name': [None]}
return ['text', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id, op_header=False, op_title=False,
op_menu=client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input(user_arguments_dict,
defaults, output_objects, allow_rejects=False)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
iosessionid = accepted['iosessionid'][-1]
sandboxkey = accepted['sandboxkey'][-1]
exe_name = accepted['exe_name'][-1]
status = returnvalues.OK
# Web format for cert access and no header for SID access
if client_id:
output_objects.append({'object_type': 'title', 'text'
: 'SSS job activity checker'})
output_objects.append({'object_type': 'header', 'text'
: 'SSS job activity checker'})
else:
output_objects.append({'object_type': 'start'})
# check that the job exists, iosessionid is ok (does symlink exist?)
if iosessionid and os.path.islink(configuration.webserver_home
+ iosessionid):
msg = 'jobactive'
else:
if sandboxkey and exe_name:
(result, msg) = \
get_sandbox_exe_stop_command(configuration.sandbox_home,
sandboxkey, exe_name, logger)
if result:
msg = 'stop_command: %s' % msg
else:
msg = 'jobinactive'
status = returnvalues.ERROR
# Status code line followed by raw output
if not client_id:
output_objects.append({'object_type': 'script_status', 'text': ''})
output_objects.append({'object_type': 'binary', 'data': '%s' % status[0]})
output_objects.append({'object_type': 'binary', 'data': msg})
return (output_objects, status)
| gpl-2.0 | -4,693,156,376,203,703,000 | 33.876289 | 82 | 0.632279 | false |
beiko-lab/gengis | plugins/LinearRegression/LinearRegressionLayout.py | 1 | 13203 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Sep 8 2010)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
###########################################################################
## Class LinearRegressionLayout
###########################################################################
class LinearRegressionLayout ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Linear Regression", pos = wx.DefaultPosition, size = wx.Size( 1200,750 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BTNFACE ) )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
bSizer5 = wx.BoxSizer( wx.HORIZONTAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
sbSizer1 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Linear regression" ), wx.VERTICAL )
gSizer1 = wx.GridSizer( 1, 2, 0, 0 )
cboIndependentVariableDataTypeChoices = [ u"Use Location Data", u"Use Sequence Data" ]
self.cboIndependentVariableDataType = wx.RadioBox( self, wx.ID_ANY, u"Independent Data", wx.DefaultPosition, wx.DefaultSize, cboIndependentVariableDataTypeChoices, 1, wx.RA_SPECIFY_COLS )
self.cboIndependentVariableDataType.SetSelection( 0 )
gSizer1.Add( self.cboIndependentVariableDataType, 0, wx.ALL, 5 )
cboDependentVariableDataTypeChoices = [ u"Use Location Data", u"Use Sequence Data" ]
self.cboDependentVariableDataType = wx.RadioBox( self, wx.ID_ANY, u"Dependent Data", wx.DefaultPosition, wx.DefaultSize, cboDependentVariableDataTypeChoices, 1, wx.RA_SPECIFY_COLS )
self.cboDependentVariableDataType.SetSelection( 0 )
gSizer1.Add( self.cboDependentVariableDataType, 0, wx.ALL, 5 )
sbSizer1.Add( gSizer1, 0, 0, 5 )
szrLocVar = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Independent variable" ), wx.VERTICAL )
fgSizer4 = wx.FlexGridSizer( 2, 2, 0, 0 )
fgSizer4.SetFlexibleDirection( wx.BOTH )
fgSizer4.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.lblIndependent = wx.StaticText( self, wx.ID_ANY, u"Independent variable (x):", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblIndependent.Wrap( -1 )
fgSizer4.Add( self.lblIndependent, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboIndependentChoices = []
self.cboIndependent = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboIndependentChoices, 0 )
self.cboIndependent.SetSelection( 0 )
fgSizer4.Add( self.cboIndependent, 0, wx.ALL|wx.EXPAND, 5 )
self.lblIndependentIDField = wx.StaticText( self, wx.ID_ANY, u"Independent ID Field:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblIndependentIDField.Wrap( -1 )
fgSizer4.Add( self.lblIndependentIDField, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboIndependentIDFieldChoices = []
self.cboIndependentIDField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboIndependentIDFieldChoices, 0 )
self.cboIndependentIDField.SetSelection( 0 )
self.cboIndependentIDField.Enable( False )
fgSizer4.Add( self.cboIndependentIDField, 0, wx.ALL, 5 )
self.lblIndependentSubtype = wx.StaticText( self, wx.ID_ANY, u"Independent Subtype:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblIndependentSubtype.Wrap( -1 )
fgSizer4.Add( self.lblIndependentSubtype, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboIndependentSubtypeFieldChoices = []
self.cboIndependentSubtypeField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboIndependentSubtypeFieldChoices, 0 )
self.cboIndependentSubtypeField.SetSelection( 0 )
self.cboIndependentSubtypeField.Enable( False )
fgSizer4.Add( self.cboIndependentSubtypeField, 0, wx.ALL, 5 )
self.m_staticText35 = wx.StaticText( self, wx.ID_ANY, u"Independent count field:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText35.Wrap( -1 )
fgSizer4.Add( self.m_staticText35, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboIndependentCountFieldChoices = []
self.cboIndependentCountField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboIndependentCountFieldChoices, 0 )
self.cboIndependentCountField.SetSelection( 0 )
self.cboIndependentCountField.Enable( False )
fgSizer4.Add( self.cboIndependentCountField, 0, wx.ALL, 5 )
szrLocVar.Add( fgSizer4, 1, wx.EXPAND, 5 )
sbSizer1.Add( szrLocVar, 0, wx.ALL|wx.EXPAND, 5 )
szrSeqVar = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Dependent variable" ), wx.VERTICAL )
fgSizer41 = wx.FlexGridSizer( 2, 2, 0, 0 )
fgSizer41.SetFlexibleDirection( wx.BOTH )
fgSizer41.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.lblDependent = wx.StaticText( self, wx.ID_ANY, u"Dependent variable (y):", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblDependent.Wrap( -1 )
fgSizer41.Add( self.lblDependent, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboDependentChoices = []
self.cboDependent = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboDependentChoices, 0 )
self.cboDependent.SetSelection( 0 )
fgSizer41.Add( self.cboDependent, 0, wx.ALL|wx.EXPAND, 5 )
self.lblDependentIDField = wx.StaticText( self, wx.ID_ANY, u"Dependent ID Field:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblDependentIDField.Wrap( -1 )
fgSizer41.Add( self.lblDependentIDField, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboDependentIDFieldChoices = []
self.cboDependentIDField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboDependentIDFieldChoices, 0 )
self.cboDependentIDField.SetSelection( 0 )
self.cboDependentIDField.Enable( False )
fgSizer41.Add( self.cboDependentIDField, 0, wx.ALL, 5 )
self.lblDependentSubtype = wx.StaticText( self, wx.ID_ANY, u"Dependent Subtype:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.lblDependentSubtype.Wrap( -1 )
fgSizer41.Add( self.lblDependentSubtype, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboDependentSubtypeFieldChoices = []
self.cboDependentSubtypeField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboDependentSubtypeFieldChoices, 0 )
self.cboDependentSubtypeField.SetSelection( 0 )
self.cboDependentSubtypeField.Enable( False )
fgSizer41.Add( self.cboDependentSubtypeField, 0, wx.ALL, 5 )
self.m_staticText36 = wx.StaticText( self, wx.ID_ANY, u"Dependent count field", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText36.Wrap( -1 )
fgSizer41.Add( self.m_staticText36, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboDependentCountFieldChoices = []
self.cboDependentCountField = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboDependentCountFieldChoices, 0 )
self.cboDependentCountField.SetSelection( 0 )
self.cboDependentCountField.Enable( False )
fgSizer41.Add( self.cboDependentCountField, 0, wx.ALL, 5 )
szrSeqVar.Add( fgSizer41, 1, wx.EXPAND, 5 )
sbSizer1.Add( szrSeqVar, 1, wx.ALL|wx.EXPAND, 5 )
bSizer6.Add( sbSizer1, 0, wx.ALL|wx.EXPAND, 5 )
sbSizer2 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Viewport display" ), wx.VERTICAL )
fgSizer1 = wx.FlexGridSizer( 2, 2, 0, 0 )
fgSizer1.SetFlexibleDirection( wx.BOTH )
fgSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"Plot type:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
fgSizer1.Add( self.m_staticText3, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboViewportPlotChoices = [ u"Residuals", u"x data", u"y data" ]
self.cboViewportPlot = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboViewportPlotChoices, 0 )
self.cboViewportPlot.SetSelection( 0 )
fgSizer1.Add( self.cboViewportPlot, 1, wx.ALL|wx.EXPAND, 5 )
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"Colour map:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
fgSizer1.Add( self.m_staticText4, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
cboColourMapChoices = []
self.cboColourMap = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, cboColourMapChoices, 0 )
self.cboColourMap.SetSelection( 0 )
fgSizer1.Add( self.cboColourMap, 1, wx.ALL|wx.EXPAND, 5 )
self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"Line width:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
fgSizer1.Add( self.m_staticText5, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.spinLineWidth = wx.SpinCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.SP_ARROW_KEYS, 0, 10, 5 )
fgSizer1.Add( self.spinLineWidth, 0, wx.ALL|wx.EXPAND, 5 )
self.m_staticText6 = wx.StaticText( self, wx.ID_ANY, u"Scale factor:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
fgSizer1.Add( self.m_staticText6, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
self.txtScaleFactor = wx.TextCtrl( self, wx.ID_ANY, u"1.0", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer8.Add( self.txtScaleFactor, 0, wx.ALL|wx.EXPAND, 5 )
fgSizer1.Add( bSizer8, 1, wx.EXPAND, 5 )
sbSizer2.Add( fgSizer1, 1, wx.EXPAND, 5 )
bSizer6.Add( sbSizer2, 0, wx.ALL|wx.EXPAND, 5 )
bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
bSizer4.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_btnCalculate = wx.Button( self, wx.ID_ANY, u"Calculate", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer4.Add( self.m_btnCalculate, 0, wx.ALL, 5 )
bSizer6.Add( bSizer4, 0, wx.EXPAND, 5 )
self.txtOutput = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_MULTILINE )
bSizer6.Add( self.txtOutput, 1, wx.ALL|wx.EXPAND, 5 )
bSizer61 = wx.BoxSizer( wx.HORIZONTAL )
self.m_btnHelp = wx.Button( self, wx.ID_ANY, u"?", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT )
self.m_btnHelp.SetToolTipString( u"Help" )
bSizer61.Add( self.m_btnHelp, 0, wx.ALL, 5 )
self.m_btnClearLog = wx.Button( self, wx.ID_ANY, u"Clear log", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer61.Add( self.m_btnClearLog, 0, wx.ALL, 5 )
self.m_btnSaveLog = wx.Button( self, wx.ID_ANY, u"Save log", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer61.Add( self.m_btnSaveLog, 0, wx.ALL, 5 )
bSizer61.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer6.Add( bSizer61, 0, wx.EXPAND, 5 )
bSizer5.Add( bSizer6, 0, wx.EXPAND, 5 )
bSizer51 = wx.BoxSizer( wx.VERTICAL )
self.pnlPlot = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.SIMPLE_BORDER|wx.TAB_TRAVERSAL )
bSizer51.Add( self.pnlPlot, 1, wx.EXPAND, 5 )
bSizer7 = wx.BoxSizer( wx.HORIZONTAL )
self.m_btnSavePlot = wx.Button( self, wx.ID_ANY, u"Save plot", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer7.Add( self.m_btnSavePlot, 0, wx.ALL, 5 )
bSizer7.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )
self.btnOK = wx.Button( self, wx.ID_ANY, u"OK", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer7.Add( self.btnOK, 0, wx.ALL, 5 )
bSizer51.Add( bSizer7, 0, wx.EXPAND, 5 )
bSizer5.Add( bSizer51, 1, wx.ALL|wx.EXPAND, 5 )
bSizer1.Add( bSizer5, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.OnClose )
self.cboIndependentVariableDataType.Bind( wx.EVT_RADIOBOX, self.OnIndependentClicked )
self.cboDependentVariableDataType.Bind( wx.EVT_RADIOBOX, self.OnDependentClicked )
self.cboIndependentIDField.Bind( wx.EVT_CHOICE, self.OnIndependentIDFieldChoice )
self.cboDependentIDField.Bind( wx.EVT_CHOICE, self.OnDependentIDFieldChoice )
self.m_btnCalculate.Bind( wx.EVT_BUTTON, self.OnCalculate )
self.m_btnHelp.Bind( wx.EVT_BUTTON, self.OnHelp )
self.m_btnClearLog.Bind( wx.EVT_BUTTON, self.OnClearLog )
self.m_btnSaveLog.Bind( wx.EVT_BUTTON, self.OnSaveLog )
self.m_btnSavePlot.Bind( wx.EVT_BUTTON, self.OnSavePlot )
self.btnOK.Bind( wx.EVT_BUTTON, self.OnOK )
def __del__( self ):
pass
	# Virtual event handlers, override them in your derived class
def OnClose( self, event ):
event.Skip()
def OnIndependentClicked( self, event ):
event.Skip()
def OnDependentClicked( self, event ):
event.Skip()
def OnIndependentIDFieldChoice( self, event ):
event.Skip()
def OnDependentIDFieldChoice( self, event ):
event.Skip()
def OnCalculate( self, event ):
event.Skip()
def OnHelp( self, event ):
event.Skip()
def OnClearLog( self, event ):
event.Skip()
def OnSaveLog( self, event ):
event.Skip()
def OnSavePlot( self, event ):
event.Skip()
def OnOK( self, event ):
event.Skip()
| gpl-3.0 | -1,419,284,371,034,977,800 | 41.006515 | 189 | 0.686208 | false |
dexterx17/nodoSocket | clients/Python-2.7.6/Demo/pdist/RCSProxy.py | 47 | 4724 | #! /usr/bin/env python
"""RCS Proxy.
Provide a simplified interface on RCS files, locally or remotely.
The functionality is geared towards implementing some sort of
remote CVS like utility. It is modeled after the similar module
FSProxy.
The module defines two classes:
RCSProxyLocal -- used for local access
RCSProxyServer -- used on the server side of remote access
The corresponding client class, RCSProxyClient, is defined in module
rcsclient.
The remote classes are instantiated with an IP address and an optional
verbosity flag.
"""
import server
import md5
import os
import fnmatch
import string
import tempfile
import rcslib
class DirSupport:
def __init__(self):
self._dirstack = []
def __del__(self):
self._close()
def _close(self):
while self._dirstack:
self.back()
def pwd(self):
return os.getcwd()
def cd(self, name):
save = os.getcwd()
os.chdir(name)
self._dirstack.append(save)
def back(self):
if not self._dirstack:
raise os.error, "empty directory stack"
dir = self._dirstack[-1]
os.chdir(dir)
del self._dirstack[-1]
def listsubdirs(self, pat = None):
files = os.listdir(os.curdir)
files = filter(os.path.isdir, files)
return self._filter(files, pat)
def isdir(self, name):
return os.path.isdir(name)
def mkdir(self, name):
os.mkdir(name, 0777)
def rmdir(self, name):
os.rmdir(name)
class RCSProxyLocal(rcslib.RCS, DirSupport):
def __init__(self):
rcslib.RCS.__init__(self)
DirSupport.__init__(self)
def __del__(self):
DirSupport.__del__(self)
rcslib.RCS.__del__(self)
def sumlist(self, list = None):
return self._list(self.sum, list)
def sumdict(self, list = None):
return self._dict(self.sum, list)
def sum(self, name_rev):
f = self._open(name_rev)
BUFFERSIZE = 1024*8
sum = md5.new()
while 1:
buffer = f.read(BUFFERSIZE)
if not buffer:
break
sum.update(buffer)
self._closepipe(f)
return sum.digest()
def get(self, name_rev):
f = self._open(name_rev)
data = f.read()
self._closepipe(f)
return data
def put(self, name_rev, data, message=None):
name, rev = self._unmangle(name_rev)
f = open(name, 'w')
f.write(data)
f.close()
self.checkin(name_rev, message)
self._remove(name)
def _list(self, function, list = None):
"""INTERNAL: apply FUNCTION to all files in LIST.
Return a list of the results.
The list defaults to all files in the directory if None.
"""
if list is None:
list = self.listfiles()
res = []
for name in list:
try:
res.append((name, function(name)))
except (os.error, IOError):
res.append((name, None))
return res
def _dict(self, function, list = None):
"""INTERNAL: apply FUNCTION to all files in LIST.
Return a dictionary mapping files to results.
The list defaults to all files in the directory if None.
"""
if list is None:
list = self.listfiles()
dict = {}
for name in list:
try:
dict[name] = function(name)
except (os.error, IOError):
pass
return dict
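def _example_local_usage():
    # Sketch only (not part of the original demo): drive RCSProxyLocal directly
    # from the current directory and collect MD5 digests of the head revisions.
    proxy = RCSProxyLocal()
    proxy.cd('.')                  # push the current directory on the stack
    checksums = proxy.sumdict()    # maps each RCS work file to its MD5 digest
    proxy.back()                   # pop back to the starting directory
    return checksums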
class RCSProxyServer(RCSProxyLocal, server.SecureServer):
def __init__(self, address, verbose = server.VERBOSE):
RCSProxyLocal.__init__(self)
server.SecureServer.__init__(self, address, verbose)
def _close(self):
server.SecureServer._close(self)
RCSProxyLocal._close(self)
def _serve(self):
server.SecureServer._serve(self)
# Retreat into start directory
while self._dirstack: self.back()
def test_server():
import string
import sys
if sys.argv[1:]:
port = string.atoi(sys.argv[1])
else:
port = 4127
proxy = RCSProxyServer(('', port))
proxy._serverloop()
def test():
import sys
if not sys.argv[1:] or sys.argv[1] and sys.argv[1][0] in '0123456789':
test_server()
sys.exit(0)
proxy = RCSProxyLocal()
what = sys.argv[1]
if hasattr(proxy, what):
attr = getattr(proxy, what)
if callable(attr):
print apply(attr, tuple(sys.argv[2:]))
else:
print repr(attr)
else:
print "%s: no such attribute" % what
sys.exit(2)
if __name__ == '__main__':
test()
| mit | -5,689,671,796,840,622,000 | 22.858586 | 74 | 0.573455 | false |
enginoid/mutagen | tests/test_oggspeex.py | 4 | 2091 | import os
import shutil
import sys
from cStringIO import StringIO
from mutagen.ogg import OggPage
from mutagen.oggspeex import OggSpeex, OggSpeexInfo, delete
from tests import TestCase, add
from tests.test_ogg import TOggFileType
from tempfile import mkstemp
class TOggSpeex(TOggFileType):
Kind = OggSpeex
def setUp(self):
original = os.path.join("tests", "data", "empty.spx")
fd, self.filename = mkstemp(suffix='.ogg')
os.close(fd)
shutil.copy(original, self.filename)
self.audio = self.Kind(self.filename)
def test_module_delete(self):
delete(self.filename)
self.scan_file()
self.failIf(OggSpeex(self.filename).tags)
def test_channels(self):
self.failUnlessEqual(2, self.audio.info.channels)
def test_sample_rate(self):
self.failUnlessEqual(44100, self.audio.info.sample_rate)
def test_bitrate(self):
self.failUnlessEqual(0, self.audio.info.bitrate)
def test_invalid_not_first(self):
page = OggPage(open(self.filename, "rb"))
page.first = False
self.failUnlessRaises(IOError, OggSpeexInfo, StringIO(page.write()))
def test_vendor(self):
self.failUnless(
self.audio.tags.vendor.startswith("Encoded with Speex 1.1.12"))
self.failUnlessRaises(KeyError, self.audio.tags.__getitem__, "vendor")
def test_not_my_ogg(self):
fn = os.path.join('tests', 'data', 'empty.oggflac')
self.failUnlessRaises(IOError, type(self.audio), fn)
self.failUnlessRaises(IOError, self.audio.save, fn)
self.failUnlessRaises(IOError, self.audio.delete, fn)
def test_multiplexed_in_headers(self):
shutil.copy(
os.path.join("tests", "data", "multiplexed.spx"), self.filename)
audio = self.Kind(self.filename)
audio.tags["foo"] = ["bar"]
audio.save()
audio = self.Kind(self.filename)
self.failUnlessEqual(audio.tags["foo"], ["bar"])
def test_mime(self):
self.failUnless("audio/x-speex" in self.audio.mime)
add(TOggSpeex)
| gpl-2.0 | 7,577,141,766,819,959,000 | 31.671875 | 78 | 0.65758 | false |
jesseklein406/data-structures | data-structures/linked_list.py | 2 | 1548 | class Node(object):
def __init__(self, value):
self.value = value
self.next_node = None
class LinkedList(object):
    """Singly linked list; insert() pushes new values onto the head."""
    def __init__(self, iterable=None):
        self.sizeOfList = 0
        self.head = None
        if iterable is not None:
for item in iterable:
self.insert(item)
    def insert(self, val):
        """Add a new node holding val at the head of the list."""
new_node = Node(val)
new_node.next_node = self.head
self.head = new_node
self.sizeOfList += 1
    def pop(self):
        """Remove the head node and return its value."""
val = self.head.value
self.head = self.head.next_node
self.sizeOfList -= 1
return val
    def size(self):
        """Return the number of nodes currently in the list."""
return self.sizeOfList
    def search(self, val):
        """Return the first node whose value equals val, or None if absent."""
        current = self.head
        while current is not None:
            if current.value == val:
                return current
            current = current.next_node
    def remove(self, node):
        """Unlink the given node from the list; do nothing if it is not present."""
        previous = self.head
        current = self.head
        if current is node:
            self.head = current.next_node
            self.sizeOfList -= 1
        else:
            while current is not None and current is not node:
                previous = current
                current = current.next_node
            if current is node:
                previous.next_node = current.next_node
                self.sizeOfList -= 1
    def display(self):
        """Return the list's values as a tuple, starting from the head."""
        current = self.head
        result = ()
        while current is not None:
            result += (current.value,)
            current = current.next_node
return result
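# Example usage (sketch): build a list from an iterable and exercise the API.
if __name__ == '__main__':
    linked = LinkedList([1, 2, 3])     # insert() pushes to the head: 3 -> 2 -> 1
    linked.insert(4)                   # 4 -> 3 -> 2 -> 1
    print(linked.display())            # (4, 3, 2, 1)
    linked.remove(linked.search(2))    # drop the node holding 2
    print(linked.size())               # 3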
| mit | -8,443,942,395,358,694,000 | 23.1875 | 60 | 0.520026 | false |
adaxi/sickbeard | tests/name_parser_tests.py | 14 | 17744 | import datetime
import unittest
import sys, os.path
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../lib'))
from sickbeard.name_parser import parser
import sickbeard
sickbeard.SYS_ENCODING = 'UTF-8'
DEBUG = VERBOSE = False
simple_test_cases = {
'standard': {
'Mr.Show.Name.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name - S01E02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show.1.0.Name.S01.E03.My.Ep.Name-Group': parser.ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
'Show.Name.S01E02E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Mr. Show Name - S01E02-03 - My Ep Name': parser.ParseResult(None, 'Mr. Show Name', 1, [2,3], 'My Ep Name'),
'Show.Name.S01.E02.E03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name-0 2010', 1, [2], 'Source.Quality.Etc', 'Group'),
'S01E02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show Name - S06E01 - 2009-12-20 - Ep Name': parser.ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
'Show Name - S06E01 - -30-': parser.ParseResult(None, 'Show Name', 6, [1], '30-' ),
'Show-Name-S06E01-720p': parser.ParseResult(None, 'Show-Name', 6, [1], '720p' ),
'Show-Name-S06E01-1080i': parser.ParseResult(None, 'Show-Name', 6, [1], '1080i' ),
'Show.Name.S06E01.Other.WEB-DL': parser.ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL' ),
'Show.Name.S06E01 Some-Stuff Here': parser.ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here' ),
},
'fov': {
'Show_Name.1x02.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc', 'Group'),
'Show Name 1x02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name 1x02 x264 Test': parser.ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
'Show Name - 1x02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show_Name.1x02x03x04.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Source_Quality_Etc', 'Group'),
'Show Name - 1x02-03-04 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'My Ep Name'),
'1x02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show-Name-1x02-720p': parser.ParseResult(None, 'Show-Name', 1, [2], '720p'),
'Show-Name-1x02-1080i': parser.ParseResult(None, 'Show-Name', 1, [2], '1080i'),
'Show Name [05x12] Ep Name': parser.ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
'Show.Name.1x02.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2], 'WEB-DL'),
},
'standard_repeat': {
'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02.S01E03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show Name - S01E02 - S01E03 - S01E04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Ep Name'),
'Show.Name.S01E02.S01E03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2,3], 'WEB-DL'),
},
'fov_repeat': {
'Show.Name.1x02.1x03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2,3], 'Source.Quality.Etc', 'Group'),
'Show.Name.1x02.1x03': parser.ParseResult(None, 'Show Name', 1, [2,3]),
'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2,3,4], 'Ep Name'),
'Show.Name.1x02.1x03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2,3], 'WEB-DL'),
},
'bare': {
'Show.Name.102.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'show.name.2010.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010', 1, [23], 'source.quality.etc', 'group'),
'show.name.2010.222.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010.222', 1, [23], 'source.quality.etc', 'group'),
'Show.Name.102': parser.ParseResult(None, 'Show Name', 1, [2]),
'the.event.401.hdtv-lol': parser.ParseResult(None, 'the event', 4, [1], 'hdtv', 'lol'),
'show.name.2010.special.hdtv-blah': None,
},
'stupid': {
'tpz-abc102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
'tpz-abc.102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
},
'no_season': {
'Show Name - 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'01 - Ep Name': parser.ParseResult(None, None, None, [1], 'Ep Name'),
'Show Name - 01 - Ep Name - WEB-DL': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL'),
},
'no_season_general': {
'Show.Name.E23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'Show.Name.Part.3.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [3], 'Source.Quality.Etc', 'Group'),
'Show.Name.Part.1.and.Part.2.Blah-Group': parser.ParseResult(None, 'Show Name', None, [1,2], 'Blah', 'Group'),
'Show.Name.Part.IV.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc', 'Group'),
'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD': parser.ParseResult(None, 'Deconstructed', None, [7], '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
'Show.Name.E23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
},
'no_season_multi_ep': {
'Show.Name.E23-24.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23,24], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01-02 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1,2], 'Ep Name'),
'Show.Name.E23-24.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23,24], 'WEB-DL'),
},
'season_only': {
'Show.Name.S02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
'Show Name Season 2': parser.ParseResult(None, 'Show Name', 2),
'Season 02': parser.ParseResult(None, None, 2),
},
'scene_date_format': {
'Show.Name.2010.11.23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010,11,23)),
'Show Name - 2010.11.23': parser.ParseResult(None, 'Show Name', air_date = datetime.date(2010,11,23)),
'Show.Name.2010.23.11.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010,11,23)),
'Show Name - 2010-11-23 - Ep Name': parser.ParseResult(None, 'Show Name', extra_info = 'Ep Name', air_date = datetime.date(2010,11,23)),
'2010-11-23 - Ep Name': parser.ParseResult(None, extra_info = 'Ep Name', air_date = datetime.date(2010,11,23)),
'Show.Name.2010.11.23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010,11,23)),
}
}
combination_test_cases = [
('/test/path/to/Season 02/03 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3], 'Ep Name'),
['no_season', 'season_only']),
('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
parser.ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group'),
['stupid', 'season_only']),
('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
parser.ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF'),
['standard']),
('/home/drop/storage/TV/Terminator The Sarah Connor Chronicles/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
parser.ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short'),
['standard']),
(r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
parser.ParseResult(None, 'Jimmy Fallon', extra_info = 'blah', air_date = datetime.date(2010,12,15)),
['scene_date_format']),
(r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
parser.ParseResult(None, '30 Rock', 4, [22]),
['fov']),
('Season 2/Show Name - 03-04 - Ep Name.avi',
parser.ParseResult(None, 'Show Name', 2, [3,4], extra_info = 'Ep Name'),
['no_season', 'season_only']),
('Season 02/03-04-05 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3,4,5], extra_info = 'Ep Name'),
['no_season', 'season_only']),
]
unicode_test_cases = [
(u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], '720p.HDTV.x264.AC3', 'SHELDON')
),
('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], '720p.HDTV.x264.AC3', 'SHELDON')
),
]
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(unittest.TestCase):
def _test_unicode(self, name, result):
np = parser.NameParser(True)
parse_result = np.parse(name)
# this shouldn't raise an exception
a = repr(str(parse_result))
def test_unicode(self):
for (name, result) in unicode_test_cases:
self._test_unicode(name, result)
class FailureCaseTests(unittest.TestCase):
def _test_name(self, name):
np = parser.NameParser(True)
try:
parse_result = np.parse(name)
except parser.InvalidNameException:
return True
if VERBOSE:
print 'Actual: ', parse_result.which_regex, parse_result
return False
def test_failures(self):
for name in failure_cases:
self.assertTrue(self._test_name(name))
class ComboTests(unittest.TestCase):
def _test_combo(self, name, result, which_regexes):
if VERBOSE:
print
print 'Testing', name
np = parser.NameParser(True)
test_result = np.parse(name)
if DEBUG:
print test_result, test_result.which_regex
print result, which_regexes
self.assertEqual(test_result, result)
for cur_regex in which_regexes:
self.assertTrue(cur_regex in test_result.which_regex)
self.assertEqual(len(which_regexes), len(test_result.which_regex))
def test_combos(self):
for (name, result, which_regexes) in combination_test_cases:
# Normalise the paths. Converts UNIX-style paths into Windows-style
# paths when test is run on Windows.
self._test_combo(os.path.normpath(name), result, which_regexes)
class BasicTests(unittest.TestCase):
def _test_names(self, np, section, transform=None, verbose=False):
if VERBOSE or verbose:
print
print 'Running', section, 'tests'
for cur_test_base in simple_test_cases[section]:
if transform:
cur_test = transform(cur_test_base)
else:
cur_test = cur_test_base
if VERBOSE or verbose:
print 'Testing', cur_test
result = simple_test_cases[section][cur_test_base]
if not result:
self.assertRaises(parser.InvalidNameException, np.parse, cur_test)
return
else:
test_result = np.parse(cur_test)
if DEBUG or verbose:
print 'air_by_date:', test_result.air_by_date, 'air_date:', test_result.air_date
print test_result
print result
self.assertEqual(test_result.which_regex, [section])
self.assertEqual(test_result, result)
def test_standard_names(self):
np = parser.NameParser(False)
self._test_names(np, 'standard')
def test_standard_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'standard_repeat')
def test_fov_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov')
def test_fov_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov_repeat')
def test_bare_names(self):
np = parser.NameParser(False)
self._test_names(np, 'bare')
def test_stupid_names(self):
np = parser.NameParser(False)
self._test_names(np, 'stupid')
def test_no_season_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season')
def test_no_season_general_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_general')
def test_no_season_multi_ep_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_multi_ep')
def test_season_only_names(self):
np = parser.NameParser(False)
self._test_names(np, 'season_only')
def test_scene_date_format_names(self):
np = parser.NameParser(False)
self._test_names(np, 'scene_date_format')
def test_standard_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard', lambda x: x + '.avi')
def test_standard_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard_repeat', lambda x: x + '.avi')
def test_fov_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov', lambda x: x + '.avi')
def test_fov_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov_repeat', lambda x: x + '.avi')
def test_bare_file_names(self):
np = parser.NameParser()
self._test_names(np, 'bare', lambda x: x + '.avi')
def test_stupid_file_names(self):
np = parser.NameParser()
self._test_names(np, 'stupid', lambda x: x + '.avi')
def test_no_season_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season', lambda x: x + '.avi')
def test_no_season_general_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_general', lambda x: x + '.avi')
def test_no_season_multi_ep_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')
def test_season_only_file_names(self):
np = parser.NameParser()
self._test_names(np, 'season_only', lambda x: x + '.avi')
def test_scene_date_format_file_names(self):
np = parser.NameParser()
self._test_names(np, 'scene_date_format', lambda x: x + '.avi')
def test_combination_names(self):
pass
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('name_parser_tests.BasicTests.test_'+sys.argv[1])
else:
suite = unittest.TestLoader().loadTestsFromTestCase(BasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(ComboTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
suite = unittest.TestLoader().loadTestsFromTestCase(FailureCaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 | 6,614,100,721,204,994,000 | 48.552707 | 169 | 0.547565 | false |
creativeprojects/speedcam | picamera/capture_client.py | 1 | 1563 | import io
import socket
import struct
import time
import picamera
# Connect a client socket to my_server:8000 (change my_server to the
# hostname of your server)
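# For reference, a matching receiver can be sketched as follows (kept entirely
# in comments so this client script is unchanged).  It assumes the server
# listens on port 8000 and mirrors the protocol used below: a 4-byte
# little-endian length prefix, the JPEG bytes, then a zero length to finish.
#
#   server_socket = socket.socket()
#   server_socket.bind(('0.0.0.0', 8000))
#   server_socket.listen(0)
#   connection = server_socket.accept()[0].makefile('rb')
#   while True:
#       image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
#       if not image_len:
#           break
#       image_stream = io.BytesIO(connection.read(image_len))
#       # ... decode/process the JPEG held in image_stream ...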
client_socket = socket.socket()
client_socket.connect(('Ethelwyn.lan', 8000))
# Make a file-like object out of the connection
connection = client_socket.makefile('wb')
try:
camera = picamera.PiCamera()
camera.resolution = (640, 480)
# Start a preview and let the camera warm up for 2 seconds
camera.start_preview()
time.sleep(2)
# Note the start time and construct a stream to hold image data
# temporarily (we could write it directly to connection but in this
# case we want to find out the size of each capture first to keep
# our protocol simple)
start = time.time()
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg'):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
connection.write(stream.read())
# If we've been capturing for more than 30 seconds, quit
if time.time() - start > 30:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
# Write a length of zero to the stream to signal we're done
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close() | mit | 5,104,022,669,589,617,000 | 33.755556 | 71 | 0.672425 | false |
churchie317/wheel_of_fortune | Versions/Wheel_of_Fortune_v0.py | 1 | 16877 | ##for Raleigh & Grant
##who contributed more than they know
################################################################################
############################## WHEEL OF FORTUNE ################################
################################################################################
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
## print "Loading word list from file..."
inFile = open(WORDLIST_FILENAME, 'r', 0)
line = inFile.readline()
wordlist = string.split(line)
## print " ", len(wordlist), "words loaded."
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
wordlist = load_words()
def intro():
print "----------------------"
print "Welcome to Wheel of Fortune!"
print "I'm your host, Pat Sajak, with your hostess Vanna White."
print "----------------------"
playerNames_hum = ["Player 1", "Player 2", "Player 3"]
playerNames_comp = ["Chad Ledouche", "Roger"]
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
rounds = ["first", "second", "third", "fourth"]
gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds)
def gameSetup(playerNames_hum, playerNames_comp, playerOrder_val, rounds):
numPlayers = get_numPlayers()
players = get_playerNames(numPlayers, playerNames_hum, playerNames_comp)
game(players, playerOrder_val)
def game(players, playerOrder_val):
playerOrder = preRound_one(players, playerOrder_val)
## print "playerOrder is:", playerOrder
playerOrder_val = round_one(playerOrder, playerOrder_val)
playerOrder_val = round_two(playerOrder, playerOrder_val)
playerOrder_val = round_three(playerOrder, playerOrder_val)
playerOrder_val = round_four(playerOrder, playerOrder_val)
end_game(players)
def preRound_one(players, playerOrder_val):
playerOrder = get_playerOrder(players, playerOrder_val)
return playerOrder
def round_one(playerOrder, playerOrder_val):
hidden_word = choose_word(wordlist).lower()
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
print "The hidden_word is:", hidden_word
counter = 10
while counter > 0:
for i in range(counter):
counter += 1
print "The puzzle is:", disp_word
for j in range(len(playerOrder)):
print "j is equal to:", j
counter += 1
possession = True
while possession == True:
selection = 0
selection = get_playerSelection(playerOrder, hidden_word, disp_word, j)
if selection == 1:
guess = get_guessLetter()
print "----------------------"
print "Vanna, does the puzzle contain any '" + guess.upper() + "'s?"
raw_input("----------------------")
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 1:
print disp_word
print "Good guess:", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
raw_input("----------------------")
else:
print disp_word
print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
raw_input("----------------------")
possession = True
if incom_word == hidden_word:
break
else:
possession = False
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
else:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
break
if incom_word == hidden_word:
print "j is equal to:", j
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", hidden_word + "."
break
if incom_word == hidden_word:
break
return playerOrder_val
##def check_guessLetter(guess, hidden_word, disp_word):
## Exact same as bodies of rounds one through four! Figure out implementation
## if guess in hidden_word:
## for i in range(len(hidden_word)):
## if hidden_word[i] == guess:
## disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
## print "Good guess:", disp_word
## return true
## else:
## return false
def round_two(playerOrder, playerOrder_val):
hidden_word = choose_word(wordlist).lower()
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 10
while counter > 0:
for j in range(len(playerOrder)):
counter += 1
print "The puzzle is:", disp_word
for i in range(counter):
counter += 1
selection = 0
selection = get_playerSelection(playerOrder, hidden_word, disp_word)
if selection == 1:
guess = get_guessLetter()
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
print "Good guess:", disp_word
if incom_word == hidden_word:
break
else:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
break
if incom_word == hidden_word:
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", hidden_word + "."
break
if incom_word == hidden_word:
break
return playerOrder_val
def round_three(playerOrder, playerOrder_val):
hidden_word = choose_word(wordlist).lower()
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 10
while counter > 0:
for j in range(len(playerOrder)):
counter += 1
print "The puzzle is:", disp_word
for i in range(counter):
counter += 1
selection = 0
selection = get_playerSelection(playerOrder, hidden_word, disp_word)
if selection == 1:
guess = get_guessLetter()
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
print "Good guess:", disp_word
if incom_word == hidden_word:
break
else:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
break
if incom_word == hidden_word:
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", hidden_word + "."
break
if incom_word == hidden_word:
break
return playerOrder_val
def round_four(playerOrder, playerOrder_val):
hidden_word = choose_word(wordlist).lower()
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 10
while counter > 0:
for j in range(len(playerOrder)):
counter += 1
print "The puzzle is:", disp_word
for i in range(counter):
counter += 1
selection = 0
selection = get_playerSelection(playerOrder, hidden_word, disp_word)
if selection == 1:
guess = get_guessLetter()
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
print "Good guess:", disp_word
if incom_word == hidden_word:
break
else:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
break
if incom_word == hidden_word:
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", hidden_word + "."
break
if incom_word == hidden_word:
break
return playerOrder_val
def end_game(players):
print "----------------------"
print "GAME OVER!"
print "----------------------"
print "Would you like to play again? (y/n)"
selection = string.lower(raw_input())
if selection == "y" or selection == "yes":
playerOrder_val = [[0, 0], [0, 0], [0, 0]]
game(players, playerOrder_val)
def get_numPlayers():
numPlayers = 0
while numPlayers <= 0 or numPlayers > 3:
print "How many players (max players = 3) would like to play today?"
numPlayers = raw_input("Number of players: ",)
if numPlayers == "One" or numPlayers == "one" or numPlayers == "ONE" or numPlayers == "1":
numPlayers = 1
print "You have selected play for 1 player."
if numPlayers == "Two" or numPlayers == "two" or numPlayers == "TWO" or numPlayers == "2":
numPlayers = 2
print "You have selected play for 2 players."
if numPlayers == "Three" or numPlayers == "three" or numPlayers == "THREE" or numPlayers == "3":
numPlayers = 3
print "You have selected play for 3 players."
if numPlayers < 1 or numPlayers > 3 or numPlayers == type(int):
print "----------------------"
print "ERROR: INVALID PLAYER NUMBER"
raw_input ("----------------------")
return numPlayers
def get_playerNames(numPlayers, playerNames_hum, playerNames_comp):
players = ["Player 1", "Player 2", "Player 3"]
print "----------------------"
for i in range(numPlayers):
name = ""
while name == "":
name = raw_input(players[i] + ", what is your name? ")
name = name.title()
if name == "":
print "----------------------"
print "ERROR, FIELD EMPTY"
print "Please try again."
print "----------------------"
players[i] = name
if numPlayers == 3:
print "----------------------"
print "Welcome", players[0] + ",", players[1] + ", and", players[2] + "!"
if numPlayers == 2:
players[2] = playerNames_comp[0]
print "----------------------"
print "Welcome", players[0] + " and", players[1] + "! Today you will be playing against", players[2] + "."
if numPlayers == 1:
players[1] = playerNames_comp[0]
players[2] = playerNames_comp[1]
print "----------------------"
print "Welcome", players[0] + "! Today you will be playing against", players[1], "and", players[2] + "."
return players
def get_playerOrder(players, playerOrder_val):
playerOrder = [0, 0, 0]
print "We will now spin for first play."
for i in (0, 1, 2):
raw_input ("----------------------")
if i == 0:
print players[i] + " will spin fist."
print "----------------------"
print players[i] + "'s turn to spin."
raw_input("Press 'ENTER' to spin.")
print players[i] + " received $" + str(i * 100) + "."
for j in (0, 1):
if j == 0:
playerOrder_val[i][j] = (i * 100)
else:
playerOrder_val[i][j] = players[i]
playerOrder_val.sort(reverse=True)
for i in range(3):
playerOrder[i] = playerOrder_val[i][1]
print "----------------------"
print "Congratulations,", playerOrder[0] + ". You will spin first!"
print "The order of play will be:", playerOrder[0] + ", followed by", playerOrder[1] + ", followed by", playerOrder[2] + "."
raw_input ("----------------------")
return playerOrder
def get_playerOrder_val(playerOrder_val):
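    # NOTE: leftover helper that is never called and references an undefined j; spin order is computed in get_playerOrder above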
for i in (0, 1):
if j == 0:
playerOrder_val[i][j] = (i * 100)
def get_guessLetter():
check = False
while check == False:
guess = string.lower(raw_input("Please guess a letter: ",))
if len(guess) == 1 and guess in string.ascii_lowercase:
check = True
else:
print "----------------------"
print "ERROR: INVALID ENTRY!"
print "Please enter one letter per guess."
print "----------------------"
return guess
def get_guessWord():
guess = string.lower(raw_input("Input puzzle solution: ",))
return guess
def check_guessLetter(guess, hidden_word, disp_word):
## Exact same as bodies of rounds one through four! Figure out implementation
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
print "Good guess:", disp_word
return true
else:
return false
def get_playerSelection(playerOrder, hidden_word, disp_word, j):
selection = 0
while selection != "solve" or selection != "spin" or selection != "s" or selection != "pick":
print "----------------------"
print playerOrder[j] + ", would you like to SPIN or SOLVE THE PUZZLE?"
selection = raw_input("Selection: ")
selection = selection.lower()
if selection == "solve" or selection == "pick" or selection == "spin" or selection == "solve the puzzle":
break
else:
print "----------------------"
print "ERROR: UNRECOGNIZED COMMAND."
print "Please select from the following and try again:"
print "'SOLVE'"
print "'SOLVE THE PUZZLE'"
print "'SPIN'"
print "----------------------"
if selection == "pick a letter" or selection == "pick" or selection == "spin" or selection == "letter":
selection = 1
return selection
else:
selection = 2
return selection
def get_hidden_word(hidden_word, used_letters):
"""Returns a string of the form __ad___ by filling in correct guesses"""
visible_word = ""
for letter in hidden_word:
if letter in used_letters:
visible_word += letter
else:
if len(visible_word) > 0 and visible_word[-1] == '_':
visible_word += " "
visible_word += "_"
return visible_word
intro()
| unlicense | -2,878,137,057,453,136,400 | 40.982587 | 138 | 0.485394 | false |
uncommoncode/good_dog | good_dog/data.py | 1 | 2491 | import glob
import os
import scipy.ndimage
import random
import shutil
def load_np_image(image_path):
return scipy.ndimage.imread(image_path)
class DogsVsCats(object):
def __init__(self, train_dir, test_dir=None):
self.train_dir = train_dir
self.test_dir = test_dir
self.classes = ["cats", "dogs"]
def train_iter(self, shuffle):
"""Return an iterator that yields pairs of (img_path, label)."""
paths = glob.glob(os.path.join(self.train_dir, "*.jpg"))
if shuffle:
random.shuffle(paths)
for path in paths:
label = os.path.basename(path).partition(".")[0]
yield (path, label)
# TODO yield labels as well? Not sure where labels are...
#def test_iter(self):
# """Return an iterator that yields unlabeled images."""
# pass
def load_training_test_sample(data_iterator, sample_size=None, test_pct=0.2):
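    # split labeled (image_path, label) pairs from the iterator into training and test lists; test_pct is the held-out fraction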
training_labels = []
training_images = []
test_images = []
test_labels = []
if sample_size is None:
data = list(data_iterator)
sample_size = len(data)
data_iterator = iter(data)
test_size = int(sample_size * test_pct)
training_size = sample_size - test_size
for i in range(training_size):
image_path, label = data_iterator.next()
training_images.append(image_path)
training_labels.append(label)
for i in range(test_size):
image_path, label = data_iterator.next()
test_images.append(image_path)
test_labels.append(label)
return training_images, training_labels, test_images, test_labels
def require_directory(path):
if not os.path.exists(path):
os.makedirs(path)
def copy_to_directory(source_path, target_dir):
shutil.copy(source_path, target_dir)
def make_keras_training(data_iterator, out_dir, sample_size=None, validation_pct=0.2):
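    # copy sampled images into out_dir/train/<label> and out_dir/validate/<label>, the per-class folder layout used by Keras-style image loaders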
samples = load_training_test_sample(data_iterator, sample_size, validation_pct)
training_images, training_labels, validation_images, validation_labels = samples
for image_path, label in zip(training_images, training_labels):
image_dir = os.path.join(out_dir, "train", label)
require_directory(image_dir)
copy_to_directory(image_path, image_dir)
for image_path, label in zip(validation_images, validation_labels):
image_dir = os.path.join(out_dir, "validate", label)
require_directory(image_dir)
copy_to_directory(image_path, image_dir)
| bsd-2-clause | -7,083,395,612,974,963,000 | 35.632353 | 86 | 0.65275 | false |
punit-haria/multimodal-learning | code/models/vae.py | 1 | 20821 | import tensorflow as tf
import numpy as np
from copy import deepcopy
from models import base
from models import layers as nw
class VAE(base.Model):
"""
Variational Auto-Encoder
"""
def __init__(self, arguments, name, tracker, init_minibatch, session=None, log_dir=None, model_dir=None):
# dictionary of model/inference arguments
self.args = deepcopy(arguments)
# options
self.nw_type = self.args["type"]
self.dataset = self.args["data"]
self.is_autoregressive = self.args["autoregressive"]
self.is_flow = self.args["flow"]
self.distribution = self.args["output"]
# input and latent dimensions
self.n_z = self.args['n_z']
self.n_ch = self.args['n_channels']
self.h = self.args['height']
self.w = self.args['width']
self.n_x = self.h * self.w * self.n_ch
# sample minibatch for weight initialization
self.init_minibatch = init_minibatch
# object to track model performance (can be None)
self.tracker = tracker
if self.tracker is not None:
self.tracker.create_run(run_name=name, model_name=self.__class__.__name__, parameters=self.args)
# training steps counter
self.n_steps = 0
# base class constructor (initializes model)
super(VAE, self).__init__(name=name, session=session, log_dir=log_dir, model_dir=model_dir)
def _initialize(self):
# placeholders
self.x = tf.placeholder(tf.float32, [None, self.n_x], name='x')
        # data-dependent weight initialization (Salimans & Kingma, 2016)
x_init = tf.constant(self.init_minibatch, tf.float32)
self._model(x_init, init=True)
# variational autoencoder
self.z_mu, self.z_sigma, self.z, log_q, self.rx, self.rx_probs = self._model(self.x, init=False)
# reconstruction and penalty terms
self.l1 = self._reconstruction(logits=self.rx, labels=self.x, scope='reconstruction')
self.l2, self.log_q, self.log_p = self._penalty(mu=self.z_mu, sigma=self.z_sigma,
log_q=log_q, z_K=self.z, scope='penalty')
# training and test bounds
self.bound = self._variational_bound(scope='lower_bound')
# loss function
self.loss = self._loss(scope='loss')
# optimizer
self.step = self._optimizer(self.loss)
# summary variables
self.summary = self._summaries()
def _model(self, x, init):
with tf.variable_scope('autoencoder') as scope:
if not init:
scope.reuse_variables()
z_mu, z_sigma, h, _ = self._encoder(x, init=init, scope='x_enc')
z, log_q = self._sample(z_mu, z_sigma, h, init=init, scope='sampler')
rx, rx_probs = self._decoder(z, x, init=init, scope='x_dec')
return z_mu, z_sigma, z, log_q, rx, rx_probs
def _encoder(self, x, init, scope):
with tf.variable_scope(scope):
n_units = self.args['n_units']
n_fmaps = self.args['n_feature_maps']
extra = self.args['flow'] # extra output if using normalizing flow
mu = sigma = h = he = None
if self.nw_type == "fc":
mu, sigma, h, he = nw.fc_encode(x, n_units=n_units, n_z=self.n_z, extra=extra,
init=init, scope='fc_network')
elif self.nw_type == "cnn":
if self.dataset == "mnist":
mu, sigma, h, he = nw.convolution_mnist(x, n_ch=self.n_ch, n_feature_maps=n_fmaps, n_units=n_units,
n_z=self.n_z, extra=extra, init=init, scope='conv_network')
elif self.dataset == "cifar":
mu, sigma, h, he = nw.convolution_cifar(x, n_ch=self.n_ch, n_feature_maps=n_fmaps, n_units=n_units,
n_z=self.n_z, extra=extra, init=init, scope='conv_network')
elif self.dataset == "halved_mnist":
mu, sigma, h, he = nw.convolution_halved_mnist(x, n_ch=self.n_ch, n_feature_maps=n_fmaps,
n_units=n_units, n_z=self.n_z, extra=extra,
init=init, scope='conv_network')
elif self.dataset == 'sketchy':
mu, sigma, h, he = nw.convolution_sketchy(x, n_ch=self.n_ch, n_feature_maps=n_fmaps, n_units=n_units,
n_z=self.n_z, extra=extra, init=init, scope='conv_network')
elif self.dataset == 'daynight':
mu, sigma, h, he = nw.convolution_daynight(x, n_ch=self.n_ch, n_feature_maps=n_fmaps, n_units=n_units,
n_z=self.n_z, extra=extra, init=init, scope='conv_network')
else:
raise NotImplementedError
return mu, sigma, h, he
def _sample(self, mu0, sigma0, h, init, scope):
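        # reparameterized sampling: z = mu0 + sigma0 * eps, optionally transformed further by a normalizing flow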
with tf.variable_scope(scope):
n_samples = tf.shape(mu0)[0]
epsilon = tf.random_normal((n_samples, self.n_z))
if self.is_flow:
n_layers = self.args['flow_layers']
n_units = self.args['flow_units']
flow_type = self.args['flow_type']
z, log_q = nw.normalizing_flow(mu0, sigma0, h=h, epsilon=epsilon, K=n_layers, n_units=n_units,
flow_type=flow_type, init=init, scope='normalizing_flow')
else:
z = mu0 + tf.multiply(sigma0, epsilon)
log_q = None
return z, log_q
def _decoder(self, z, x, init, scope):
with tf.variable_scope(scope):
n_units = self.args['n_units']
n_fmaps = self.args['n_feature_maps']
n_layers = self.args['n_pixelcnn_layers']
n_x = self.n_x
n_ch = self.n_ch
n_mix = self.args['n_mixtures']
if self.n_ch == 1:
n_cats = 1
elif self.distribution == 'discrete':
n_cats = 256
elif self.distribution == 'continuous':
n_cats = n_mix * 3
else:
raise NotImplementedError
n_ch = n_ch * n_cats
n_x = n_x * n_cats
if not self.is_autoregressive:
if self.nw_type == "fc":
z = nw.fc_decode(z, n_units=n_units, n_x=n_x, init=init, scope='fc_decoder')
elif self.nw_type == "cnn":
if self.dataset == "mnist":
z = nw.deconvolution_mnist(z, n_ch=n_ch, n_feature_maps=n_fmaps, n_units=n_units,
init=init, scope='deconv_network')
elif self.dataset == "cifar":
z = nw.deconvolution_cifar(z, n_ch=n_ch, n_feature_maps=n_fmaps, n_units=n_units,
init=init, scope='deconv_network')
elif self.dataset == "halved_mnist":
z = nw.deconvolution_halved_mnist(z, n_ch=n_ch, n_feature_maps=n_fmaps, n_units=n_units,
init=init, scope='deconv_network')
elif self.dataset == 'sketchy':
z = nw.deconvolution_sketchy(z, n_ch=n_ch, n_feature_maps=n_fmaps, n_units=n_units,
init=init, scope='deconv_network')
elif self.dataset == 'daynight':
z = nw.deconvolution_daynight(z, n_ch=n_ch, n_feature_maps=n_fmaps, n_units=n_units,
init=init, scope='deconv_network')
else:
raise NotImplementedError
else: # autoregressive decoder
x = tf.reshape(x, shape=[-1, self.h, self.w, self.n_ch])
if self.nw_type == "fc":
raise NotImplementedError
if self.dataset == "mnist":
z = nw.deconvolution_mnist_ar(x, z, out_ch=n_ch, n_feature_maps=n_fmaps,
n_units=n_units, n_ar_layers=n_layers, init=init, scope='ar_decoder')
elif self.dataset == "cifar":
x = 2 * (x - 0.5)
z = nw.deconvolution_cifar_ar(x, z, out_ch=n_ch, n_feature_maps=n_fmaps,
n_units=n_units, n_ar_layers=n_layers, init=init, scope='ar_decoder')
elif self.dataset == "sketchy":
z = nw.deconvolution_sketchy_ar(x, z, out_ch=n_ch, n_feature_maps=n_fmaps,
n_units=n_units, n_ar_layers=n_layers, init=init, scope='ar_decoder')
elif self.dataset == "halved_mnist":
raise NotImplementedError
else:
raise NotImplementedError
if self.n_ch == 1:
logits = tf.reshape(z, shape=[-1, self.n_x])
parms = tf.nn.sigmoid(logits)
else:
logits = tf.reshape(z, shape=[-1, self.n_x, n_cats])
if self.distribution == 'discrete':
parms = tf.nn.softmax(logits, dim=-1)
elif self.distribution == 'continuous':
parms = self._sample_mixture(logits)
else:
raise NotImplementedError
return logits, parms
def _log_mixture_of_logistics(self, x, parms, scope):
"""
        Discretized Mixture of Logistics based on PixelCNN++ (Salimans et al., 2017).
x: ground truth data
parms: decoder output (i.e. mixture parameters)
"""
with tf.variable_scope(scope):
# x.shape = [batch_size, n_x]
            # parms.shape = [batch_size, n_x, 3*K]; e.g. 5+5+5 = 15 values for 5 mixtures (mean, log scale, mixture logit)
K = self.args['n_mixtures']
assert K == parms.get_shape()[2].value / 3
m = tf.slice(parms, begin=[0, 0, 0], size=[-1, -1, K]) # means
log_s = tf.slice(parms, begin=[0, 0, K], size=[-1, -1, K]) # log scale
log_s = tf.maximum(log_s, -7)
pi_logits = tf.slice(parms, begin=[0, 0, 2*K], size=[-1, -1, K])
log_pi = nw.logsoftmax(pi_logits) # log mixture proportions
x = tf.expand_dims(x, axis=-1)
x = tf.tile(x, multiples=[1,1,K])
c_x = x - m
inv_s = tf.exp(-log_s)
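            # bin_w is half of one discretization bin on the [-1, 1] scale (256 levels -> bin width 2/255)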
bin_w = 1 / 255
plus = inv_s * (c_x + bin_w)
minus = inv_s * (c_x - bin_w)
cdf_plus = tf.nn.sigmoid(plus)
cdf_minus = tf.nn.sigmoid(minus)
pdf = tf.maximum(cdf_plus - cdf_minus, 1e-12) # case (0,255)
log_pdf0 = plus - tf.nn.softplus(plus) # case 0
log_pdf255 = -tf.nn.softplus(minus) # case 255
log_pdf = tf.where(x < -0.999, log_pdf0,
tf.where(x > 0.999, log_pdf255, tf.log(pdf)))
log_mixture = nw.logsumexp(log_pdf + log_pi)
return log_mixture
def _reconstruction(self, logits, labels, scope):
with tf.variable_scope(scope):
if self.n_ch == 1:
l1 = tf.reduce_sum(-tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels), axis=1)
l1 = tf.reduce_mean(l1, axis=0)
return l1
elif self.n_ch == 3:
if self.distribution == 'discrete':
labels = tf.cast(labels * 255, dtype=tf.int32) # integers [0,255] inclusive
l1 = -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
elif self.distribution == 'continuous':
labels = 2 * (labels - 0.5) # scale to [-1,1]
l1 = self._log_mixture_of_logistics(x=labels, parms=logits, scope='mixture_of_logistics')
else:
raise NotImplementedError
l1 = tf.reduce_sum(l1, axis=1)
l1 = tf.reduce_mean(l1, axis=0)
return l1
def _penalty(self, mu, sigma, log_q, z_K, scope):
with tf.variable_scope(scope):
if self.is_flow:
log_p = -0.5 * tf.square(z_K) - 0.5 * np.log(2*np.pi)
penalty = tf.reduce_sum(-log_q + log_p, axis=1)
penalty = tf.reduce_mean(penalty, axis=0)
else:
log_p = -0.5*(tf.square(mu) + tf.square(sigma)) #- 0.5*np.log(2*np.pi)
log_q = -0.5*(1 + 2*tf.log(sigma)) #- 0.5*np.log(2*np.pi)
penalty = 0.5 * tf.reduce_sum(1 + 2*tf.log(sigma) - tf.square(mu) - tf.square(sigma), axis=1)
penalty = tf.reduce_mean(penalty, axis=0)
return penalty, log_q, log_p
def _variational_bound(self, scope):
with tf.variable_scope(scope):
return self.l1 + self.l2
def _loss(self, scope):
with tf.variable_scope(scope):
alpha = self.args['anneal']
if alpha < 0:
# free bits penalty (also works with normalizing flows)
l2 = tf.reduce_mean(-self.log_q + self.log_p, axis=0)
l2 = tf.minimum(l2, alpha)
l2 = tf.reduce_sum(l2)
else:
l2 = self.l2
return -(self.l1 + l2)
def _optimizer(self, loss, scope='optimizer'):
with tf.variable_scope(scope):
lr = self.args['learning_rate']
step = tf.train.RMSPropOptimizer(lr).minimize(loss)
return step
def _autoregressive_sampling(self, z, x, n_pixels):
"""
Synthesize images autoregressively.
"""
def _locate_2d(idx, w):
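            # convert a flat pixel index into (row, col) coordinates of the h x w image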
pos = idx + 1
r = np.ceil(pos / w)
c = pos - (r-1)*w
return int(r-1), int(c-1)
h = self.h
w = self.w
ch = self.n_ch
n_x = h * w * ch
remain = h*w - n_pixels
x = x.copy()
for i in range(remain):
feed = {self.z: z, self.x: x}
probs = self.sess.run(self.rx_probs, feed_dict=feed)
hp, wp = _locate_2d(n_pixels + i, w)
x = np.reshape(x, newshape=[-1, h, w, ch])
if self.n_ch == 1:
probs = np.reshape(probs, newshape=[-1, h, w, ch])
probs = probs[:, hp, wp, :]
x[:, hp, wp, :] = np.random.binomial(n=1, p=probs)
elif self.distribution == 'discrete':
probs = np.reshape(probs, newshape=[-1, h, w, ch, 256])
probs = probs[:, hp, wp, :, :]
x[:, hp, wp, :] = self._categorical_sampling(probs) / 255
elif self.distribution == 'continuous':
samples = np.reshape(probs, newshape=[-1, h, w, ch])
x[:, hp, wp, :] = samples[:, hp, wp, :]
else:
raise NotImplementedError
x = np.reshape(x, newshape=[-1, n_x])
return x
def _sample_mixture(self, parms):
"""
Sample from mixture of logistics.
"""
K = self.args['n_mixtures']
pi_logits = tf.slice(parms, begin=[0, 0, 2 * K], size=[-1, -1, K])
samp = tf.random_uniform(tf.shape(pi_logits), minval=1e-5, maxval=1 - 1e-5)
samp = tf.log(-tf.log(samp)) # scale the samples to (-infty, infty)
mix_idx = tf.argmax(pi_logits - samp, axis=2) # sample from categorical distribution
mix_choice = tf.one_hot(mix_idx, depth=K, axis=-1, dtype=tf.float32)
m = tf.slice(parms, begin=[0, 0, 0], size=[-1, -1, K]) # means
m = m * mix_choice
m = tf.reduce_sum(m, axis=2)
log_s = tf.slice(parms, begin=[0, 0, K], size=[-1, -1, K]) # log scale
log_s = log_s * mix_choice
log_s = tf.reduce_sum(log_s, axis=2)
log_s = tf.maximum(log_s, -7)
s = tf.exp(log_s)
u = tf.random_uniform(tf.shape(m), minval=1e-5, maxval=1 - 1e-5)
x = m + s * (tf.log(u) - tf.log(1-u))
x = tf.minimum(tf.maximum(x, -1), 1)
x = (x + 1) / 2 # scale to (0,1)
return x
def _factorized_sampling(self, rx):
"""
Sample from probabilities rx in a factorized way.
"""
if self.n_ch == 1:
rxp = np.random.binomial(n=1, p=rx)
elif self.distribution == 'discrete':
rxp = self._categorical_sampling(rx) / 255
elif self.distribution == 'continuous':
rxp = rx # (already sampled within tensorflow)
else:
raise NotImplementedError
return rxp
def _categorical_sampling(self, rx):
"""
Categorical sampling. Probabilities assumed to be on third dimension of three dimensional vector.
"""
batch = rx.shape[0]
features = rx.shape[1]
rxp = np.empty([batch, features], dtype=rx.dtype)
for i in range(batch):
for j in range(features):
rxp[i, j] = np.random.choice(a=256, p=rx[i, j])
return rxp
def _summaries(self,):
with tf.variable_scope("summaries"):
tf.summary.scalar('lower_bound', self.bound)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('reconstruction', self.l1)
tf.summary.scalar('penalty', self.l2)
tf.summary.scalar('sigma0', tf.reduce_mean(self.z_sigma))
c = 0.5 * np.log(2*np.pi)
lq = tf.reduce_sum(self.log_q - c, axis=1)
tf.summary.scalar('penalty_log_q', tf.reduce_mean(lq, axis=0))
lp = tf.reduce_sum(self.log_p - c, axis=1)
tf.summary.scalar('penalty_log_p', tf.reduce_mean(lp, axis=0))
return tf.summary.merge_all()
def _track(self, terms, prefix):
if self.tracker is not None:
for name, term in terms.items():
self.tracker.add(i=self.n_steps, value=term, series_name=prefix+name, run_name=self.name)
def train(self, x):
"""
Performs single training step.
"""
feed = {self.x: x}
outputs = [self.summary, self.step, self.bound, self.loss, self.l1, self.l2]
summary, _, bound, loss, reconstruction, penalty = self.sess.run(outputs, feed_dict=feed)
# track performance
terms = {'lower_bound': bound, 'loss': loss, 'reconstruction': reconstruction, 'penalty': penalty}
self._track(terms, prefix='train_')
self.tr_writer.add_summary(summary, self.n_steps)
self.n_steps = self.n_steps + 1
def test(self, x):
"""
Computes lower bound on test data.
"""
feed = {self.x: x}
outputs = [self.summary, self.bound, self.loss, self.l1, self.l2]
summary, bound, loss, reconstruction, penalty = self.sess.run(outputs, feed_dict=feed)
# track performance
terms = {'lower_bound': bound, 'loss': loss, 'reconstruction': reconstruction, 'penalty': penalty}
self._track(terms, prefix='test_')
self.te_writer.add_summary(summary, self.n_steps)
def reconstruct(self, x):
"""
Reconstruct x.
"""
if self.is_autoregressive:
n_pixels = self.args['n_conditional_pixels']
z = self.encode(x, mean=False)
return self._autoregressive_sampling(z, x, n_pixels)
else:
feed = {self.x: x}
rx = self.sess.run(self.rx_probs, feed_dict=feed)
return self._factorized_sampling(rx)
def encode(self, x, mean=False):
"""
Encode x.
"""
feed = {self.x: x}
if mean:
assert self.is_flow == False
return self.sess.run(self.z_mu, feed_dict=feed)
else:
return self.sess.run(self.z, feed_dict=feed)
def decode(self, z):
"""
Decodes z.
"""
if self.is_autoregressive:
x = np.random.rand(z.shape[0], self.n_x)
return self._autoregressive_sampling(z, x, n_pixels=0)
else:
feed = {self.z: z}
rx = self.sess.run(self.rx_probs, feed_dict=feed)
return self._factorized_sampling(rx)
def sample_prior(self, n_samples):
"""
Samples z from prior distribution.
"""
return np.random.normal(size=[n_samples, self.n_z])
| mit | 7,226,428,171,072,180,000 | 33.358086 | 122 | 0.505307 | false |
azhar3339/RAKE-tutorial | test_data.py | 7 | 1260 | __author__ = 'a_medelyan'
import os
# class to hold our test instance (document plus its correct manual keywords)
class TestDoc:
def __init__(self, name):
self.name = name
self.text = ''
self.keywords = []
# reading documents and their keywords from a directory
def read_data(input_dir):
test_set = {}
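    # test_set maps each document name to a TestDoc holding its text (.txt) and manual keywords (.key)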
for doc in os.listdir(input_dir):
file_reader = open(os.path.join(input_dir,doc), 'r')
file_name = doc[:-4]
if file_name not in test_set:
d = TestDoc(file_name)
else:
d = test_set[file_name]
if not doc.endswith(".txt"):
continue
# get document text
text = file_reader.read()
d.text = text
# get document keywords
file_reader = open(os.path.join(input_dir,file_name + ".key"), 'r')
manual_keywords = file_reader.read()
for line in manual_keywords.split('\n'):
line = line.rstrip().lower()
if len(line) > 0:
if '\t' in line:
d.keywords.append(line[0:line.find('\t')])
else:
d.keywords.append(line)
# add document to test set
test_set[file_name] = d
return test_set | mit | -4,995,581,503,702,450,000 | 25.270833 | 77 | 0.535714 | false |
pnorman/mapnik | utils/mapnik-config/build.py | 2 | 6293 | #
# This file is part of Mapnik (c++ mapping toolkit)
#
# Copyright (C) 2015 Artem Pavlenko
#
# Mapnik is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import re
import os
import sys
from copy import copy
from subprocess import Popen, PIPE
Import('env')
config_env = env.Clone()
def GetMapnikLibVersion():
ver = []
for line in open('../../include/mapnik/version.hpp').readlines():
if line.startswith('#define MAPNIK_MAJOR_VERSION'):
ver.append(line.split(' ')[2].strip())
if line.startswith('#define MAPNIK_MINOR_VERSION'):
ver.append(line.split(' ')[2].strip())
if line.startswith('#define MAPNIK_PATCH_VERSION'):
ver.append(line.split(' ')[2].strip())
version_string = ".".join(ver)
return version_string
if (GetMapnikLibVersion() != config_env['MAPNIK_VERSION_STRING']):
print 'Error: version.hpp mismatch (%s) to cached value (%s): please reconfigure mapnik' % (GetMapnikLibVersion(),config_env['MAPNIK_VERSION_STRING'])
Exit(1)
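# bash variable block prepended to mapnik-config.template.sh; the placeholders are filled from the SCons configuration below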
config_variables = '''#!/usr/bin/env bash
## variables
CONFIG_PREFIX="$( cd "$( dirname $( dirname "$0" ))" && pwd )"
CONFIG_MAPNIK_VERSION_STRING='%(version_string)s'
CONFIG_MAPNIK_VERSION='%(version)s'
CONFIG_GIT_REVISION='%(git_revision)s'
CONFIG_GIT_DESCRIBE='%(git_describe)s'
CONFIG_FONTS="%(fonts)s"
CONFIG_INPUT_PLUGINS="%(input_plugins)s"
CONFIG_MAPNIK_DEFINES='%(defines)s'
CONFIG_MAPNIK_LIBNAME='%(mapnik_libname)s'
CONFIG_MAPNIK_LIBPATH="%(mapnik_libpath)s"
CONFIG_DEP_LIBS='%(dep_libs)s'
CONFIG_MAPNIK_LDFLAGS="%(ldflags)s"
CONFIG_MAPNIK_INCLUDE="${CONFIG_PREFIX}/include -I${CONFIG_PREFIX}/include/mapnik/agg -I${CONFIG_PREFIX}/include/mapnik"
CONFIG_DEP_INCLUDES="%(dep_includes)s"
CONFIG_CXXFLAGS="%(cxxflags)s"
CONFIG_CXX='%(cxx)s'
CONFIG_MAPNIK_GDAL_DATA='%(mapnik_bundled_gdal_data)s'
CONFIG_MAPNIK_PROJ_LIB='%(mapnik_bundled_proj_data)s'
CONFIG_MAPNIK_ICU_DATA='%(mapnik_bundled_icu_data)s'
'''
def write_config(configuration,template,config_file):
template = open(template,'r').read()
open(config_file,'w').write(config_variables % configuration + template)
try:
os.chmod(config_file,0755)
except: pass
cxxflags = ' '.join(config_env['LIBMAPNIK_CXXFLAGS'])
defines = ' '.join(config_env['LIBMAPNIK_DEFINES'])
dep_includes = ''.join([' -I${NODE_CONFIG_PREFIX:-""}%s' % i for i in config_env['CPPPATH'] if not i.startswith('#')])
dep_includes += ' '
if config_env['HAS_CAIRO']:
dep_includes += ''.join([' -I${NODE_CONFIG_PREFIX:-""}%s' % i for i in env['CAIRO_CPPPATHS'] if not i.startswith('#')])
ldflags = ''.join([' -L%s' % i for i in config_env['LIBPATH'] if not i.startswith('#')])
ldflags += config_env['LIBMAPNIK_LINKFLAGS']
dep_libs = ''.join([' -l%s' % i for i in env['LIBMAPNIK_LIBS']])
# remove local agg from public linking
dep_libs = dep_libs.replace('-lagg','')
git_revision = 'N/A'
git_describe = config_env['MAPNIK_VERSION_STRING']
try:
git_cmd = "git rev-list --max-count=1 HEAD"
stdin, stderr = Popen(git_cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
if not stderr:
git_revision = stdin.strip()
git_cmd = "git describe"
stdin, stderr = Popen(git_cmd, shell=True, stdout=PIPE, stderr=PIPE).communicate()
if not stderr:
git_describe = stdin.strip()
except:
pass
# for fonts and input plugins we should try
# to store the relative path, if feasible
fontspath = config_env['MAPNIK_FONTS']
lib_root = os.path.join(config_env['PREFIX'], config_env['LIBDIR_SCHEMA'])
if lib_root in fontspath:
fontspath = "${CONFIG_PREFIX}/" + os.path.relpath(fontspath,config_env['PREFIX'])
inputpluginspath = config_env['MAPNIK_INPUT_PLUGINS']
if lib_root in inputpluginspath:
inputpluginspath = "${CONFIG_PREFIX}/" + os.path.relpath(inputpluginspath,config_env['PREFIX'])
lib_path = "${CONFIG_PREFIX}/" + config_env['LIBDIR_SCHEMA']
mapnik_bundled_gdal_data = ''
mapnik_bundled_proj_data = ''
mapnik_bundled_icu_data = ''
configuration = {
"git_revision": git_revision,
"git_describe": git_describe,
"version_string": config_env['MAPNIK_VERSION_STRING'],
"version": config_env['MAPNIK_VERSION'],
"mapnik_libname": env['MAPNIK_NAME'],
"mapnik_libpath": lib_path,
"ldflags": ldflags,
"dep_libs": dep_libs,
"dep_includes": dep_includes,
"fonts": fontspath,
"input_plugins": inputpluginspath,
"defines":defines,
"cxxflags":cxxflags,
"cxx":env['CXX'],
"mapnik_bundled_gdal_data":mapnik_bundled_gdal_data,
"mapnik_bundled_proj_data":mapnik_bundled_proj_data,
"mapnik_bundled_icu_data":mapnik_bundled_icu_data,
}
## if we are statically linking dependencies
## then they do not need to be reported in ldflags
#if env['RUNTIME_LINK'] == 'static':
# configuration['ldflags'] = ''
# configuration['dep_libs'] = ''
template = 'mapnik-config.template.sh'
config_file = 'mapnik-config'
source = config_file
write_config(configuration,template,config_file)
target_path = os.path.normpath(os.path.join(config_env['INSTALL_PREFIX'],'bin'))
full_target = os.path.join(target_path,config_file)
Depends(full_target, env.subst('../../src/%s' % env['MAPNIK_LIB_NAME']))
Depends(full_target, '../../include/mapnik/version.hpp')
if 'install' in COMMAND_LINE_TARGETS:
# we must add 'install' catch here because otherwise
# custom command will be run when not installing
env.Alias('install',full_target)
env.Command(full_target, config_file,
[
Copy("$TARGET","$SOURCE"),
Chmod("$TARGET", 0755),
])
config_env['create_uninstall_target'](env,os.path.join(target_path,config_file))
| lgpl-2.1 | 4,558,087,957,803,858,400 | 33.767956 | 154 | 0.685841 | false |
dsarkozi/care4care-sdp-grp4 | Care4Care/C4CApplication/views/forms/DonateTimeForm.py | 1 | 1901 | from django import forms
from django.utils.translation import ugettext_lazy as _
from C4CApplication.models.member import Member
class DonateTimeForm(forms.Form):
message = forms.CharField(
widget=forms.Textarea(
attrs={'rows':'5'}
)
)
days = forms.DecimalField(
initial = 0,
min_value=0,
)
hours = forms.DecimalField(
initial = 0,
min_value=0,
)
minutes = forms.DecimalField(
initial = 0,
min_value=0,
)
receiver = forms.ChoiceField(
widget=forms.RadioSelect,
choices=(('c4c', _('Send to one of your branches')), ('user', _('Send to user')))
)
members = Member.objects.values('mail', 'first_name', 'last_name')
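    # build (mail, "First Last") choice tuples for every registered member to populate the user dropdown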
tup = ()
for member in members:
tup += ((member['mail'], member['first_name'] + ' ' + member['last_name']),)
tup = tuple(tup)
userDropdown = forms.ChoiceField(
widget=forms.Select,
choices=tup
)
def __init__(self, db_member=None, *args, **kwargs):
super(DonateTimeForm, self).__init__(*args, **kwargs)
BRANCH = ()
if db_member is not None:
for branch in db_member.branch.all():
BRANCH += ((branch.name,branch.name),)
self.fields['branchDropdown'] = forms.CharField(widget=forms.Select(choices=BRANCH))
def clean(self):
cleaned_data = super(DonateTimeForm, self).clean()
        days = cleaned_data.get("days", 0)
        hours = cleaned_data.get("hours", 0)
        minutes = cleaned_data.get("minutes", 0)
        time = days*1440 + hours*60 + minutes
#check a good time donation
if time == 0 :
self.add_error(_("minutes"), forms.ValidationError(_("You can't do a donation of 0 time !"), code='invalid'))
return cleaned_data | agpl-3.0 | -3,614,336,686,322,137,000 | 26.970588 | 121 | 0.571804 | false |
kenorb/BitTorrent | twisted/protocols/policies.py | 2 | 17426 | # -*- test-case-name: twisted.test.test_policies -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Resource limiting policies.
@seealso: See also L{twisted.protocols.htb} for rate limiting.
"""
# system imports
import sys, operator
# twisted imports
from twisted.internet.protocol import ServerFactory, Protocol, ClientFactory
from twisted.internet.interfaces import ITransport
from twisted.internet import reactor, error
from twisted.python import log, components
from zope.interface import implements, providedBy, directlyProvides
class ProtocolWrapper(Protocol):
"""Wraps protocol instances and acts as their transport as well."""
disconnecting = 0
def __init__(self, factory, wrappedProtocol):
self.wrappedProtocol = wrappedProtocol
self.factory = factory
def makeConnection(self, transport):
directlyProvides(self, *providedBy(self) + providedBy(transport))
Protocol.makeConnection(self, transport)
# Transport relaying
def write(self, data):
self.transport.write(data)
def writeSequence(self, data):
self.transport.writeSequence(data)
def loseConnection(self):
self.disconnecting = 1
self.transport.loseConnection()
def getPeer(self):
return self.transport.getPeer()
def getHost(self):
return self.transport.getHost()
def registerProducer(self, producer, streaming):
self.transport.registerProducer(producer, streaming)
def unregisterProducer(self):
self.transport.unregisterProducer()
def stopConsuming(self):
self.transport.stopConsuming()
def __getattr__(self, name):
return getattr(self.transport, name)
# Protocol relaying
def connectionMade(self):
self.factory.registerProtocol(self)
self.wrappedProtocol.makeConnection(self)
def dataReceived(self, data):
self.wrappedProtocol.dataReceived(data)
def connectionLost(self, reason):
self.factory.unregisterProtocol(self)
self.wrappedProtocol.connectionLost(reason)
class WrappingFactory(ClientFactory):
"""Wraps a factory and its protocols, and keeps track of them."""
protocol = ProtocolWrapper
def __init__(self, wrappedFactory):
self.wrappedFactory = wrappedFactory
self.protocols = {}
def doStart(self):
self.wrappedFactory.doStart()
ClientFactory.doStart(self)
def doStop(self):
self.wrappedFactory.doStop()
ClientFactory.doStop(self)
def startedConnecting(self, connector):
self.wrappedFactory.startedConnecting(connector)
def clientConnectionFailed(self, connector, reason):
self.wrappedFactory.clientConnectionFailed(connector, reason)
def clientConnectionLost(self, connector, reason):
self.wrappedFactory.clientConnectionLost(connector, reason)
def buildProtocol(self, addr):
return self.protocol(self, self.wrappedFactory.buildProtocol(addr))
def registerProtocol(self, p):
"""Called by protocol to register itself."""
self.protocols[p] = 1
def unregisterProtocol(self, p):
"""Called by protocols when they go away."""
del self.protocols[p]
class ThrottlingProtocol(ProtocolWrapper):
"""Protocol for ThrottlingFactory."""
# wrap API for tracking bandwidth
def write(self, data):
self.factory.registerWritten(len(data))
ProtocolWrapper.write(self, data)
def writeSequence(self, seq):
self.factory.registerWritten(reduce(operator.add, map(len, seq)))
ProtocolWrapper.writeSequence(self, seq)
def dataReceived(self, data):
self.factory.registerRead(len(data))
ProtocolWrapper.dataReceived(self, data)
def registerProducer(self, producer, streaming):
self.producer = producer
ProtocolWrapper.registerProducer(self, producer, streaming)
def unregisterProducer(self):
del self.producer
ProtocolWrapper.unregisterProducer(self)
def throttleReads(self):
self.transport.pauseProducing()
def unthrottleReads(self):
self.transport.resumeProducing()
def throttleWrites(self):
if hasattr(self, "producer"):
self.producer.pauseProducing()
def unthrottleWrites(self):
if hasattr(self, "producer"):
self.producer.resumeProducing()
class ThrottlingFactory(WrappingFactory):
"""Throttles bandwidth and number of connections.
Write bandwidth will only be throttled if there is a producer
registered.
"""
protocol = ThrottlingProtocol
def __init__(self, wrappedFactory, maxConnectionCount=sys.maxint, readLimit=None, writeLimit=None):
WrappingFactory.__init__(self, wrappedFactory)
self.connectionCount = 0
self.maxConnectionCount = maxConnectionCount
self.readLimit = readLimit # max bytes we should read per second
self.writeLimit = writeLimit # max bytes we should write per second
self.readThisSecond = 0
self.writtenThisSecond = 0
self.unthrottleReadsID = None
self.checkReadBandwidthID = None
self.unthrottleWritesID = None
self.checkWriteBandwidthID = None
def registerWritten(self, length):
"""Called by protocol to tell us more bytes were written."""
self.writtenThisSecond += length
def registerRead(self, length):
"""Called by protocol to tell us more bytes were read."""
self.readThisSecond += length
def checkReadBandwidth(self):
"""Checks if we've passed bandwidth limits."""
if self.readThisSecond > self.readLimit:
self.throttleReads()
throttleTime = (float(self.readThisSecond) / self.readLimit) - 1.0
self.unthrottleReadsID = reactor.callLater(throttleTime,
self.unthrottleReads)
self.readThisSecond = 0
self.checkReadBandwidthID = reactor.callLater(1, self.checkReadBandwidth)
def checkWriteBandwidth(self):
if self.writtenThisSecond > self.writeLimit:
self.throttleWrites()
throttleTime = (float(self.writtenThisSecond) / self.writeLimit) - 1.0
self.unthrottleWritesID = reactor.callLater(throttleTime,
self.unthrottleWrites)
# reset for next round
self.writtenThisSecond = 0
self.checkWriteBandwidthID = reactor.callLater(1, self.checkWriteBandwidth)
def throttleReads(self):
"""Throttle reads on all protocols."""
log.msg("Throttling reads on %s" % self)
for p in self.protocols.keys():
p.throttleReads()
def unthrottleReads(self):
"""Stop throttling reads on all protocols."""
self.unthrottleReadsID = None
log.msg("Stopped throttling reads on %s" % self)
for p in self.protocols.keys():
p.unthrottleReads()
def throttleWrites(self):
"""Throttle writes on all protocols."""
log.msg("Throttling writes on %s" % self)
for p in self.protocols.keys():
p.throttleWrites()
def unthrottleWrites(self):
"""Stop throttling writes on all protocols."""
self.unthrottleWritesID = None
log.msg("Stopped throttling writes on %s" % self)
for p in self.protocols.keys():
p.unthrottleWrites()
def buildProtocol(self, addr):
if self.connectionCount == 0:
if self.readLimit is not None:
self.checkReadBandwidth()
if self.writeLimit is not None:
self.checkWriteBandwidth()
if self.connectionCount < self.maxConnectionCount:
self.connectionCount += 1
return WrappingFactory.buildProtocol(self, addr)
else:
log.msg("Max connection count reached!")
return None
def unregisterProtocol(self, p):
WrappingFactory.unregisterProtocol(self, p)
self.connectionCount -= 1
if self.connectionCount == 0:
if self.unthrottleReadsID is not None:
self.unthrottleReadsID.cancel()
if self.checkReadBandwidthID is not None:
self.checkReadBandwidthID.cancel()
if self.unthrottleWritesID is not None:
self.unthrottleWritesID.cancel()
if self.checkWriteBandwidthID is not None:
self.checkWriteBandwidthID.cancel()
class SpewingProtocol(ProtocolWrapper):
def dataReceived(self, data):
log.msg("Received: %r" % data)
ProtocolWrapper.dataReceived(self,data)
def write(self, data):
log.msg("Sending: %r" % data)
ProtocolWrapper.write(self,data)
class SpewingFactory(WrappingFactory):
protocol = SpewingProtocol
class LimitConnectionsByPeer(WrappingFactory):
"""Stability: Unstable"""
maxConnectionsPerPeer = 5
def startFactory(self):
self.peerConnections = {}
def buildProtocol(self, addr):
peerHost = addr[0]
connectionCount = self.peerConnections.get(peerHost, 0)
if connectionCount >= self.maxConnectionsPerPeer:
return None
self.peerConnections[peerHost] = connectionCount + 1
return WrappingFactory.buildProtocol(self, addr)
def unregisterProtocol(self, p):
peerHost = p.getPeer()[1]
self.peerConnections[peerHost] -= 1
if self.peerConnections[peerHost] == 0:
del self.peerConnections[peerHost]
class LimitTotalConnectionsFactory(ServerFactory):
"""Factory that limits the number of simultaneous connections.
API Stability: Unstable
@type connectionCount: C{int}
@ivar connectionCount: number of current connections.
@type connectionLimit: C{int} or C{None}
@cvar connectionLimit: maximum number of connections.
@type overflowProtocol: L{Protocol} or C{None}
@cvar overflowProtocol: Protocol to use for new connections when
connectionLimit is exceeded. If C{None} (the default value), excess
connections will be closed immediately.
"""
connectionCount = 0
connectionLimit = None
overflowProtocol = None
def buildProtocol(self, addr):
if (self.connectionLimit is None or
self.connectionCount < self.connectionLimit):
# Build the normal protocol
wrappedProtocol = self.protocol()
elif self.overflowProtocol is None:
# Just drop the connection
return None
else:
# Too many connections, so build the overflow protocol
wrappedProtocol = self.overflowProtocol()
wrappedProtocol.factory = self
protocol = ProtocolWrapper(self, wrappedProtocol)
self.connectionCount += 1
return protocol
def registerProtocol(self, p):
pass
def unregisterProtocol(self, p):
self.connectionCount -= 1
class TimeoutProtocol(ProtocolWrapper):
"""Protocol that automatically disconnects when the connection is idle.
Stability: Unstable
"""
def __init__(self, factory, wrappedProtocol, timeoutPeriod):
"""Constructor.
@param factory: An L{IFactory}.
        @param wrappedProtocol: A L{Protocol} to wrap.
@param timeoutPeriod: Number of seconds to wait for activity before
timing out.
"""
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
self.timeoutCall = None
self.setTimeout(timeoutPeriod)
def setTimeout(self, timeoutPeriod=None):
"""Set a timeout.
This will cancel any existing timeouts.
@param timeoutPeriod: If not C{None}, change the timeout period.
Otherwise, use the existing value.
"""
self.cancelTimeout()
if timeoutPeriod is not None:
self.timeoutPeriod = timeoutPeriod
self.timeoutCall = reactor.callLater(self.timeoutPeriod, self.timeoutFunc)
def cancelTimeout(self):
"""Cancel the timeout.
If the timeout was already cancelled, this does nothing.
"""
if self.timeoutCall:
try:
self.timeoutCall.cancel()
except error.AlreadyCalled:
pass
self.timeoutCall = None
def resetTimeout(self):
"""Reset the timeout, usually because some activity just happened."""
if self.timeoutCall:
self.timeoutCall.reset(self.timeoutPeriod)
def write(self, data):
self.resetTimeout()
ProtocolWrapper.write(self, data)
def writeSequence(self, seq):
self.resetTimeout()
ProtocolWrapper.writeSequence(self, seq)
def dataReceived(self, data):
self.resetTimeout()
ProtocolWrapper.dataReceived(self, data)
def connectionLost(self, reason):
self.cancelTimeout()
ProtocolWrapper.connectionLost(self, reason)
def timeoutFunc(self):
"""This method is called when the timeout is triggered.
By default it calls L{loseConnection}. Override this if you want
something else to happen.
"""
self.loseConnection()
class TimeoutFactory(WrappingFactory):
"""Factory for TimeoutWrapper.
Stability: Unstable
"""
protocol = TimeoutProtocol
def __init__(self, wrappedFactory, timeoutPeriod=30*60):
self.timeoutPeriod = timeoutPeriod
WrappingFactory.__init__(self, wrappedFactory)
def buildProtocol(self, addr):
return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
timeoutPeriod=self.timeoutPeriod)
class TrafficLoggingProtocol(ProtocolWrapper):
_counter = 0
def __init__(self, factory, wrappedProtocol, logfile, lengthLimit=None):
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
self.logfile = logfile
self.lengthLimit = lengthLimit
TrafficLoggingProtocol._counter += 1
self._number = TrafficLoggingProtocol._counter
def _log(self, line):
self.logfile.write(line + '\n')
self.logfile.flush()
def _mungeData(self, data):
if self.lengthLimit and len(data) > self.lengthLimit:
data = data[:self.lengthLimit - 12] + '<... elided>'
return data
# IProtocol
def connectionMade(self):
self._log('*')
return ProtocolWrapper.connectionMade(self)
def dataReceived(self, data):
self._log('C %d: %r' % (self._number, self._mungeData(data)))
return ProtocolWrapper.dataReceived(self, data)
def connectionLost(self, reason):
self._log('C %d: %r' % (self._number, reason))
return ProtocolWrapper.connectionLost(self, reason)
# ITransport
def write(self, data):
self._log('S %d: %r' % (self._number, self._mungeData(data)))
return ProtocolWrapper.write(self, data)
def writeSequence(self, iovec):
self._log('SV %d: %r' % (self._number, [self._mungeData(d) for d in iovec]))
return ProtocolWrapper.writeSequence(self, iovec)
def loseConnection(self):
self._log('S %d: *' % (self._number,))
return ProtocolWrapper.loseConnection(self)
class TrafficLoggingFactory(WrappingFactory):
protocol = TrafficLoggingProtocol
_counter = 0
def __init__(self, wrappedFactory, logfilePrefix, lengthLimit=None):
self.logfilePrefix = logfilePrefix
self.lengthLimit = lengthLimit
WrappingFactory.__init__(self, wrappedFactory)
def open(self, name):
return file(name, 'w')
def buildProtocol(self, addr):
self._counter += 1
logfile = self.open(self.logfilePrefix + '-' + str(self._counter))
return self.protocol(self, self.wrappedFactory.buildProtocol(addr),
logfile, self.lengthLimit)
class TimeoutMixin:
"""Mixin for protocols which wish to timeout connections
@cvar timeOut: The number of seconds after which to timeout the connection.
"""
timeOut = None
__timeoutCall = None
def callLater(self, period, func):
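        # thin wrapper around reactor.callLater, kept separate so it can be overridden (e.g. in tests)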
return reactor.callLater(period, func)
def resetTimeout(self):
"""Reset the timeout count down"""
if self.__timeoutCall is not None and self.timeOut is not None:
self.__timeoutCall.reset(self.timeOut)
def setTimeout(self, period):
"""Change the timeout period
@type period: C{int} or C{NoneType}
@param period: The period, in seconds, to change the timeout to, or
C{None} to disable the timeout.
"""
prev = self.timeOut
self.timeOut = period
if self.__timeoutCall is not None:
if period is None:
self.__timeoutCall.cancel()
self.__timeoutCall = None
else:
self.__timeoutCall.reset(period)
elif period is not None:
self.__timeoutCall = self.callLater(period, self.__timedOut)
return prev
def __timedOut(self):
self.__timeoutCall = None
self.timeoutConnection()
def timeoutConnection(self):
"""Called when the connection times out.
Override to define behavior other than dropping the connection.
"""
self.transport.loseConnection()
| gpl-3.0 | -1,337,991,955,031,797,200 | 31.151292 | 103 | 0.651268 | false |
Leoniela/nipype | nipype/interfaces/ants/base.py | 8 | 3737 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The ants module provides basic functions for interfacing with ANTS tools."""
# Local imports
from ..base import (CommandLine, CommandLineInputSpec, traits,
isdefined)
from ... import logging
logger = logging.getLogger('interface')
# -Using -1 gives primary responsibility to ITKv4 to do the correct
# thread limiting.
# -Using 1 takes a very conservative approach to avoid overloading
# the computer (when running MultiProc) by forcing everything to
# single threaded. This can be a severe penalty for registration
# performance.
LOCAL_DEFAULT_NUMBER_OF_THREADS=1
# -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
# as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise
# ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precedence.
# This behavior states that if the user explicitly specifies
# num_threads, then respect that no matter what SGE tries to limit.
PREFERED_ITKv4_THREAD_LIMIT_VARIABLE='NSLOTS'
ALT_ITKv4_THREAD_LIMIT_VARIABLE='ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'
class ANTSCommandInputSpec(CommandLineInputSpec):
"""Base Input Specification for all ANTS Commands
"""
num_threads = traits.Int(LOCAL_DEFAULT_NUMBER_OF_THREADS, usedefault=True,
nohash=True, desc="Number of ITK threads to use")
class ANTSCommand(CommandLine):
"""Base class for ANTS interfaces
"""
input_spec = ANTSCommandInputSpec
_num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS
def __init__(self, **inputs):
super(ANTSCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
## ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested
## by the end user. The default setting did not allow for
## overwriting the default values.
## In ITKv4 (the version used for all ANTS programs), ITK respects
## the SGE controlled $NSLOTS environmental variable.
## If user specifies -1, then that indicates that the system
## default behavior should be the one specified by ITKv4 rules
## (i.e. respect SGE $NSLOTS or environmental variables of threads, or
## user environmental settings)
if ( self.inputs.num_threads == -1 ):
if ( ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ ):
del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE]
if ( PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ ):
del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE]
else:
self.inputs.environ.update({PREFERED_ITKv4_THREAD_LIMIT_VARIABLE:
'%s' % self.inputs.num_threads})
@staticmethod
def _format_xarray(val):
""" Convenience method for converting input arrays [1,2,3] to commandline format '1x2x3' """
return 'x'.join([str(x) for x in val])
@classmethod
def set_default_num_threads(cls, num_threads):
"""Set the default number of threads for ITK calls
This method is used to set the default number of ITK threads for all
the ANTS interfaces. However, setting this will not update the output
type for any existing instances. For these, assign the
<instance>.inputs.num_threads
"""
cls._num_threads = num_threads
| bsd-3-clause | 5,414,236,903,631,197,000 | 42.964706 | 100 | 0.678084 | false |
draekko/androguard | tools/api/androapi.py | 23 | 7202 | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2010, Anthony Desnos <desnos at t0t0.org>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BeautifulSoup import BeautifulSoup, Tag
import os, sys, re
MANIFEST_PERMISSION_HTML = "docs/reference/android/Manifest.permission.html"
PERMS = {}
PERMS_RE = None
PERMS_API = {}
try:
import psyco
psyco.full()
except ImportError:
pass
class Constant(object):
def __init__(self, name, perms, desc_return):
self.name = name
self.perms = perms
self.desc_return = desc_return
class Function(object):
def __init__(self, name, perms, desc_return):
self.name = name
self.perms = perms
self.desc_return = desc_return
def extractPerms( filename ):
soup = BeautifulSoup( open( filename ) )
s = ""
for i in soup.findAll("table", attrs={'id' : "constants"}):
for j in i.findChildren( "tr" ):
td = j.findChildren( "td" )
if td != []:
_type = str( td[0].text )
_name = str( td[1].text )
_desc = str( td[2].text )
PERMS[_name] = [ _type, _desc ]
PERMS_API[_name] = {}
s += _name + "|"
#PERMS_RE = re.compile(s[:-1])
def extractInformation( filename ):
soup = BeautifulSoup( open( filename ) )
package = filename[ filename.find("reference/android/") : ][10:-5].replace("//", "/")
package = package.replace("/", ".")
for i in soup.findAll('a', attrs={'name' : re.compile(".")}):
next_div = i.findNext("div")
perms = []
for perm in PERMS:
perm_access = next_div.findAll(text=re.compile(perm))
if perm_access != []:
perms.append( perm )
#print i.name, i.get("name"), perm_access
if perms != []:
element = None
descs = i.findNext("span", attrs={'class' : 'normal'})
_descriptor_return = descs.next
            _descriptor_return = _descriptor_return.replace('\n', '')  # drop embedded newlines before re-joining below
_descriptor_return = _descriptor_return.split()
_descriptor_return = ' '.join(str(_d)for _d in _descriptor_return)
if isinstance(descs.next.next, Tag):
_descriptor_return += " " + descs.next.next.text
if len(next_div.findNext("h4").findAll("span")) > 2:
element = Function( i.get("name"), perms, _descriptor_return )
else:
element = Constant( i.get("name"), perms, _descriptor_return )
for perm in perms:
if package not in PERMS_API[ perm ]:
PERMS_API[ perm ][ package ] = []
PERMS_API[ perm ][ package ].append( element )
def save_file( filename ):
with open( filename, "w" ) as fd:
fd.write("PERMISSIONS = {\n")
for i in PERMS_API:
if len(PERMS_API[ i ]) > 0:
fd.write("\"%s\" : {\n" % ( i ))
for package in PERMS_API[ i ]:
if len(PERMS_API[ i ][ package ]) > 0:
fd.write("\t\"%s\" : [\n" % package)
for j in PERMS_API[ i ][ package ]:
if isinstance(j, Function):
fd.write( "\t\t[\"F\"," "\"" + j.name + "\"," + "\"" + j.desc_return + "\"]" + ",\n")
else:
fd.write( "\t\t[\"C\"," "\"" + j.name + "\"," + "\"" + j.desc_return + "\"]" + ",\n")
if len(PERMS_API[ i ][ package ]) > 0:
fd.write("\t],\n")
if len(PERMS_API[ i ]) > 0:
fd.write("},\n\n")
fd.write("}")
BASE_DOCS = sys.argv[1]
extractPerms( BASE_DOCS + MANIFEST_PERMISSION_HTML )
ANDROID_PACKAGES = [
"accessibilityservice",
"accounts",
"animation",
"app",
"appwidget",
"bluetooth",
"content",
"database",
"drm",
"gesture",
"graphics",
"hardware",
"inputmethodservice",
"location",
"media",
"net",
"nfc",
"opengl",
"os",
"preference",
"provider",
"renderscript",
"sax",
"service",
"speech",
"telephony",
"text",
"util",
"view",
"webkit",
"widget",
]
ANDROID_PACKAGES2 = [
"telephony"
]
for i in ANDROID_PACKAGES:
for root, dirs, files in os.walk( BASE_DOCS + "docs/reference/android/" + i + "/" ):
for file in files:
print "Extracting from %s" % (root + "/" + file)
#extractInformation( "/home/pouik/Bureau/android/android-sdk-linux_86/docs/reference/android/accounts/AccountManager.html" )
extractInformation( root + "/" + file )
#BASE_DOCS + "docs/reference/android/telephony/TelephonyManager.html" )
#extractInformation( BASE_DOCS + "docs/reference/android/net/sip/SipAudioCall.html" ) #android/accounts/Account.html" ) #"docs/reference/android/accounts/AccountManager.html" )
for i in PERMS_API:
if len(PERMS_API[ i ]) > 0:
print "PERMISSION ", i
for package in PERMS_API[ i ]:
print "\t package ", package
for j in PERMS_API[ i ][ package ]:
if isinstance(j, Function):
print "\t\t function : ", j.name
else:
print "\t\t constant : ", j.name
save_file( "./dvm_permissions_unformatted.py" )
#for i in soup.findAll('a') : #, attrs={'name' : re.compile("ACTION")}):
# if i.get("name") != None:
# print i.name, i.get("name")#, i.findNextSlibing(text=re.compile("READ_PHONE_STATE"))
#for i in soup.findAll(text=re.compile("READ_PHONE_STATE")):
# print i, i.parent.name, i.findPrevious(re.compile('^A')), i.findPreviousSibling(re.compile('^A'))
# if i.contents != []:
# if i.contents[0] == "READ_PHONE_STATE":
# print "here", i.parent
# parent = i.parent
# while parent.name != "A":
# parent = parent.parent
# print "step", parent
# if "class" in parent:
# print "step2", parent["class"]
# time.sleep( 1 )
# print "end", previous.name
| apache-2.0 | -6,297,110,013,047,550,000 | 32.812207 | 176 | 0.500972 | false |
maurofaccenda/ansible | contrib/inventory/gce_googleapiclient.py | 1 | 13337 | #!/usr/bin/env python
"""
Google Cloud Engine Dynamic Inventory
=====================================
Before using:
- Authentication: this script uses the same authentication as gcloud command
line. So, set it up before according to:
https://cloud.google.com/ml-engine/docs/quickstarts/command-line
- Dependencies: it depends on google-api-python-client and docoptcfg. To
install them, run:
$ pip install google-api-python-client docoptcfg
All parameters can be set in the following 3 different ways (in the order of
precedence, least to higher):
1. gce_googleapiclient.ini file:
Check included gce_googleapiclient.ini on how to use it.
The config file name can be overridden by using --config command line
parameter or GCE_CONFIG environment variable.
2. Environment variables (prefixed by 'GCE_'):
The variables needs to be set with the same names as the parameters, but
with in UPPERCASE and underscore (_) instead of dashes (-)
Ex: to set --billing-account using environment variables you'd need to
create one called GCE_BILLING_ACCOUNT
3. Command line arguments:
Usage:
gce_googleapiclient.py [--project=PROJECT]... [--zone=ZONE]...
[--api-version=API_VERSION] [--billing-account=ACCOUNT_NAME]
[--config=CONFIG_FILE] [--num-threads=NUM_THREADS]
[options]
Arguments:
-a API_VERSION --api-version=API_VERSION The API version used to connect to GCE [default: v1]
-b ACCOUNT_NAME --billing-account=ACCOUNT_NAME The billing account associated with the projects you want to get
                                                    information. It is only needed to get the list of the projects
                                                    (when the --project parameter isn't set)
-c CONFIG_FILE --config=CONFIG_FILE Path to the config file (see docoptcfg docs) [default: ./gce_googleapiclient.ini]
-p PROJECT --project PROJECT Google Cloud projects to search for instances
-t NUM_THREADS --num-threads=NUM_THREADS Enable multi-threading, set it to NUM_THREADS [default: 4]
-z ZONE --zone ZONE Google Cloud zones to search for instances
Options:
-d --debug Set debugging level to DEBUG on log file
-h --help Prints the application help
-l --list Needed by Ansible, but actually doesn't change anything
Setting multiple values parameters:
Some parameters can have multiple values (ZONE and PROJECT) and to set them
use:
1. Command line:
$ ./gce_googleapiclient.py (...) --zone zone1 --zone zone2 (...)
2. Environment variables:
$ (...) GCE_ZONE0=zone1 GCE_ZONE1=zone2 (...) ./gce_googleapiclient.py
Obs: from docoptcfg documentation "(...) can set PREFIX_KEY=one,
PREFIX_KEY0=two, and so on (up to 99 is supported). They can also start at
1: PREFIX_KEY=one, PREFIX_KEY1=two, PREFIX_KEY2=three. They can even skip
the integer-less variable and do PREFIX_KEY0=one, PREFIX_KEY1=two and so
on. The first variable must start either integer-less or with 0."
3. Config ini file:
[gce_googleapiclient.py]
(...)
zone = zone1
zone2
(...)
Obs: It is important to have at least one space or tab char before 'zone2'
"""
from __future__ import print_function
from sys import version_info, stderr
import collections
import json
import logging as log
import threading
if version_info < (3, 0):
import Queue as queue
else:
import queue
from docoptcfg import DocoptcfgFileError
from docoptcfg import docoptcfg
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
ENV_PREFIX = 'GCE_'
DEFAULT_API_VERSION = 'v1'
def get_all_billing_projects(billing_account_name, api_version=DEFAULT_API_VERSION):
project_ids = []
credentials = GoogleCredentials.get_application_default()
service = discovery.build('cloudbilling',
version=api_version,
credentials=credentials)
# pylint: disable=no-member
request = service.billingAccounts().projects(). \
list(name=billing_account_name)
while request is not None:
response = request.execute()
# pylint: disable=no-member
request = service.billingAccounts().projects(). \
list_next(previous_request=request, previous_response=response)
for project_billing_info in response['projectBillingInfo']:
if project_billing_info['billingEnabled']:
project_ids.append(project_billing_info['projectId'])
return project_ids
def get_all_zones_in_project(projects_queue_in, projects_zones_queue_out, api_version=DEFAULT_API_VERSION):
try:
while not projects_queue_in.empty():
project = projects_queue_in.get(block=False)
log.info('Retrieving list of zones of project: %s', project)
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', api_version, credentials=credentials)
request = service.zones().list(project=project)
while request is not None:
response = request.execute()
for zone in response['items']:
projects_zones_queue_out.put((project, zone['name']))
request = service.zones().list_next(previous_request=request,
previous_response=response)
except HttpError as exception:
log.warn('Could not retrieve list of zones of project: %s', project)
log.warn(exception)
except queue.Empty:
pass
def get_instances(projects_zones_queue_in, instances_queue_out, api_version=DEFAULT_API_VERSION):
try:
while not projects_zones_queue_in.empty():
project, zone = projects_zones_queue_in.get(block=False)
log.info('Retrieving list of instances from project/zone: %s/%s', project, zone)
try:
credentials = GoogleCredentials.get_application_default()
service = discovery.build('compute', api_version, credentials=credentials)
# pylint: disable=no-member
request = service.instances().list(project=project, zone=zone)
while request is not None:
response = request.execute()
if 'items' in response:
for instance in response['items']:
instances_queue_out.put(instance)
request = service.instances().list_next(previous_request=request,
previous_response=response)
except HttpError as exception:
log.warn('Could not retrieve list of instances of project/zone: %s/%s', project, zone)
log.warn(str(exception))
# pylint: disable=no-member
except queue.Empty:
pass
def get_hostvars(instance):
hostvars = {
'gce_name': instance['name'],
'gce_id': instance['id'],
'gce_status': instance['status']
}
if instance['networkInterfaces'][0]['networkIP']:
hostvars['ansible_ssh_host'] = instance['networkInterfaces'][0]['networkIP']
if 'labels' in instance:
hostvars['gce_labels'] = instance['labels']
hostvars['gce_metadata'] = {}
for md in instance['metadata'].get('items', []):
hostvars['gce_metadata'][md['key']] = md['value']
if 'items' in instance['tags']:
hostvars['gce_tags'] = instance['tags']['items']
hostvars['gce_machine_type'] = instance['machineType'].split('/')[-1]
hostvars['gce_project'] = instance['selfLink'].split('/')[6]
hostvars['gce_zone'] = instance['zone'].split('/')[-1]
hostvars['gce_network'] = instance['networkInterfaces'][0]['network'].split('/')[-1]
for interface in instance['networkInterfaces']:
hostvars['gce_subnetwork'] = interface['subnetwork'].split('/')[-1]
access_configs = interface.get('accessConfigs', [])
for access_config in access_configs:
hostvars['gce_public_ip'] = access_config.get('natIP', None)
break # get only the first access config
hostvars['gce_private_ip'] = interface['networkIP']
break # get only the first interface
return hostvars
def get_inventory(instances):
inventory = collections.defaultdict(list)
inventory['_meta'] = collections.defaultdict(
lambda: collections.defaultdict(dict))
for instance in instances:
if instance['status'] in ['RUNNING', 'STAGING']:
inventory['_meta']['hostvars'][instance['name']] \
= get_hostvars(instance)
# populate the 'all' group with all hosts found
inventory['all'].append(instance['name'])
# create a group for every tag prefixed by 'tag_' and populate
# accordingly
for tag in instance['tags'].get('items', []):
inventory['tag_{}'.format(tag)].append(instance['name'])
project = instance['selfLink'].split('/')[6]
inventory['project_{}'.format(project)].append(instance['name'])
# zone groups are not prefixed to be compatible with the previous gce.py
zone = instance['zone'].split('/')[-1]
inventory[zone].append(instance['name'])
network = instance['networkInterfaces'][0]['network'].split('/')[-1]
inventory['network_{}'.format(network)].append(instance['name'])
inventory['status_{}'.format(instance['status'].lower())].append(instance['name'])
# instance type groups are not prefixed to be compatible with the previous gce.py
instance_type = instance['machineType'].split('/')[-1]
inventory[instance_type].append(instance['name'])
return inventory
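# Example of the inventory returned by get_inventory() for a single instance
# (hypothetical names/addresses, trimmed to the most relevant keys):
#   {
#     "_meta": {"hostvars": {"web-1": {"ansible_ssh_host": "10.132.0.2",
#                                      "gce_project": "my-project",
#                                      "gce_zone": "europe-west1-b",
#                                      "gce_status": "RUNNING"}}},
#     "all": ["web-1"],
#     "tag_http-server": ["web-1"],
#     "project_my-project": ["web-1"],
#     "europe-west1-b": ["web-1"],
#     "network_default": ["web-1"],
#     "status_running": ["web-1"],
#     "n1-standard-1": ["web-1"]
#   }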
def main(args):
if args['--debug']:
log.basicConfig(filename='gce_googleapiclient.log', level=log.DEBUG)
else:
log.basicConfig(level=log.ERROR)
project_list = args['--project']
zone_list = args['--zone']
api_version = args['--api-version']
billing_account_name = args['--billing-account']
num_threads = int(args['--num-threads'])
if not project_list and not billing_account_name:
print("ERROR: You didn't specified any project (parameter: --project) which means you want all projects."
" However, to get the list of all projects, we need the billing account name (parameter: "
" --billing-account, format: billingAccounts/XXXXXX-XXXXXX-XXXXXX)", file=stderr)
exit(1)
if num_threads < 1:
num_threads = 1
if not project_list:
project_list = get_all_billing_projects(billing_account_name)
instances = []
projects_queue = queue.Queue()
projects_zones_queue = queue.Queue()
instances_queue = queue.Queue()
for project in project_list:
projects_queue.put(project)
if not projects_queue.empty():
log.info('Spawning {} threads to get zone list on each project'.format(num_threads))
threads = []
if not zone_list:
for _ in range(0, num_threads):
project_thread = threading.Thread(target=get_all_zones_in_project,
args=(projects_queue,
projects_zones_queue,
api_version))
threads.append(project_thread)
project_thread.start()
for project_thread in threads:
project_thread.join()
else:
while not projects_queue.empty():
project = projects_queue.get()
for zone in zone_list:
projects_zones_queue.put((project, zone))
if not projects_zones_queue.empty():
threads = []
for _ in range(0, num_threads):
zone_thread = threading.Thread(target=get_instances,
args=(projects_zones_queue,
instances_queue,
api_version))
threads.append(zone_thread)
zone_thread.start()
for zone_thread in threads:
zone_thread.join()
while not instances_queue.empty():
instances.append(instances_queue.get())
inventory_json = get_inventory(instances)
print(json.dumps(inventory_json,
sort_keys=True,
indent=2))
if __name__ == "__main__":
log.basicConfig(filename='gce_googleapiclient.log', level=log.DEBUG)
try:
ARGS = docoptcfg(__doc__,
config_option='--config',
env_prefix=ENV_PREFIX)
except DocoptcfgFileError as exc:
log.info('Failed reading: %s', str(exc))
ARGS = docoptcfg(__doc__, env_prefix=ENV_PREFIX)
main(ARGS)
| gpl-3.0 | 7,408,946,137,983,462,000 | 35.64011 | 129 | 0.59931 | false |
armills/agocontrol | devices/scheduler/agoscheduler.py | 2 | 31606 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# agoscheduler
# copyright (c) 2014 tang ([email protected])
import sys
import os
import agoclient
import threading
import time
import logging
import json
import copy
from dateutil.relativedelta import *
from dateutil.rrule import *
from dateutil.parser import *
from datetime import datetime
from qpid.datatypes import uuid4
from bisect import bisect_left, bisect_right
from operator import itemgetter
client = None
allSchedules = None #(scheduleid, schedule)
timeSchedules = None #(timestamp, scheduleid)
scenarioControllerUuid = None
nowUtc = None
#logging.basicConfig(filename='/opt/agocontrol/agoscheduler.log', level=logging.INFO, format="%(asctime)s %(levelname)s : %(message)s")
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(name)s %(levelname)s : %(message)s")
#=================================
#classes
#=================================
class SortedCollection(object):
"""constants"""
LEFT = 0
RIGHT = 1
"""SortedCollection from http://code.activestate.com/recipes/577197-sortedcollection/"""
def __init__(self, iterable=(), key=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
if isinstance( i,slice ):
sc = self.__class__( key=self._key )
sc._keys = self._keys[i]
sc._items = self._items[i]
return sc
else:
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
        'Return index of the first item with key == item. Raise ValueError if not found.'
i = bisect_left(self._keys, item)
if i != len(self) and self._keys[i] == item:
return i
raise ValueError('No item found with key equal to: %r' % (item,))
def index_le(self, k):
'Return last index with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return i-1
raise ValueError('No index found with key at or below: %r' % (k,))
def index_lt(self, k):
'Return last index with a key < k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i:
return i-1
raise ValueError('No index found with key below: %r' % (k,))
def index_ge(self, k):
'Return first index with a key >= equal to k. Raise ValueError if not found'
i = bisect_left(self._keys, k)
if i != len(self):
return i
raise ValueError('No index found with key at or above: %r' % (k,))
def index_gt(self, k):
'Return first index with a key > k. Raise ValueError if not found'
i = bisect_right(self._keys, k)
if i != len(self):
return i
raise ValueError('No index found with key above: %r' % (k,))
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item, direction=LEFT):
        'Insert a new item. If equal keys are found, add to the given side (left by default)'
k = self._key(item)
if direction==SortedCollection.LEFT:
i = bisect_left(self._keys, k)
else:
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
        'Remove first item with the given key. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def removeIndex(self, index):
'Remove item at specified index'
del self._keys[index]
del self._items[index]
def find(self, k):
'Return first item with a key == k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_all(self, k, getter=None):
'Find all items with specified key. If getter specified, return values'
out = []
try:
i = self.index(k)
if getter:
out.append(getter(self._items[i]))
else:
out.append(self._items[i])
i += 1
while i<len(self) and self._keys[i]==k:
if getter:
out.append(getter(self._items[i]))
else:
out.append(self._items[i])
i += 1
except ValueError:
pass
return out
def find_le(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, k):
'Return last item with a key < k. Raise ValueError if not found.'
i = bisect_left(self._keys, k)
if i:
return self._items[i-1]
raise ValueError('No item found with key below: %r' % (k,))
def find_ge(self, k):
'Return first item with a key >= equal to k. Raise ValueError if not found'
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_gt(self, k):
'Return first item with a key > k. Raise ValueError if not found'
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
def find_range(self, low, high):
'Return sorted collection in range low<=items<=high'
items = self._items[self.index_ge(low):self.index_le(high)+1]
sc = self.__class__(items, key=self._key)
return sc
def get_values(self, getter):
'Return map of items values according to specified getter function'
return map(getter, self._items)
def get_keys(self):
'Return map of keys according to specified getter function'
return self._keys
def get(self, index):
return self._items[index]
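# Usage sketch (illustrative timestamps/ids) of how this collection is keyed
# below with operator.itemgetter over (key, payload) tuples:
#   sc = SortedCollection([], key=itemgetter(0))
#   sc.insert((1393416000, {'id': 'abcd', 'scenario': 'uuid-start'}))
#   sc.insert((1393419600, {'id': 'efgh', 'scenario': 'uuid-end'}))
#   sc.find_all(1393416000, itemgetter(1))  # -> [{'id': 'abcd', 'scenario': 'uuid-start'}]
#   sc.find_range(1393416000, 1393419600)   # -> both entries, still sorted by key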
#=================================
#utils
#=================================
def quit(msg):
"""Exit application"""
global client
if client:
del client
client = None
logging.fatal(msg)
sys.exit(0)
def getScenarioControllerUuid():
"""get scenariocontroller uuid"""
global client, scenarioControllerUuid
inventory = client.getInventory()
for uuid in inventory.content['devices']:
if inventory.content['devices'][uuid]['devicetype']=='scenariocontroller':
scenarioControllerUuid = uuid
break
if not scenarioControllerUuid:
raise Exception('scenariocontroller uuid not found!')
def checkContent(content, params):
"""Check if all params are in content"""
for param in params:
if not content.has_key(param):
return False
return True
def momentjsToPython(momentjsDatetime):
"""convert momentjs datetime to python datetime
format: 2014-02-11T09:00:00+01:00"""
return parse(momentjsDatetime)
def pythonToMomentjs(dt):
"""Convert python datetime with tzinfo to momentjs datetime"""
if dt.tzinfo==None:
raise Exception("No timezone info on datetime.")
out = dt.strftime("%Y-%m-%dT%H:%M:%S%z")
return out[:len(out)-2]+':'+out[len(out)-2:]
def calendarToPython(fullCalendarDatetime):
"""convert fullcalendar UTC datetime to python datetime"""
return parse(fullCalendarDatetime)
def pythonToCalendar(pythonDatetime):
"""convert python datetime to fullcalendar UTC datetime
format: 2014-02-26T12:00:00.000Z"""
return pythonDatetime.strftime("%Y-%m-%dT%H:%M:%SZ")
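# Conversion sketch (illustrative values):
#   momentjsToPython('2014-02-11T09:00:00+01:00')  -> tz-aware datetime(2014, 2, 11, 9, 0, ...)
#   pythonToMomentjs(<that datetime>)              -> '2014-02-11T09:00:00+01:00'
#   pythonToCalendar(datetime(2014, 2, 26, 12, 0)) -> '2014-02-26T12:00:00Z'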
def createSchedule(title, uuidStart, uuidEnd, dateStart, dateEnd, color, repeat):
"""create schedule structure
@see http://arshaw.com/fullcalendar/docs/event_data/Event_Object/"""
return {
'id': str(uuid4()),
'title': title,
'start': dateStart,
'end': dateEnd,
'color': color,
'uuidStart': uuidStart,
'uuidEnd': uuidEnd,
'repeat': repeat,
'allDay': 0
}
def saveSchedules():
"""save schedules to config file"""
global allSchedules
if not agoclient.setConfigOption('agoscheduler', 'all', json.dumps(allSchedules.get_values(itemgetter(1)))):
logging.exception('Unable to load config file')
def loadSchedules():
"""load schedules from config file"""
global allSchedules, timeSchedules
#create members
allSchedules = SortedCollection([], itemgetter(0))
timeSchedules = SortedCollection([], itemgetter(0))
#get schedules from confif file
schedules = agoclient.getConfigOption("agoscheduler", "all", "[]")
schedules = json.loads(schedules)
#and store them in sorted collection
for schedule in schedules:
addSchedule(schedule, False, False)
logging.info('Loaded %d schedules' % len(allSchedules))
def computeRecurrings(recurringDatetime, repeat, months):
global nowUtc
#get tzinfo
tzinfo = recurringDatetime.tzinfo
    #create start datetime (naive, without tzinfo, since the dateutil.rrule calls below work with naive datetimes)
    start = datetime(recurringDatetime.year, recurringDatetime.month, recurringDatetime.day, recurringDatetime.hour, recurringDatetime.minute)
    #compute end of current year (recurrences are generated up to this date)
    eoy = datetime(nowUtc.year, 12, 31) #end of year
    #compute recurrings
recurrings = []
if repeat==0:
#not a recurring schedule
recurrings = [start]
elif repeat==1:
#repeat every day
recurrings = list(rrule(DAILY, bymonth=months, until=eoy, dtstart=start))
elif repeat==7:
#repeat every week
recurrings = list(rrule(WEEKLY, bymonth=months, until=eoy, dtstart=start))
elif repeat==31:
#repeat every month
recurrings = list(rrule(MONTHLY, bymonth=months, until=eoy, dtstart=start))
elif repeat==365:
#repeat every year
#need to save at least 2 schedules, otherwise it will be lost by purge
until = start + relativedelta(years=+1, months=+1)
recurrings = list(rrule(YEARLY, until=until, dtstart=start))
    #re-add tzinfo (datetime.replace returns a new object, so rebuild the list)
    recurrings = [recurring.replace(tzinfo=tzinfo) for recurring in recurrings]
return recurrings
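# Example (illustrative): with nowUtc in February 2014, repeat=7 (weekly) and
# months=[2, 3], a schedule starting 2014-02-03 12:00 expands to one naive
# datetime per week falling in February or March (2014-02-03, 2014-02-10, ...,
# 2014-03-31). repeat=0 simply returns [start].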
def addSchedule(schedule, computeRecurring=False, append=False):
"""add schedule. /!\ Need to catch Exception
@param schedule: schedule to add
@param computeRecurring: compute recurring schedules too
@param append: append new recurring schedules
@info: datetime are internally stored in UTC"""
global nowUtc
addedSchedules = []
recurringsStart = None
recurringsEnd = None
if computeRecurring:
#compute recurring datetimes
scheduleStart = calendarToPython(schedule['start'])
scheduleEnd = calendarToPython(schedule['end'])
if not append:
            #compute schedules from now until the end of next month
recurringsStart = computeRecurrings(scheduleStart, int(schedule['repeat']), [nowUtc.month, nowUtc.month+1])
recurringsEnd = computeRecurrings(scheduleEnd, int(schedule['repeat']), [nowUtc.month, nowUtc.month+1])
else:
#compute schedules for next month
recurringsStart = computeRecurrings(scheduleStart, int(schedule['repeat']), [nowUtc.month+1])
recurringsEnd = computeRecurrings(scheduleEnd, int(schedule['repeat']), [nowUtc.month+1])
logging.debug("addSchedule: schedule=%s computeReccuring=%s append=%s" % (str(schedule), str(computeRecurring), str(append)))
logging.debug(recurringsStart)
logging.debug(recurringsEnd)
else:
recurringsStart = [calendarToPython(schedule['start'])]
recurringsEnd = [calendarToPython(schedule['end'])]
#check recurring lists content
if len(recurringsStart)!=len(recurringsEnd):
raise Exception("Recurring lists content is not equal! len(start)=%d len(end)=%d" % (len(recurringsStart), len(recurringsEnd)))
#save start scenario timestamp in timeSchedules list
for i in range(len(recurringsStart)):
#save schedule in allSchedules list
newSchedule = copy.copy(schedule)
newSchedule['start'] = pythonToCalendar(recurringsStart[i])
newSchedule['end'] = pythonToCalendar(recurringsEnd[i])
allSchedules.insert( (newSchedule['id'], newSchedule) )
addedSchedules.append(newSchedule)
#key = int(momentjsToPython(schedule['start']).strftime('%s'))
key = int(recurringsStart[i].strftime('%s'))
timeSchedules.insert( (key, {'id':newSchedule['id'], 'scenario':newSchedule['uuidStart']}) )
#save end scenario timestamp in timeSchedules list
if schedule['uuidEnd']!='0':
#key = int(momentjsToPython(schedule['end']).strftime('%s'))
key = int(recurringsEnd[i].strftime('%s'))
timeSchedules.insert( (key, {'id':newSchedule['id'], 'scenario':newSchedule['uuidEnd']}) )
return addedSchedules
def delSchedule(scheduleId):
"""delete schedule. /!\ Need to catch Exception"""
#search schedules to delete from allSchedules list
schedsToDel = allSchedules.find_all(scheduleId)
delCount = 0
for schedToDel in schedsToDel:
#remove start scenario timestamp from timeSchedules list
dateStart = int(momentjsToPython(schedToDel[1]['start']).strftime('%s'))
scheds = timeSchedules.find_all(dateStart)
for sched in scheds:
if sched[1]['id']==scheduleId:
timeSchedules.remove(sched[0])
break
#remove end scenario timestamp from timeSchedules list
dateEnd = int(momentjsToPython(schedToDel[1]['end']).strftime('%s'))
scheds = timeSchedules.find_all(dateEnd)
for sched in scheds:
if sched[1]['id']==scheduleId:
timeSchedules.remove(sched[0])
break
#and finally delete schedule from allSchedules list
allSchedules.remove(scheduleId)
delCount += 1
logging.info("%d schedules deleted" % delCount)
def purgeSchedule(schedule):
"""purge specified schedule and nothing else (don't touch on recurring schedules)"""
global allSchedules, timeSchedules
found = False
for i in range(len(allSchedules)):
if allSchedules[i][1]['id']==schedule['id'] and allSchedules[i][1]['start']==schedule['start'] and allSchedules[i][1]['end']==schedule['end']:
allSchedules.removeIndex(i)
found = True
break
    if not found:
        logging.warning('PurgeSchedule: schedule %s not found in allSchedules list' % str(schedule))
found = False
for i in range(len(timeSchedules)):
if timeSchedules[i][1]['id']==schedule['id'] and timeSchedules[i][1]['scenario']==schedule['uuidStart']:
timeSchedules.removeIndex(i)
found = True
break
    if not found:
        logging.warning('PurgeSchedule: schedule %s not found in timeSchedules list (uuidStart)' % str(schedule))
if schedule['uuidEnd']!='0':
found = False
for i in range(len(timeSchedules)):
if timeSchedules[i][1]['id']==schedule['id'] and timeSchedules[i][1]['scenario']==schedule['uuidEnd']:
timeSchedules.removeIndex(i)
found = True
break
        if not found:
            logging.warning('PurgeSchedule: schedule %s not found in timeSchedules list (uuidEnd)' % str(schedule))
def updSchedule(schedule, infos):
"""update schedule. /!\ Need to catch Exception"""
updCount = 0
#check fields to update
updateStartDate = 0
if infos['type']=='drop':
#compute time difference in minutes
updateStartDate = infos['days']*1440 + infos['minutes']
updateEndDate = 0
if infos['type']=='drop' or infos['type']=='resize':
#compute time difference in minutes
updateEndDate = infos['days']*1440 + infos['minutes']
removeEndSchedule = False
if schedule['uuidEnd']=='0':
removeEndSchedule = True
#get schedules to update
schedsToUpd = allSchedules.find_all(schedule['id'])
for schedToUpd in schedsToUpd:
#compute new start
start = momentjsToPython(schedToUpd[1]['start'])
start = start + relativedelta(minutes=updateStartDate)
#compute new end
end = momentjsToPython(schedToUpd[1]['end'])
end = end + relativedelta(minutes=updateEndDate)
if updateStartDate!=0:
#start date changed
dateStart = int(momentjsToPython(schedToUpd[1]['start']).strftime('%s'))
scheds = timeSchedules.find_all(dateStart)
for sched in scheds:
if sched[1]['id']==schedToUpd[1]['id']:
#remove old entry
timeSchedules.remove(sched[0])
#compute new start
key = int(start.strftime('%s'))
#and insert new schedule time
timeSchedules.insert( (key, {'id':schedule['id'], 'scenario':schedule['uuidStart']}) )
#and update start in allSchedules list
if updateEndDate!=0:
#end date changed
dateEnd = int(momentjsToPython(schedToUpd[1]['end']).strftime('%s'))
scheds = timeSchedules.find_all(dateEnd)
for sched in scheds:
if sched[1]['id']==schedToUpd[1]['id']:
#remove old entry
timeSchedules.remove(sched[0])
#compute new start
key = int(end.strftime('%s'))
#insert new schedule time
timeSchedules.insert( (key, {'id':schedule['id'], 'scenario':schedule['uuidEnd']}) )
#and update end in allSchedules list
if removeEndSchedule:
#no end scenario, remove all schedules
dateEnd = int(momentjsToPython(schedToUpd[1]['end']).strftime('%s'))
scheds = timeSchedules.find_all(dateEnd)
for sched in scheds:
if sched[1]['id']==schedToUpd[1]['id']:
#remove old entry
timeSchedules.remove(sched[0])
#update schedule
schedToUpd[1]['title'] = schedule['title']
schedToUpd[1]['uuidStart'] = schedule['uuidStart']
schedToUpd[1]['uuidEnd'] = schedule['uuidEnd']
schedToUpd[1]['color'] = schedule['color']
schedToUpd[1]['start'] = pythonToMomentjs(start)
schedToUpd[1]['end'] = pythonToMomentjs(end)
updCount += 1
logging.info("%d schedules updated" % updCount)
#=================================
#functions
#=================================
def commandHandler(internalid, content):
"""ago command handler"""
logging.info('commandHandler: %s, %s' % (internalid,content))
global client, allSchedules
command = None
if content.has_key('command'):
command = content['command']
else:
logging.error('No command specified')
return None
if internalid=='agoscheduler':
if command=='addSchedule':
#add new schedule
scheds = None
if checkContent(content, ['title', 'uuidStart', 'uuidEnd', 'dateStart', 'dateEnd', 'color', 'repeat']):
try:
#create new schedule
sched = createSchedule(content['title'], content['uuidStart'], content['uuidEnd'], content['dateStart'], content['dateEnd'], content['color'], content['repeat'])
#add schedule
scheds = addSchedule(sched, True, False)
logging.info("%d schedules added" % len(scheds))
#save updates
saveSchedules()
except:
logging.exception('Unable to add new schedule:')
return {'error':1, 'msg':'#ie'}
else:
logging.error("Command addSchedule: parameter missing")
return {'error':1, 'msg':'#ie'}
return {'error':0, 'msg':'', 'schedules':scheds}
elif command=='delSchedule':
#delete schedule
if checkContent(content, ['id']):
try:
#delete schedule
delSchedule(content['id'])
#and save updates
saveSchedules()
except ValueError:
logging.exception('Unable to delete schedule:')
return {'error':1, 'msg':'#ie'}
else:
logging.error('Command delSchedule: parameter missing')
return {'error':1, 'msg':'#ie'}
return {'error':0, 'msg':''}
elif command=='updSchedule':
#update schedule
#if checkContent(content, ['id', 'title', 'uuidStart', 'uuidEnd', 'dateStart', 'dateEnd', 'color', 'repeat']):
if checkContent(content, ['schedule', 'infos']):
#infos format:
# type: drop, resize, update
# days: offset days
# minutes: offset minutes
try:
#update schedule
updSchedule(content['schedule'], content['infos'])
#and save updates
saveSchedules()
logging.info(allSchedules)
logging.info(timeSchedules)
except ValueError:
#not found
logging.exception('Unable to update schedule:')
return {'error':1, 'msg':'#ie'}
else:
logging.error('Command updSchedule: parameter missing')
return {'error':1, 'msg':'#ie'}
return {'error':0, 'msg':''}
elif command=='getSchedules':
#return all schedules
return {'error':0, 'msg':'', 'schedules':allSchedules.get_values(itemgetter(1))}
def eventHandler(event, content):
"""ago event handler"""
#logging.info('eventHandler: %s, %s' % (event, content))
global client, timeSchedules, nowUtc
if event=='event.environment.timechanged':
try:
#format: {u'hour': 15, u'month': 2, u'second': 0, u'weekday': 6, u'year': 2014, u'yday': 46, u'day': 15, u'minute': 37}
#convert received datetime to timestamp UTC
currentDtLocal = datetime(content['year'], content['month'], content['day'], content['hour'], content['minute'], 0)
currentTsLocal = int(currentDtLocal.strftime('%s'))
currentTsUtc = int(time.mktime(time.gmtime(time.mktime(currentDtLocal.timetuple()))))
currentDtUtc = datetime.fromtimestamp(currentTsUtc)
#search scenarios to execute
schedules = timeSchedules.find_all(currentTsUtc, itemgetter(1))
#get scenario controller uuid
if not scenarioControllerUuid:
getScenarioControllerUuid()
#execute scenarios
for schedule in schedules:
logging.info('Execute scenario id "%s"' % schedule['scenario'])
client.sendMessage(None, {'uuid':scenarioControllerUuid, 'command':'run', 'internalid':schedule['scenario']})
#each new year append yearly recurring schedules
if currentDtLocal.year!=nowUtc.year:
for schedule in allSchedules:
if schedule[1]['repeat']=='365':
addSchedule(schedule[1], True, True)
            #each month, purge executed schedules and append existing recurring schedules automatically
if currentDtLocal.month!=nowUtc.month:
#purge old schedules
try:
startTsUtc = int((nowUtc + relativedelta(months=-1)).strftime('%s'))
doneSchedules = timeSchedules.find_range(startTsUtc, currentTsUtc)
for doneSchedule in doneSchedules:
purgeSchedule(doneSchedule[1])
logging.info('Monthly purge removed %d schedules' % len(doneSchedules))
except:
logging.exception('Monthly schedulings purge failed:')
#add new recurring schedules for next month
try:
#get schedules in current month
endTsUtc = int((currentDtUtc + relativedelta(months=+1)).strftime('%s'))
monthSchedules = timeSchedules.find_range(currentTsUtc, endTsUtc)
#filter recurrings to keep only first occurence
updSchedules = []
for monthSchedule in monthSchedules:
if updSchedules.count(monthSchedule[1]['id'])==0:
updSchedules.append(monthSchedule[1]['id'])
#append new schedules for next month
for updSchedule in updSchedules:
schedule = allSchedules.find(updSchedule)
logging.info(schedule)
if schedule[1]['repeat']!='0':
addSchedule(schedule[1], True, True)
except ValueError:
#no schedules found
logging.info('No recurring schedules to append')
pass
except:
logging.exception('Exception on timechanged event:')
#update current datetime
nowUtc = datetime.utcnow()
#=================================
#main
#=================================
#init
try:
#update current datetime
nowUtc = datetime.utcnow()
#connect agoclient
client = agoclient.AgoConnection('agoscheduler')
#members
loadSchedules()
#add client handlers
client.addHandler(commandHandler)
client.addEventHandler(eventHandler)
#add controller
client.addDevice('agoscheduler', 'agoscheduler')
except Exception as e:
#init failed
logging.exception("Exception on init")
quit('Init failed, exit now.')
#Unitary tests
"""
def calDt(string):
return string % (nowUtc.year, nowUtc.month, nowUtc.day)
repeat_no = createSchedule('test_repeatno' , '1234-1234-1234', '0', calDt('%s-%s-%sT12:00:00.000Z'), calDt('%s-%s-%sT12:30:00.000Z'), '#FF0000', '0')
repeat_day = createSchedule('test_repeatday' , '1234-1234-1234', '0', calDt('%s-%s-%sT13:00:00.000Z'), calDt('%s-%s-%sT13:30:00.000Z'), '#FF0000', '1')
repeat_week = createSchedule('test_repeatweek' , '1234-1234-1234', '0', calDt('%s-%s-%sT14:00:00.000Z'), calDt('%s-%s-%sT14:30:00.000Z'), '#FF0000', '7')
repeat_month = createSchedule('test_repeatmonth', '1234-1234-1234', '0', calDt('%s-%s-%sT15:00:00.000Z'), calDt('%s-%s-%sT15:30:00.000Z'), '#FF0000', '31')
repeat_year = createSchedule('test_repeatyear' , '1234-1234-1234', '0', calDt('%s-%s-%sT16:00:00.000Z'), calDt('%s-%s-%sT16:30:00.000Z'), '#FF0000', '365')
####################################
#CHANGE HERE SCHEDULES TO TEST (NO, DAY, WEEK, MONTH, YEAR)
addSchedule(repeat_year, True, False)
####################################
"""
"""
logging.info('----------Add schedules----------')
for sched in allSchedules:
logging.info(sched[1])
#force now datetime to 1st of next month to simulate schedules appending
nowUtc = datetime(nowUtc.year, nowUtc.month, 1, 0, 0, 0)
currentTsUtc = int(nowUtc.strftime('%s'))
currentDtUtc = datetime.fromtimestamp(currentTsUtc)
endTsUtc = int((currentDtUtc + relativedelta(months=+1, days=-1)).strftime('%s'))
endDtUtc = datetime.fromtimestamp(endTsUtc)
logging.info('current=%s end=%s' % (str(currentTsUtc), str(endTsUtc)))
logging.info('current=%s end=%s' % (str(currentDtUtc), str(endDtUtc)))
monthSchedules = []
logging.info('----------Schedules in range [%s-%s]----------' % (str(currentDtUtc), str(endDtUtc)))
try:
monthSchedules = timeSchedules.find_range(currentTsUtc, endTsUtc)
for sched in monthSchedules:
logging.info(sched)
except ValueError:
logging.info('No schedules to append')
updSchedules = []
for monthSchedule in monthSchedules:
if updSchedules.count(monthSchedule[1]['id'])==0:
updSchedules.append(monthSchedule[1]['id'])
logging.info('----------Schedules in range [%s-%s] after purge----------' % (str(currentDtUtc), str(endDtUtc)))
for sched in updSchedules:
logging.info(sched)
logging.info('----------Append schedules----------')
for updSchedule in updSchedules:
schedule = allSchedules.find(updSchedule) #return last inserted so the oldest one
logging.info(' -> base schedule %s' % str(schedule))
if int(schedule[1]['repeat'])!=0:
addSchedule(schedule[1], True, True)
logging.info('----------All schedules after append----------')
for sched in allSchedules:
logging.info(sched[1])
"""
"""
logging.info('----------Purge schedules----------')
try:
startDtUtc = nowUtc + relativedelta(months=-1)
startTsUtc = int(startDtUtc.strftime('%s'))
logging.info('now=%s start=%s' % (str(nowUtc), str(startDtUtc)))
currentTsUtc = int(nowUtc.strftime('%s'))
logging.info('now=%s start=%s' % (str(currentTsUtc), str(startTsUtc)))
doneSchedules = timeSchedules.find_range(startTsUtc, currentTsUtc)
logging.info(doneSchedules)
for doneSchedule in doneSchedules:
logging.info('->purge: %s' % str(doneSchedule))
logging.info('Monthly purge removed %d schedules' % len(doneSchedules))
except:
logging.exception('Monthly schedulings purge failed:')
"""
"""
quit('----------End of tests----------')
"""
#run agoclient
try:
logging.info('Running agoscheduler...')
client.run()
except KeyboardInterrupt:
#stopped by user
quit('agoscheduler stopped by user')
except Exception as e:
logging.exception("Exception on main:")
#stop everything
quit('agoscheduler stopped')
| gpl-3.0 | -5,674,381,481,719,515,000 | 38.556946 | 181 | 0.593811 | false |
jrising/prospectus-tools | gcp/extract/single.py | 1 | 1948 | """
Usage: `python single.py OPTIONS FILEPATH`
Supported configuration options:
- config (default: none): read the options from a config file
- column (default: `rebased`)
- yearsets (default: `no`)
- year or years (default: `null`)
- region or regions (default: `null`)
"""
import sys, csv
from lib import bundles, configs
config, argv = configs.consume_config()
configs.handle_multiimpact_vcv(config)
columns, basenames, transforms, vectransforms = configs.interpret_filenames(argv, config)
data = {} # {region => { year => value }}
for ii in range(len(basenames)):
for region, years, values in bundles.iterate_regions(basenames[ii], columns[ii], config):
if region not in data:
data[region] = {}
for year, value in bundles.iterate_values(years, values, config):
if region == 'all':
value = vectransforms[ii](value)
else:
value = transforms[ii](value)
if year not in data[region]:
data[region][year] = value
else:
data[region][year] += value
writer = csv.writer(sys.stdout)
writer.writerow(['region', 'year', 'value'])
for region in data:
if region == 'all':
for rr in range(len(config['regionorder'])):
for year in data[region]:
if bundles.deltamethod_vcv is not None:
value = bundles.deltamethod_vcv.dot(data[region][year][:, rr]).dot(data[region][year][:, rr])
else:
value = data[region][year][rr]
writer.writerow([config['regionorder'][rr], year, value])
else:
for year in data[region]:
if bundles.deltamethod_vcv is not None:
value = bundles.deltamethod_vcv.dot(data[region][year]).dot(data[region][year])
else:
                value = data[region][year]  # per-region values are scalars here
writer.writerow([region, year, value])
| mit | 1,144,895,587,784,701,300 | 35.074074 | 113 | 0.588809 | false |
jschultz/Gooey | gooey/gui/windows/header.py | 2 | 2603 | '''
Created on Dec 23, 2013
@author: Chris
'''
import wx
from gooey.gui import imageutil, image_repository
from gooey.gui.util import wx_util
PAD_SIZE = 10
class FrameHeader(wx.Panel):
def __init__(self, parent=None, **kwargs):
wx.Panel.__init__(self, parent, **kwargs)
self.SetDoubleBuffered(True)
self._header = None
self._subheader = None
self.settings_img = None
self.running_img = None
self.check_mark = None
self.error_symbol = None
self._init_properties()
self._init_components()
self._do_layout()
@property
def title(self):
return self._header.GetLabel()
@title.setter
def title(self, text):
self._header.SetLabel(text)
@property
def subtitle(self):
return self._subheader.GetLabel()
@subtitle.setter
def subtitle(self, text):
self._subheader.SetLabel(text)
def _init_properties(self):
self.SetBackgroundColour('#ffffff')
self.SetSize((30, 90))
self.SetMinSize((120, 80))
def _init_components(self):
self._header = wx_util.h1(self, '')
self._subheader = wx.StaticText(self, label='')
self.settings_img = self._load_image(image_repository.config_icon, height=79)
self.running_img = self._load_image(image_repository.running_icon, 79)
self.check_mark = self._load_image(image_repository.success_icon, height=75)
self.error_symbol = self._load_image(image_repository.error_icon, height=75)
def _do_layout(self):
vsizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
headings_sizer = self.build_heading_sizer()
sizer.Add(headings_sizer, 1, wx.ALIGN_LEFT | wx.ALIGN_CENTER_HORIZONTAL | wx.EXPAND | wx.LEFT, PAD_SIZE)
sizer.Add(self.settings_img, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
sizer.Add(self.running_img, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
sizer.Add(self.check_mark, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
sizer.Add(self.error_symbol, 0, wx.ALIGN_RIGHT | wx.EXPAND | wx.RIGHT, PAD_SIZE)
self.running_img.Hide()
self.check_mark.Hide()
self.error_symbol.Hide()
vsizer.Add(sizer, 1, wx.EXPAND)
self.SetSizer(vsizer)
def _load_image(self, img_path, height=70):
return imageutil.resize_bitmap(self, imageutil._load_image(img_path), height)
def build_heading_sizer(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddStretchSpacer(1)
sizer.Add(self._header, 0)
sizer.Add(self._subheader, 0)
sizer.AddStretchSpacer(1)
return sizer
| mit | -4,890,559,889,068,914,000 | 27.579545 | 108 | 0.65655 | false |
UdeM-LBIT/GAPol | lib/TreeLib/params.py | 1 | 1616 | # This file is part of profileNJ
#
# params set duplication and losses cost for profileNJ and reconciliation
__author__ = "Emmanuel Noutahi"
import hashlib
import numpy as np
cdup, closs = 1, 1
dupcost, losscost = {}, {}
internal_type = 0
def set(dup, loss, constdlcost=(1, 1), internal_mode='default'):
global dupcost, losscost
global cdup, closs
global internal_type
dupcost, losscost = dup, loss
cdup, closs = constdlcost
internal_type = 1 if internal_mode == 'mean' else 0
def get_hash(splist):
if not isinstance(splist, basestring):
splist = ",".join(sorted(splist))
return hashlib.sha384(splist).hexdigest()
def getdup(specie=None):
global dupcost, cdup
slist = specie
if specie and not isinstance(specie, basestring):
slist = specie.get_leaf_names()
if len(slist) > 1:
return get_internal(specie, getdup, ctype="dup")
else:
return dupcost.get(get_hash(slist), cdup)
def getloss(specie=None):
global losscost, closs
slist = specie
if specie and not isinstance(specie, basestring):
slist = specie.get_leaf_names()
if len(slist) > 1:
return get_internal(specie, getloss)
else:
return losscost.get(get_hash(slist), closs)
def get_internal(specie, costfun, ctype="loss"):
global internal_type, closs, cdup
if not isinstance(specie, basestring) and (specie.is_internal() or specie.is_root()) and internal_type == 1:
defcost = np.mean([costfun(s) for s in specie.get_leaves()])
else:
defcost = closs if ctype == 'loss' else cdup
    return defcost
| gpl-3.0 | 5,027,785,877,860,318,000 | 26.87931 | 112 | 0.664604 | false |
fulfilio/trytond-sale-available-stock | setup.py | 2 | 3914 | #!/usr/bin/env python
import re
import os
import sys
import time
import unittest
import ConfigParser
from setuptools import setup, Command
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = []
MODULE2PREFIX = {}
MODULE = "sale_available_stock"
PREFIX = "fio"
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version,
minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="",
author="Fulfil.IO Inc., Openlabs Technologies and Consulting (P) Ltd.",
author_email='[email protected]',
url='http://www.fulfil.io/',
long_description=open('README.rst').read(),
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE: info.get('xml', []) +
info.get('translation', []) +
['tryton.cfg', 'locale/*.po', 'tests/*.rst', 'reports/*.odt'] +
['view/*.xml'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
license='BSD',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
cmdclass={
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
}
)
| bsd-3-clause | -500,966,380,051,378,200 | 25.808219 | 79 | 0.586357 | false |
wong2/sentry | tests/sentry/api/endpoints/test_project_releases.py | 4 | 3567 | from __future__ import absolute_import
from datetime import datetime
from django.core.urlresolvers import reverse
from sentry.models import Release
from sentry.testutils import APITestCase
class ProjectReleaseListTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project1 = self.create_project(team=team, name='foo')
project2 = self.create_project(team=team, name='bar')
release1 = Release.objects.create(
project=project1,
version='1',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
release2 = Release.objects.create(
project=project1,
version='2',
date_added=datetime(2013, 8, 14, 3, 8, 24, 880386),
)
Release.objects.create(
project=project2,
version='1',
)
url = reverse('sentry-api-0-project-releases', kwargs={
'organization_slug': project1.organization.slug,
'project_slug': project1.slug,
})
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert response.data[0]['version'] == release2.version
assert response.data[1]['version'] == release1.version
def test_query_filter(self):
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(team=team, name='foo')
release = Release.objects.create(
project=project,
version='foobar',
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386),
)
url = reverse('sentry-api-0-project-releases', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.get(url + '?query=foo', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['version'] == release.version
response = self.client.get(url + '?query=bar', format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 0
class ProjectReleaseCreateTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(team=team, name='foo')
url = reverse('sentry-api-0-project-releases', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.post(url, data={
'version': '1.2.1',
})
assert response.status_code == 201, response.content
assert response.data['version']
assert Release.objects.filter(
project=project,
version=response.data['version'],
).exists()
def test_duplicate(self):
self.login_as(user=self.user)
team = self.create_team()
project = self.create_project(team=team, name='foo')
Release.objects.create(version='1.2.1', project=project)
url = reverse('sentry-api-0-project-releases', kwargs={
'organization_slug': project.organization.slug,
'project_slug': project.slug,
})
response = self.client.post(url, data={
'version': '1.2.1',
})
assert response.status_code == 400, response.content
| bsd-3-clause | -794,492,773,894,965,900 | 30.848214 | 69 | 0.594617 | false |
hydroshare/hydroshare_temp | hs_core/tests/api/http/test_get_revisions_view.py | 1 | 3636 | __author__ = 'shaunjl'
"""
Tastypie REST API tests for resolveDOI(view) modeled entirely after
test_get_revisions.py written by Pabitra Dash
"""
from tastypie.test import ResourceTestCase, TestApiClient
from django.contrib.auth.models import User
from hs_core import hydroshare
from tastypie.serializers import Serializer
class TestGetRevisionsAPI(ResourceTestCase):
serializer = Serializer()
def setUp(self):
self.api_client = TestApiClient()
user = hydroshare.create_account(
'[email protected]',
username='user0',
first_name='User0_FirstName',
last_name='User0_LastName',
)
self.res = hydroshare.create_resource('GenericResource', user, 'myres')
def tearDown(self):
User.objects.all().delete()
hydroshare.delete_resource(self.res.short_id)
def test_get_revisions(self):
url = 'hsapi/revisions/{0}/'.format(self.res.short_id)
resp = self.api_client.get(url)
res_revisions = self.deserialize(resp)
self.assertValidJSONResponse(resp)
self.assertEqual(len(res_revisions), 1)
self.assertEqual(hydroshare.get_revisions(self.res.short_id), res_revisions)
resource_changed_by = hydroshare.create_account(
'[email protected]',
username='user1',
first_name='User1_FirstName',
last_name='User1_LastName'
)
hydroshare.utils.resource_modified(self.res, resource_changed_by)
resp = self.api_client.get(url)
res_revisions = self.deserialize(resp)
self.assertValidJSONResponse(resp)
self.assertEqual(len(res_revisions), 2)
self.assertEqual(hydroshare.get_revisions(self.res.short_id), res_revisions)
# test that each revision has a different time stamp
self.assertNotEqual(res_revisions[0].timestamp, res_revisions[1].timestamp)
# test that each resource revision has the same resource id
for bags in res_revisions:
self.assertEqual(self.res.id, bags.object_id)
# add a file to the resource to generate another revision of the resource
# create a file
original_file_name = 'original.txt'
original_file = open(original_file_name, 'w')
original_file.write("original text")
original_file.close()
original_file = open(original_file_name, 'r')
# add the file to the resource
hydroshare.add_resource_files(self.res.short_id, original_file)
resp = self.api_client.get(url)
res_revisions = self.deserialize(resp)
self.assertValidJSONResponse(resp)
# test that we now have 3 revisions
self.assertEqual(len(res_revisions), 3)
self.assertEqual(hydroshare.get_revisions(self.res.short_id), res_revisions)
# test that each revision has a different time stamp
self.assertNotEqual(res_revisions[0].timestamp, res_revisions[1].timestamp)
self.assertNotEqual(res_revisions[0].timestamp, res_revisions[2].timestamp)
self.assertNotEqual(res_revisions[1].timestamp, res_revisions[2].timestamp)
# delete the file in the resource to create another revision of the resource
hydroshare.delete_resource_file(self.res.short_id, original_file_name)
resp = self.api_client.get(url)
res_revisions = self.deserialize(resp)
self.assertValidJSONResponse(resp)
self.assertEqual(hydroshare.get_revisions(self.res.short_id), res_revisions)
# test that we now have 4 revisions
self.assertEqual(len(res_revisions), 4)
| bsd-3-clause | 745,294,527,247,112,600 | 37.680851 | 84 | 0.666117 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/case/test_rule_007.py | 1 | 1203 |
import os
import unittest
from vsg.rules import case
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_007_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_007_test_input.fixed.vhd'), lExpected)
class test_case_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_007(self):
oRule = case.rule_007()
oRule.allow_comments = True
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'case')
self.assertEqual(oRule.identifier, '007')
lExpected = [24]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_007(self):
oRule = case.rule_007()
oRule.allow_comments = True
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| gpl-3.0 | -2,404,013,222,632,069,600 | 24.0625 | 106 | 0.66916 | false |
GroestlCoin/electrum-grs | electrum_grs/plugins/revealer/hmac_drbg.py | 9 | 1826 | '''
Copyright (c) 2014 David Lazar <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import hashlib
import hmac
class DRBG(object):
def __init__(self, seed):
self.key = b'\x00' * 64
self.val = b'\x01' * 64
self.reseed(seed)
def hmac(self, key, val):
return hmac.new(key, val, hashlib.sha512).digest()
def reseed(self, data=b''):
self.key = self.hmac(self.key, self.val + b'\x00' + data)
self.val = self.hmac(self.key, self.val)
if data:
self.key = self.hmac(self.key, self.val + b'\x01' + data)
self.val = self.hmac(self.key, self.val)
def generate(self, n):
xs = b''
while len(xs) < n:
self.val = self.hmac(self.key, self.val)
xs += self.val
self.reseed()
return xs[:n]
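# A minimal usage sketch (not part of the original module): HMAC-DRBG output is
# deterministic, so two generators seeded with the same bytes produce the same
# stream. The seed value below is illustrative only.
if __name__ == '__main__':
    drbg = DRBG(b'example seed')
    first = drbg.generate(32)   # first 32 bytes of the stream
    second = drbg.generate(32)  # next 32 bytes, different from `first`
    assert first != second
    assert DRBG(b'example seed').generate(32) == first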
| gpl-3.0 | -7,607,971,278,986,841,000 | 34.803922 | 77 | 0.686199 | false |
wemanuel/smry | smry/server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudapis/apitools/base/py/util.py | 4 | 7010 | """Assorted utilities shared between parts of apitools."""
import collections
import os
import random
from protorpc import messages
import six
from six.moves import http_client
import six.moves.urllib.error as urllib_error
import six.moves.urllib.parse as urllib_parse
import six.moves.urllib.request as urllib_request
from googlecloudapis.apitools.base.py import encoding
from googlecloudapis.apitools.base.py import exceptions
__all__ = [
'DetectGae',
'DetectGce',
]
_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;="
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
"""Determine whether or not we're running on GCE.
This is based on:
https://cloud.google.com/compute/docs/metadata#runninggce
Returns:
True iff we're running on a GCE instance.
"""
try:
o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
urllib_request.Request('http://metadata.google.internal'))
except urllib_error.URLError:
return False
return (o.getcode() == http_client.OK and
o.headers.get('metadata-flavor') == 'Google')
def NormalizeScopes(scope_spec):
"""Normalize scope_spec to a set of strings."""
if isinstance(scope_spec, six.string_types):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
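# Hedged illustration (not part of the original module): scope specs may be a
# space-delimited string or any iterable of scope strings, e.g.
#   NormalizeScopes('email profile')      -> set(['email', 'profile'])
#   NormalizeScopes(['email', 'profile']) -> set(['email', 'profile'])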
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
def ExpandRelativePath(method_config, params, relative_path=None):
"""Determine the relative path for request."""
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
param_template = '{%s}' % param
# For more details about "reserved word expansion", see:
# http://tools.ietf.org/html/rfc6570#section-3.2.2
reserved_chars = ''
reserved_template = '{+%s}' % param
if reserved_template in path:
reserved_chars = _RESERVED_URI_CHARS
path = path.replace(reserved_template, param_template)
if param_template not in path:
raise exceptions.InvalidUserInputError(
'Missing path parameter %s' % param)
try:
# TODO(craigcitro): Do we want to support some sophisticated
# mapping here?
value = params[param]
except KeyError:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
if value is None:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
try:
if not isinstance(value, six.string_types):
value = str(value)
path = path.replace(param_template,
urllib_parse.quote(value.encode('utf_8'),
reserved_chars))
except TypeError as e:
raise exceptions.InvalidUserInputError(
'Error setting required parameter %s to value %s: %s' % (
param, value, e))
return path
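# A hedged usage sketch (not part of the original module); `_FakeMethodConfig`
# below is illustrative only and stands in for a real generated method config.
if __name__ == '__main__':
  _FakeMethodConfig = collections.namedtuple(
      '_FakeMethodConfig', ['relative_path', 'path_params'])
  config = _FakeMethodConfig(
      relative_path='projects/{project}/zones/{+zone}',
      path_params=['project', 'zone'])
  # '{project}' percent-encodes its value; '{+zone}' keeps reserved characters
  # such as '/' unescaped ("reserved expansion").
  print(ExpandRelativePath(config, {'project': 'my proj',
                                    'zone': 'us/central1-a'}))
  # -> projects/my%20proj/zones/us/central1-a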
def CalculateWaitForRetry(retry_attempt, max_wait=60):
"""Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts.
A random amount of jitter is added to spread out retry attempts from different
clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time.
Returns:
Amount of time to wait before retrying request.
"""
wait_time = 2 ** retry_attempt
# randrange requires a nonzero interval, so we want to drop it if
# the range is too small for jitter.
if retry_attempt:
max_jitter = (2 ** retry_attempt) / 2
wait_time += random.randrange(-max_jitter, max_jitter)
return min(wait_time, max_wait)
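# Hedged illustration (not part of the original module): the wait grows as
# 2 ** retry_attempt seconds with +/- 50% random jitter, capped at max_wait,
# e.g. CalculateWaitForRetry(3) falls roughly in the [4, 12) second range and
# CalculateWaitForRetry(10, max_wait=30) is at most 30.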
def AcceptableMimeType(accept_patterns, mime_type):
"""Return True iff mime_type is acceptable for one of accept_patterns.
Note that this function assumes that all patterns in accept_patterns
will be simple types of the form "type/subtype", where one or both
of these can be "*". We do not support parameters (i.e. "; q=") in
patterns.
Args:
accept_patterns: list of acceptable MIME types.
mime_type: the mime type we would like to match.
Returns:
Whether or not mime_type matches (at least) one of these patterns.
"""
unsupported_patterns = [p for p in accept_patterns if ';' in p]
if unsupported_patterns:
raise exceptions.GeneratedClientError(
'MIME patterns with parameter unsupported: "%s"' % ', '.join(
unsupported_patterns))
def MimeTypeMatches(pattern, mime_type):
"""Return True iff mime_type is acceptable for pattern."""
# Some systems use a single '*' instead of '*/*'.
if pattern == '*':
pattern = '*/*'
return all(accept in ('*', provided) for accept, provided
in zip(pattern.split('/'), mime_type.split('/')))
return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns)
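# Hedged usage sketch (not part of the original module): patterns are plain
# "type/subtype" strings where either side may be the '*' wildcard.
if __name__ == '__main__':
  assert AcceptableMimeType(['application/json', 'text/*'], 'text/plain')
  assert AcceptableMimeType(['*/*'], 'image/png')
  assert not AcceptableMimeType(['application/json'], 'text/plain')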
def MapParamNames(params, request_type):
"""Reverse parameter remappings for URL construction."""
return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p
for p in params]
def MapRequestParams(params, request_type):
"""Perform any renames/remappings needed for URL construction.
Currently, we have several ways to customize JSON encoding, in
particular of field names and enums. This works fine for JSON
bodies, but also needs to be applied for path and query parameters
in the URL.
This function takes a dictionary from param names to values, and
performs any registered mappings. We also need the request type (to
look up the mappings).
Args:
params: (dict) Map from param names to values
request_type: (protorpc.messages.Message) request type for this API call
Returns:
A new dict of the same size, with all registered mappings applied.
"""
new_params = dict(params)
for param_name, value in params.items():
field_remapping = encoding.GetCustomJsonFieldMapping(
request_type, python_name=param_name)
if field_remapping is not None:
new_params[field_remapping] = new_params.pop(param_name)
if isinstance(value, messages.Enum):
new_params[param_name] = encoding.GetCustomJsonEnumMapping(
type(value), python_name=str(value)) or str(value)
return new_params
| apache-2.0 | 3,095,757,246,124,527,000 | 32.864734 | 80 | 0.685164 | false |
volpino/Yeps-EURAC | lib/galaxy/model/migrate/versions/0009_request_table.py | 1 | 2290 | """
This migration script adds a new column to 2 tables:
1) a new boolean type column named 'submitted' to the 'request' table
2) a new string type column named 'bar_code' to the 'sample' table
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import sys, logging
from galaxy.model.custom_types import *
from sqlalchemy.exc import *
log = logging.getLogger( __name__ )
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler( sys.stdout )
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter( format )
handler.setFormatter( formatter )
log.addHandler( handler )
metadata = MetaData( migrate_engine )
def display_migration_details():
print "========================================"
print "This migration script adds a new column to 2 tables:"
print "1) a new boolean type column named 'submitted' to the 'request' table"
print "2) a new string type column named 'bar_code' to the 'sample' table"
print "========================================"
def upgrade():
display_migration_details()
# Load existing tables
metadata.reflect()
# Add 1 column to the request table
try:
Request_table = Table( "request", metadata, autoload=True )
except NoSuchTableError:
Request_table = None
log.debug( "Failed loading table request" )
if Request_table:
try:
col = Column( "submitted", Boolean, index=True, default=False )
col.create( Request_table )
assert col is Request_table.c.submitted
except Exception, e:
log.debug( "Adding column 'submitted' to request table failed: %s" % ( str( e ) ) )
# Add 1 column to the sample table
try:
Sample_table = Table( "sample", metadata, autoload=True )
except NoSuchTableError:
Sample_table = None
log.debug( "Failed loading table sample" )
if Sample_table:
try:
col = Column( "bar_code", TrimmedString( 255 ), index=True )
col.create( Sample_table )
assert col is Sample_table.c.bar_code
except Exception, e:
log.debug( "Adding column 'bar_code' to sample table failed: %s" % ( str( e ) ) )
def downgrade():
pass
| mit | -5,073,953,024,989,574,000 | 35.349206 | 95 | 0.633624 | false |
boehm-s/dotfiles | .emacs.d/elpy/rpc-venv/lib/python3.7/site-packages/pip/_vendor/packaging/specifiers.py | 5 | 28800 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
        # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
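# Hedged illustration (not part of the vendored module): _version_split and
# _pad_version align release segments for the prefix comparisons above, e.g.
#   _version_split('1.2rc1')                  -> ['1', '2', 'rc1']
#   _pad_version(['1', '2'], ['1', '2', '3']) -> (['1', '2', '0'], ['1', '2', '3'])
# Below, a minimal usage sketch of Specifier itself; the version strings are
# illustrative only.
if __name__ == '__main__':
    assert Version('2.2.1') in Specifier('~=2.2')       # compatible release
    assert Version('2.0') in Specifier('==2.*')         # prefix match
    assert Version('1.3') not in Specifier('>=1.4')
    assert not Specifier('>=1.0').contains('1.1.dev1')  # pre-releases excluded
    assert Specifier('>=1.0').contains('1.1.dev1', prereleases=True)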
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
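# Hedged usage sketch (not part of the vendored module): a SpecifierSet is the
# comma-separated intersection of individual specifiers.
if __name__ == '__main__':
    spec_set = SpecifierSet('>=1.0,<2.0,!=1.3')
    assert '1.4' in spec_set
    assert '1.3' not in spec_set
    assert list(spec_set.filter(['0.9', '1.2', '1.3', '2.1'])) == ['1.2']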
| gpl-3.0 | -8,638,099,803,205,713,000 | 35.209302 | 79 | 0.545903 | false |
AlphaSmartDog/DeepLearningNotes | Note-1 RNN-DNC择时/Note-1 初学RqAlpha——PonderLSTM和PonderDNC日频期货的简单应用/sonnet/python/modules/basic.py | 3 | 48619 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Basic Modules for TensorFlow snt.
Modules defining the simplest building blocks for Neural Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numbers
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from sonnet.python.modules import base
from sonnet.python.modules import util
from sonnet.python.ops import nest
import tensorflow as tf
def merge_leading_dims(tensor, n_dims=2):
"""Merge the first dimensions of a tensor.
Args:
tensor: Tensor to have its first dimensions merged.
n_dims: Number of dimensions to merge.
Returns:
The input tensor, with its first dimensions merged.
"""
tensor = tf.convert_to_tensor(tensor)
tensor_shape_static = tensor.get_shape()
tensor_shape_list = tensor_shape_static.as_list()
if tensor_shape_static.is_fully_defined():
new_shape = (
[np.prod(tensor_shape_list[:n_dims])] + tensor_shape_list[n_dims:])
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
tensor_shape = tf.shape(tensor)
new_first_dim = tf.reduce_prod(tensor_shape[:n_dims], keep_dims=True)
other_dims = tensor_shape[n_dims:]
new_size = tf.concat([new_first_dim, other_dims], 0)
result = tf.reshape(tensor, new_size)
# We need to set the result size of this, as otherwise we won't be able to
# pass to e.g. a Linear.
result.set_shape([None] + tensor_shape_list[n_dims:])
return result
def split_leading_dim(tensor, inputs, n_dims=2):
"""Split the first dimension of a tensor.
Args:
tensor: Tensor to have its first dimension split.
inputs: Original reference input to look the dimensions of.
n_dims: Number of dimensions to split.
Returns:
The input tensor, with its first dimension split.
"""
input_shape_static = inputs.get_shape()
input_shape_list = input_shape_static.as_list()
tensor_shape_static = tensor.get_shape()
tensor_shape_list = tensor_shape_static.as_list()
if (input_shape_static.is_fully_defined()
and tensor_shape_static.is_fully_defined()):
new_shape = input_shape_list[:n_dims] + tensor_shape_list[1:]
return tf.reshape(tensor, new_shape)
# Shape can't be inferred statically.
dims_after_first = tf.shape(tensor)[1:]
split_sizes = tf.shape(inputs)[:n_dims]
known_split_sizes = input_shape_list[:n_dims]
known_dims_after_first = tensor_shape_list[1:]
output_size = tf.concat([split_sizes, dims_after_first], 0)
result = tf.reshape(tensor, output_size)
result.set_shape(known_split_sizes + known_dims_after_first)
return result
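# Hedged usage sketch (not part of the original module): merging and restoring
# the leading dimensions of a [batch, time, features] Tensor, e.g. to feed a
# module that only accepts rank-2 input. Shapes below are illustrative only.
if __name__ == '__main__':
  x = tf.zeros([4, 5, 6])                            # [batch, time, features]
  merged = merge_leading_dims(x, n_dims=2)           # static shape [20, 6]
  restored = split_leading_dim(merged, x, n_dims=2)  # static shape [4, 5, 6]
  print(merged.get_shape(), restored.get_shape())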
def create_linear_initializer(input_size, dtype=tf.float32):
"""Returns a default initializer for weights of a linear module."""
stddev = 1 / math.sqrt(input_size)
return tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
def create_bias_initializer(unused_bias_shape, dtype=tf.float32):
"""Returns a default initializer for the biases of a linear/AddBias module."""
return tf.zeros_initializer(dtype=dtype)
class Linear(base.AbstractModule, base.Transposable):
"""Linear module, optionally including bias."""
def __init__(self,
output_size,
use_bias=True,
initializers=None,
partitioners=None,
regularizers=None,
custom_getter=None,
name="linear"):
"""Constructs a Linear module.
Args:
output_size: Output dimensionality. `output_size` can be either an integer
or a callable. In the latter case, since the function invocation is
deferred to graph construction time, the user must only ensure that
output_size can be called, returning an integer, when build is called.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing initializers to initialize the
weights (with key 'w') or biases (with key 'b'). The default
initializer for the weights is a truncated normal initializer, which
is commonly used when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the weights
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
KeyError: If `initializers`, `partitioners` or `regularizers` contains any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
"""
super(Linear, self).__init__(custom_getter=custom_getter, name=name)
self._output_size = output_size
self._use_bias = use_bias
self._input_shape = None
self._w = None
self._b = None
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w", "b"} if use_bias else {"w"}
def _build(self, inputs):
"""Connects the Linear module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the Tensor provided here must have the same final dimension, in order for
the existing variables to be the correct size for the multiplication. The
batch size may differ for each connection.
Args:
inputs: A 2D Tensor of size [batch_size, input_size].
Returns:
A 2D Tensor of size [batch_size, output_size].
Raises:
base.IncompatibleShapeError: If the input is not a 2-D `Tensor` with
the size of the second dimension specified.
base.IncompatibleShapeError: If reconnecting an already connected module
into the graph, and the shape of the input is not compatible with
previous inputs.
"""
input_shape = tuple(inputs.get_shape().as_list())
if len(input_shape) != 2:
raise base.IncompatibleShapeError(
"{}: rank of shape must be 2 not: {}".format(
self.scope_name, len(input_shape)))
if input_shape[1] is None:
raise base.IncompatibleShapeError(
"{}: Input size must be specified at module build time".format(
self.scope_name))
if self._input_shape is not None and input_shape[1] != self._input_shape[1]:
raise base.IncompatibleShapeError(
"{}: Input shape must be [batch_size, {}] not: [batch_size, {}]"
.format(self.scope_name, self._input_shape[1], input_shape[1]))
self._input_shape = input_shape
dtype = inputs.dtype
if "w" not in self._initializers:
self._initializers["w"] = create_linear_initializer(self._input_shape[1],
dtype)
if "b" not in self._initializers and self._use_bias:
self._initializers["b"] = create_bias_initializer(self._input_shape[1],
dtype)
weight_shape = (self._input_shape[1], self.output_size)
self._w = tf.get_variable("w",
shape=weight_shape,
dtype=dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
outputs = tf.matmul(inputs, self._w)
if self._use_bias:
bias_shape = (self.output_size,)
self._b = tf.get_variable("b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
outputs += self._b
return outputs
@property
def w(self):
"""Returns the Variable containing the weight matrix.
Returns:
Variable object containing the weights, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._w
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
AttributeError: If the module does not use bias.
"""
self._ensure_is_connected()
if not self._use_bias:
raise AttributeError(
"No bias Variable in Linear Module when `use_bias=False`.")
return self._b
@property
def output_size(self):
"""Returns the module output size."""
if callable(self._output_size):
self._output_size = self._output_size()
return self._output_size
@property
def has_bias(self):
"""Returns `True` if bias Variable is present in the module."""
return self._use_bias
@property
def initializers(self):
"""Returns the initializers dictionary."""
return self._initializers
@property
def partitioners(self):
"""Returns the partitioners dictionary."""
return self._partitioners
@property
def regularizers(self):
"""Returns the regularizers dictionary."""
return self._regularizers
def clone(self, name=None):
"""Returns a cloned `Linear` module.
Args:
name: Optional string assigning name of cloned module. The default name
is constructed by appending "_clone" to `self.module_name`.
Returns:
Cloned `Linear` module.
"""
if name is None:
name = self.module_name + "_clone"
return Linear(output_size=self.output_size,
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
name=name)
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None):
"""Returns transposed `Linear` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.module_name`.
Returns:
Transposed `Linear` module.
"""
if name is None:
name = self.module_name + "_transpose"
return Linear(output_size=lambda: self.input_shape[1],
use_bias=self._use_bias,
initializers=self._initializers,
partitioners=self._partitioners,
regularizers=self._regularizers,
name=name)
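# Hedged usage sketch (not part of the original module): a Linear module maps
# [batch_size, input_size] to [batch_size, output_size], creating its `w` and
# `b` variables when first connected. Sizes below are illustrative only.
if __name__ == '__main__':
  lin = Linear(output_size=128)
  outputs = lin(tf.zeros([32, 64]))   # creates w: [64, 128] and b: [128]
  print(outputs.get_shape())          # (32, 128)
  lin_t = lin.transpose()             # output size inferred as 64
  print(lin_t(outputs).get_shape())   # (32, 64)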
def calculate_bias_shape(input_shape, bias_dims):
"""Calculate `bias_shape` based on the `input_shape` and `bias_dims`.
Args:
input_shape: Shape of the input being passed into the module. The leading
dimension is the minibatch size.
bias_dims: The dimensions that bias should be applied over. The remaining
dimensions will get broadcasted over.
Returns:
bias_shape: Tuple corresponding to the shape of bias Variable to create.
Raises:
ValueError: If the user attempts to add bias over the minibatch dimension,
e.g. `bias_dims=[0]`.
"""
input_rank = len(input_shape)
# If None, default is to use all dimensions.
if bias_dims is None:
return input_shape[1:]
# If empty list, use a scalar bias.
elif not bias_dims:
return ()
# Otherwise, calculate bias_shape from bias_dims.
else:
bias_shape = [1] * input_rank
# Populate bias dimensions.
for dim in bias_dims:
dim %= input_rank
if dim == 0:
raise ValueError("Cannot apply bias across the minibatch dimension.")
bias_shape[dim] = input_shape[dim]
# Strip leading unit dimensions.
start = input_rank
for dim in xrange(1, input_rank):
if bias_shape[dim] != 1:
start = dim
break
return tuple(bias_shape[start:]) # Do not apply across minibatch dimension.
class AddBias(base.AbstractModule, base.Transposable):
"""AddBias module."""
POSSIBLE_INITIALIZER_KEYS = {"b"}
def __init__(self,
output_shape=None,
bias_dims=None,
initializers=None,
partitioners=None,
regularizers=None,
name="add"):
"""Constructs an AddBias module that supports broadcasting.
Args:
output_shape: Output dimensionality. `output_shape` can be either `None`,
a `tuple`, or a `callable`. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that `output_shape` can be called, returning a tuple, when
build is called. If `output_shape` is left as `None`, the size will be
directly inferred by the input.
bias_dims: List of which dimensions to retain from the input shape when
constructing the bias. The remaining dimensions will get broadcasted
over (given size of 1), and leading dimensions will be removed
completely. For example, for an input of [batch_size, dim1_size,
dim2_size, dim3_size] and `bias_dims=[1, 3]`, the resulting
        bias will have shape [dim1_size, 1, dim3_size]. The default is to
retain all dimensions apart from the minibatch dimension. Trying to
retain the bias shape over the minibatch dimension, e.g.
`bias_dims=[0]`, will result in an error at build time. See the
'Example Usage' section below for more information.
initializers: Optional dict containing ops to initialize the biases
(with key 'b'). The default initializer for the bias is a zero
initializer.
partitioners: Optional dict containing a partitioner to partition
the bias (with key 'b'). As a default, no partitioner is used.
regularizers: Optional dict containing regularizers of the biases
(with key 'b'). As a default, no regularizers are used. A regularizer
should be a function that takes a single `Tensor` as an input and
returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in
`tf.contrib.layers`.
name: Name of the module.
Example Usage:
```python
# Create a 4D input Tensor.
input = tf.random_normal(
shape=(batch_size, dim1_size, dim2_size, dim3_size)))
# Create a scalar bias:
scalar_bias = snt.AddBias(bias_dims=[])
scalar_bias_output = scalar_bias(input)
scalar_bias.b.get_shape() # ()
# Create a bias over all non-minibatch dimensions:
all_bias = snt.AddBias() # or snt.AddBias(bias_dims=None)
all_bias_output = all_bias(input)
all_bias.b.get_shape() # (dim1_size, dim2_size, dim3_size)
# Create a bias over the last non-minibatch dimension:
last_bias = snt.AddBias(bias_dims=[-1])
last_bias_output = last_bias(input)
last_bias.b.get_shape() # (dim3_size)
# Create a bias over the first non-minibatch dimension:
first_bias = snt.AddBias(bias_dims=[1])
first_bias_output = first_bias(input)
first_bias.b.get_shape() # (dim1_size, 1, 1)
# Subtract and later add the same learned bias:
bias = snt.AddBias()
hidden1 = bias(input, multiplier=-1)
# ...
reconstructed_input = bias(hidden4)
```
Raises:
KeyError: If `initializers` contains any keys other than 'b'.
KeyError: If `partitioners` contains any keys other than 'b'.
KeyError: If `regularizers` contains any keys other than 'b'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(AddBias, self).__init__(name=name)
self._output_shape = output_shape
self._input_shape = None
self._bias_dims = bias_dims
self._b = None
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self, inputs, multiplier=1):
"""Connects the Add module into the graph, with input Tensor `inputs`.
Args:
inputs: A Tensor of size `[batch_size, input_size1, ...]`.
multiplier: A scalar or Tensor which the bias term is multiplied by
before adding it to `inputs`. Anything which works in the expression
`bias * multiplier` is acceptable here. This may be useful if you want
to add a bias in one place and subtract the same bias in another place
via `multiplier=-1`.
Returns:
A Tensor of size `[batch_size, input_size1, ...]`.
Raises:
base.IncompatibleShapeError: If the input is not a >= 2D `Tensor`.
base.IncompatibleShapeError: If connecting the module into the graph
any time after the first time, and the inferred size of the input does
not match previous invocations.
base.IncompatibleShapeError: If the `output_shape` has been specified
but it does not match the input_shape`.
base.ParentNotBuiltError: If the module is a transposed and the original
untransposed module has not been built.
"""
input_shape = tuple(inputs.get_shape().as_list())
bias_shape = calculate_bias_shape(input_shape, self._bias_dims)
# Check always contains minibatched input.
if len(input_shape) < 2:
raise base.IncompatibleShapeError(
"Rank of input shape must be >=2 not: {}.".format(len(input_shape)))
# Check previous input size is same as new input size.
if (self._input_shape is not None and
input_shape[1:] != self._input_shape[1:]):
raise base.IncompatibleShapeError("Input shape has changed.")
# If transposed, make sure that the original Module is built.
if callable(self._output_shape):
self._output_shape = self._output_shape()
if self._output_shape is None:
raise base.ParentNotBuiltError(
"Build the original untransposed module before building this one.")
# If output_shape specified, check that it matches input_shape.
if (self._output_shape is not None and
self._output_shape[1:] != input_shape[1:]):
raise base.IncompatibleShapeError(
"Input shape must be {} not: {}.".format(self._output_shape,
input_shape[1]))
self._input_shape = input_shape
dtype = inputs.dtype
if "b" not in self._initializers:
self._initializers["b"] = create_bias_initializer(bias_shape, dtype)
self._b = tf.get_variable(
"b",
shape=bias_shape,
dtype=dtype,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
bias = self._b
if multiplier != 1:
bias *= multiplier
outputs = inputs + bias
return outputs
@property
def b(self):
"""Returns the Variable containing the bias.
Returns:
Variable object containing the bias, from the most recent __call__.
Raises:
base.NotConnectedError: If the module has not been connected to the
graph yet, meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._b
# Implements Transposable interface.
@property
def input_shape(self):
"""Returns shape of input `Tensor` passed at last call to `build`."""
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface
def transpose(self, name=None):
"""Returns transposed `AddBias` module.
Args:
name: Optional string assigning name of transpose module. The default name
is constructed by appending "_transpose" to `self.module_name`.
Returns:
Transposed `AddBias` module.
"""
if name is None:
name = self.module_name + "_transpose"
return AddBias(output_shape=lambda: self._input_shape,
bias_dims=self._bias_dims,
initializers=self._initializers,
regularizers=self._regularizers,
name=name)
class BatchReshape(base.AbstractModule, base.Transposable):
"""Reshapes input Tensor, preserving the batch dimension."""
def __init__(self, shape, preserve_dims=1, name="batch_reshape"):
"""Constructs a BatchReshape module.
Args:
shape: Shape to reshape the input Tensor to while preserving its
first `preserve_dims` dimensions; `shape` can be either a tuple/list,
or a callable that returns the actual shape. The callable does not
need to be ready to return something meaningful at construction time,
but it will be required to be able to do so when the module is
connected to the graph. When the special value -1 appears in `shape`
the corresponding size is automatically inferred. Note that -1 can
only appear once in `shape`. To flatten all non-batch dimensions,
the snt.BatchFlatten module can also be used.
preserve_dims: Number of leading dimensions that will not be reshaped.
For example, given an input Tensor with shape `[B, H, W, C, D]`,
and argument `shape` equal to `(-1, D)`:
* `preserve_dims=1` will return a Tensor with shape `[B, H*W*C, D]`.
* `preserve_dims=2` will return a Tensor with
shape `[B, H, W*C, D]`.
* `preserve_dims=3` will return a Tensor with
shape `[B, H, W, C, D]`.
* `preserve_dims=4` will return a Tensor with
shape `[B, H, W, C, 1, D]`.
* `preserve_dims>=5` will throw an error on build unless D=1.
The preserved dimensions can be unknown at building time.
name: Name of the module.
Raises:
ValueError: If `preserve_dims <= 0`.
"""
super(BatchReshape, self).__init__(name=name)
self._input_shape = None
self._shape = shape
self._preserve_dims = preserve_dims
if preserve_dims <= 0:
raise ValueError("Argument preserve_dims should be >= 1.")
if not callable(self._shape):
self._shape = tuple(self._shape)
def _infer_shape(self, dimensions):
"""Replaces the -1 wildcard in the output shape vector.
This function infers the correct output shape given the input dimensions.
Args:
dimensions: List of input non-batch dimensions.
Returns:
Tuple of non-batch output dimensions.
"""
# Size of input
n = np.prod(dimensions)
# Size of output where defined
m = np.prod(abs(np.array(self._shape)))
# Replace wildcard
v = np.array(self._shape)
v[v == -1] = n // m
return tuple(v)
def _build(self, inputs):
"""Connects the module into the graph, with input Tensor `inputs`.
Args:
inputs: A Tensor of shape [b_1, b_2, ..., b_preserve_dims,
b_preserve_dims+1, ...].
Returns:
A Tensor of shape [b_1, b_2, ..., b_preserve_dims,
b_reshape_1, b_reshape_2, ...],
with reshaping defined by the constructor `shape` parameter.
Raises:
ValueError: If output shape is incompatible with input shape; or if
shape array contains non numeric entries; or if shape array contains
more than 1 wildcard -1; or if the input array contains unknown,
non-preserved dimensions (except when the unknown dimension is the
only non-preserved dimension and doesn't actually need reshaping).
"""
full_input_shape = inputs.get_shape().as_list()
if len(full_input_shape) < self._preserve_dims:
raise ValueError("Input tensor has {} dimensions, should have at least "
"as many as preserve_dims={}".format(
len(full_input_shape),
self._preserve_dims))
self._input_shape = full_input_shape[self._preserve_dims:]
if callable(self._shape):
self._shape = tuple(self._shape())
# Special-case of 1 non-preserved dimension, where no reshape is necessary.
# This is useful if the non-preserved dimension of `inputs` is unknown
# at build time.
if len(self._input_shape) == 1 and len(self._shape) == 1:
if self._shape[0] == -1 or self._shape[0] == self._input_shape[0]:
return inputs
else:
if self._input_shape[0] is None:
raise ValueError("Unknown non-preserved dimensions are not allowed "
"in the input to BatchReshape unless it is only one "
"and the desired shape is (-1,).")
else:
raise ValueError("Output shape is incompatible with input shape")
if not all([isinstance(x, numbers.Integral) and (x > 0 or x == -1)
for x in self._shape]):
raise ValueError(
"Desired shape can only contain positive integral numbers "
"and the wildcard -1. Given shape {}".format(self._shape))
if self._shape.count(-1) > 1:
raise ValueError(
"Wildcard -1 can appear only once in desired output shape. "
"Given shape {}".format(self._shape))
preserved_shape = tf.shape(inputs)[:self._preserve_dims]
# Slicing the shape tensor loses information, we keep it in a list.
preserved_shape_list = inputs.get_shape()[:self._preserve_dims]
# Except in the case above where no reshape is needed, we do not allow
# unknown non-preserved dimensions in the input.
if None in self._input_shape:
raise ValueError("Unknown non-preserved dimensions are not allowed in "
"the input to BatchReshape unless it is only one and the"
" desired shape is (-1,). The offending non-preserved "
"input shape is {}".format(self._input_shape))
if self._shape.count(-1) > 0:
trailing_shape = self._infer_shape(self._input_shape)
else:
trailing_shape = self._shape
if np.prod(self._input_shape) != np.prod(trailing_shape):
raise ValueError("Output shape is incompatible with input shape")
shape = tf.concat([preserved_shape, trailing_shape], 0)
output = tf.reshape(inputs, shape)
# Include shape information that was lost when we sliced the shape tensor.
shape_list = preserved_shape_list.concatenate(trailing_shape)
output.set_shape(shape_list)
return output
@property
def input_shape(self):
self._ensure_is_connected()
return self._input_shape
# Implements Transposable interface.
def transpose(self, name=None):
"""Returns transpose batch reshape."""
if name is None:
name = self.module_name + "_transpose"
return BatchReshape(shape=lambda: self.input_shape,
preserve_dims=self._preserve_dims,
name=name)
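# Illustrative usage sketch (assumes a TF1-style graph; the shapes below are
# hypothetical): reshape the non-batch dimensions of a [B, 4, 6] tensor and
# recover the original layout with the transposed module.
def _batch_reshape_usage_sketch():
  inputs = tf.placeholder(tf.float32, shape=[None, 4, 6])
  reshape = BatchReshape(shape=(2, 12))
  outputs = reshape(inputs)                # shape [None, 2, 12]
  restored = reshape.transpose()(outputs)  # shape [None, 4, 6]
  return restored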
class BatchFlatten(BatchReshape):
"""Flattens the input Tensor, preserving the batch dimension(s)."""
def __init__(self, preserve_dims=1, name="batch_flatten"):
"""Constructs a BatchFlatten module.
Args:
preserve_dims: Number of leading dimensions that will not be reshaped.
For example, given an input Tensor with shape `[B, H, W, C]`:
* `preserve_dims=1` will return a Tensor with shape `[B, H*W*C]`.
* `preserve_dims=2` will return a Tensor with
shape `[B, H, W*C]`.
* `preserve_dims=3` will return the input itself,
shape `[B, H, W, C]`.
* `preserve_dims=4` will return a Tensor with
shape `[B, H, W, C, 1]`.
* `preserve_dims>=5` will throw an error on build.
The preserved dimensions can be unknown at building time.
name: Name of the module.
"""
super(BatchFlatten, self).__init__(
shape=(-1,), preserve_dims=preserve_dims, name=name)
class FlattenTrailingDimensions(BatchReshape):
"""Flattens trailing dimensions of a Tensor."""
def __init__(self, dim_from, name="batch_dim_from"):
"""Constructs a FlattenTrailingDimensions module.
For example, given an input Tensor with shape `[B, H, W, C]`:
* `dim_from=1` will return a Tensor with shape `[B, H*W*C]`.
* `dim_from=2` will return a Tensor with shape `[B, H, W*C]`.
* `dim_from=3` will return the input itself.
* `dim_from=4` will return a Tensor with shape `[B, H, W, C, 1]`.
* `dim_from>=5` will generate a ValueError when building the module.
The preserved dimensions can be unknown at building time.
Equivalent to BatchFlatten(preserve_dims=dim_from, name=name).
Args:
dim_from: All dimensions after and including `dim_from` will
be flattened into a single dimension.
name: Name of the module.
Raises:
ValueError: If `dim_from <= 0`.
"""
if dim_from <= 0:
raise ValueError("Argument dim_from should be >= 1.")
super(FlattenTrailingDimensions, self).__init__(
shape=(-1,), preserve_dims=dim_from, name=name)
class TrainableVariable(base.AbstractModule):
"""Provides learnable parameter Tensor."""
POSSIBLE_INITIALIZER_KEYS = {"w"}
def __init__(self,
shape,
dtype=tf.float32,
initializers=None,
partitioners=None,
regularizers=None,
name="trainable_variable"):
"""Constructs a TrainableVariable module.
Args:
shape: Tensor shape.
dtype: Tensor data type.
initializers: Optional dictionary containing ops to initialize the weight
Tensor, with key 'w'.
partitioners: Optional dict containing a partitioner to partition
the weight (with key 'w'). As a default, no partitioner is used.
regularizers: Optional dict containing regularizers for the weights
(with key 'w'). As a default, no regularizers are used. A regularizer
should be a function that takes a single `Tensor` as an input and
returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in
`tf.contrib.layers`.
name: Name of the module.
Raises:
KeyError: If `initializers` contains any keys other than 'w'.
KeyError: If `partitioners` contains any keys other than 'w'.
KeyError: If `regularizers` contains any keys other than 'w'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(TrainableVariable, self).__init__(name=name)
self._shape = tuple(shape)
self._dtype = dtype
self._initializers = util.check_initializers(
initializers, self.POSSIBLE_INITIALIZER_KEYS)
self._partitioners = util.check_partitioners(
partitioners, self.POSSIBLE_INITIALIZER_KEYS)
self._regularizers = util.check_regularizers(
regularizers, self.POSSIBLE_INITIALIZER_KEYS)
def _build(self):
"""Connects the TrainableTensor module into the graph.
Returns:
A Tensor of shape as determined in the constructor.
"""
if "w" not in self._initializers:
stddev = 1 / math.sqrt(np.prod(self._shape))
self._initializers["w"] = tf.truncated_normal_initializer(stddev=stddev)
self._w = tf.get_variable("w",
shape=self._shape,
dtype=self._dtype,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
return self._w
@property
def w(self):
"""Returns the Variable containing the weights Tensor.
Returns:
Variable object containing the weights, from the most recent __call__.
Raises:
base.Error: If the module has not been connected to the graph yet,
meaning the variables do not exist.
"""
self._ensure_is_connected()
return self._w
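# Illustrative usage sketch (hypothetical shapes): TrainableVariable exposes a
# learnable tensor that can be combined with other ops, here acting as a bias.
def _trainable_variable_usage_sketch():
  logits = tf.placeholder(tf.float32, shape=[None, 10])
  bias = TrainableVariable(shape=[10], dtype=tf.float32)()
  return logits + bias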
class BatchApply(base.AbstractModule):
"""Merges a number of leading dimensions of an input tensor to manipulate it.
Merges a number of leading dimensions of a tensor into a single dimension,
connects the provided module, then splits the leading dimension of the
result to match the input.
This is useful for applying some module to each timestep of a Time x Batch x N
tensor. If a module is hard coded to only support 2D (Batch x N) then the
full 3D Tensor cannot be provided. BatchApply will 'merge' the first two
dimensions of the sequence tensor by reshaping to a (Time * Batch) x N Tensor,
and then the internal module can be applied. The result of that operation is
reshaped such that its first dimensions are split to match the leading
dimensions of the input.
"""
def __init__(self, module_or_op, n_dims=2, input_example_index=0,
name="batch_apply"):
"""Constructor of the module.
Args:
module_or_op: Module or tensorflow op to apply to an input tensor.
n_dims: Number of dimensions to merge before using module on the input
of BatchApply.
input_example_index: Index of input that has same shape for the first
`n_dims` dimensions as `module_or_op` output(s). This is used for
unflattening the output(s) if static shape inference is not possible.
name: Name of the module.
Raises:
TypeError: If n_dims is not an integer.
ValueError: If n_dims is not greater than zero.
"""
super(BatchApply, self).__init__(name=name)
if not isinstance(n_dims, int):
raise TypeError("n_dims should be an integer, it is a %s instead." %
type(n_dims))
if n_dims <= 0:
raise ValueError("n_dims should be greater than zero.")
self._module = module_or_op
self._n_dims = n_dims
self._input_example_index = input_example_index
def _build(self, *args, **kwargs):
"""Connects the BatchApply module into the graph.
Args:
*args: a Tensor or a nested list or dictionary of Tensors. The input
tensors will have their first dimensions merged, then an op or a
module will be called on the input. The first dimension of the output
tensor(s) will be split again based on the leading dimensions of the
first input tensor.
**kwargs: Dictionary of named arguments; used in the same way as `*args`.
Returns:
A Tensor or nested list or dictionary of Tensors as a result of applying
the process above. ("None" return values are also supported.)
"""
flattened = nest.flatten_iterable([args, kwargs])
merged_flattened = [
merge_leading_dims(inp, self._n_dims) if inp is not None else None
for inp in flattened]
merged_args, merged_kwargs = nest.pack_iterable_as([args, kwargs],
merged_flattened)
results = self._module(*merged_args, **merged_kwargs)
# Unmerging takes the sizes of the leading dimensions from an input example
# with equal shape for the leading `n_dims` dimensions. Typically this is
# the first input.
example_input = tf.convert_to_tensor(flattened[self._input_example_index])
def _split_to_original_leading_dims(result):
if result is None:
return None
else:
return split_leading_dim(result, example_input, self._n_dims)
flat_results = nest.flatten_iterable(results)
flat_unmerged_results = [_split_to_original_leading_dims(result)
for result in flat_results]
return nest.pack_iterable_as(results, flat_unmerged_results)
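# Illustrative usage sketch (hypothetical Time x Batch x Features input): apply
# a projection that only accepts 2-D inputs to every timestep by merging the
# leading [T, B] dimensions and splitting them back afterwards.
def _batch_apply_usage_sketch():
  sequence = tf.placeholder(tf.float32, shape=[7, 3, 5])  # [T, B, N]
  weights = tf.get_variable("proj_w", shape=[5, 2], dtype=tf.float32)
  def project(x):  # an op that only accepts 2-D inputs
    return tf.matmul(x, weights)
  return BatchApply(project)(sequence)  # shape [7, 3, 2]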
class SliceByDim(base.AbstractModule):
"""Slices a tensor along specific dimensions.
The user can slice a tensor by specifying only the list of dimensions that
they want to slice, together with the lists of integers containing the
beginning indices of the slicing, and the size of the slices. Hence, with
`SliceByDim` slicing can be performed without knowing in advance the rank of
the input tensor.
Tensorflow also offers a built-in op performing slicing, `tf.slice`. However,
`tf.slice` requires all the slicing dimensions to be specified, using
wildcards when no slicing is required. For example, with `tf.slice`, slicing
half a 5D tensor along dimension `1` would be:
```python
output = tf.slice(inputs,
begin=[0, 0, 0, 0, 0],
size=[-1, inputs.get_shape()[1].value//2, -1, -1, -1])
```
The same operation using `SliceByDim` would be:
```python
output = SliceByDim(dims=[1], begin=[0], size=[x.get_shape()[1].value//2])(x)
```
`SliceByDim` can be used to specify multiple slicing dimensions, for example:
```python
output = SliceByDim(dims=[1, 3], begin=[0, 0], size=[12, 24])(x)
```
"""
def __init__(self, dims, begin, size, name="slice_by_dim"):
"""Constructs the `SliceByDim` module.
Args:
dims: The dimensions to slice along, as a list of unique integers.
Negative integers index from the final dimension backwards, as in
python arrays.
begin: The beginning indices of the slicing, as a list of integers. Must
be the same length as the `dims` list.
size: The size of the slices, as a list of integers. Must be the same
length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of `begin`
is different from the size of `dims`, or if the size of `size` is
different from the size of `dims`.
"""
super(SliceByDim, self).__init__(name=name)
self._dims = dims
self._begin = begin
self._size = size
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(begin) != len(dims):
raise ValueError(
"begin must have the same length as dims: {}.".format(len(dims)))
if len(size) != len(dims):
raise ValueError(
"size must have the same length as dims: {}.".format(len(dims)))
def _build(self, inputs):
"""Connects the SliceByDim module into the graph.
Args:
inputs: `Tensor` to slice. Its rank must be greater than the maximum
dimension specified in `dims` (plus one as python is 0 indexed).
Returns:
The sliced tensor.
Raises:
ValueError: If `inputs` tensor has insufficient rank.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
    # Check that the input tensor has sufficient rank.
max_dim = np.max(self._dims) + 1
if rank < max_dim:
raise ValueError("Rank of inputs must be at least {}.".format(max_dim))
# Builds default lists for begin and size to pass to `tf.slice`.
full_begin = [0] * rank
full_size = [-1] * rank
# Updates lists with what the user provided.
for dim, begin, size in zip(self._dims, self._begin, self._size):
full_begin[dim] = begin
full_size[dim] = size
return tf.slice(inputs, begin=full_begin, size=full_size)
class TileByDim(base.AbstractModule):
"""Tile a tensor along specific dimensions.
The user can tile a tensor by specifying only the list of dimensions that
they want to tile, together with the lists of integers containing the
multiples of the tiling. Hence, with `TileByDim` tiling can be performed
without knowing in advance the rank of the input tensor.
Tensorflow also offers a built-in op performing tiling, `tf.tile`. However,
`tf.tile` requires all the tiling dimensions to be specified, using `1`
  when no tiling is required. For example, with `tf.tile`, tiling a 5D
tensor along dimension `1`, by `2` would be:
```python
output = tf.tile(inputs, multiples=[1, 2, 1, 1, 1])
```
The same operation using `TileByDim` would be:
```python
output = TileByDim(dims=[1], multiples=[2])(x)
```
`TileByDim` can be used to specify multiple tiling dimensions, for example:
```python
output = TileByDim(dims=[1, 3], multiples=[2, 4])(x)
```
"""
def __init__(self, dims, multiples, name="tile_by_dim"):
"""Constructs the `TileByDim` module.
Args:
dims: The dimensions to tile along, as a list of unique integers.
multiples: The multiple of the tiling, as a list of integers. Must
be the same length as the `dims` list.
name: The name of the module.
Raises:
ValueError: If `dims` has non-unique integers, or if the size of
`multiples` is different from the size of `dims`.
"""
super(TileByDim, self).__init__(name=name)
self._dims = dims
self._multiples = multiples
if np.unique(dims).size != len(dims):
raise ValueError("dims must not have any repeated integers.")
if len(multiples) != len(dims):
raise ValueError(
"multiples must have the same length as dims: {}.".format(len(dims)))
def _build(self, inputs):
"""Connects the `TileByDim` module into the graph.
Args:
inputs: `Tensor` to tile.
Returns:
The tiled tensor.
"""
shape_inputs = inputs.get_shape().as_list()
rank = len(shape_inputs)
# Builds default lists for multiples to pass to `tf.tile`.
full_multiples = [1] * rank
# Updates lists with what the user provided.
for dim, multiple in zip(self._dims, self._multiples):
full_multiples[dim] = multiple
return tf.tile(inputs, multiples=full_multiples)
class MergeDims(base.AbstractModule):
"""Merges a tensor or nested list of tensors along a range of dimensions.
Tensors are reshaped by specifying the range of dimensions to merge.
Hence, the reshape can be performed without knowing in advance the rank of
the input tensor.
For example, merging dimensions 1, 2 and 3 together can be performed by
calling:
output = MergeDims(start=1, size=3)(x)
A nested list of tensors can be merged:
x = [tf.random_uniform(shape=[5, 5]), [tf.random_uniform(shape=[3, 3, 3])]]
output = MergeDims(start=0, size=2)(x)
"""
def __init__(self, start, size, name="merge_dims"):
"""Constructs the MergeDims module.
Args:
start: Start of the range of dimensions to merge.
      size: Size of the range of dimensions to merge.
name: The name of the module.
Raises:
ValueError: If `size` is not strictly greater than 1.
"""
super(MergeDims, self).__init__(name=name)
self._start = start
self._size = size
    # The merged range must cover at least two dimensions.
if size <= 1:
raise ValueError("`size` should be strictly greater than 1.")
def _merge(self, tensor):
output_shape = tensor.get_shape().as_list()
rank = len(output_shape)
if rank < self._start + self._size:
raise ValueError("Rank of inputs must be at least {}."
.format(self._start + self._size))
# Update the shape of the merged dimensions.
output_shape[self._start:self._start + self._size] = [-1]
return tf.reshape(tensor, shape=output_shape)
def _build(self, inputs):
"""Connects the MergeDims module into the graph.
Args:
inputs: Tensor or a nested list of Tensors to merge. Its rank must be
greater than or equal to `start` + `size`.
Returns:
The merged Tensor or a nested list of merged Tensors.
Raises:
ValueError: If any of the `inputs` tensors has insufficient rank.
"""
if nest.is_sequence(inputs):
merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]
return nest.pack_sequence_as(inputs, merged_tensors)
# inputs is a single tf.Tensor
return self._merge(inputs)
class SelectInput(base.AbstractModule):
"""Returns a subset of its inputs in an arbitrarily nested configuration.
This module can be used for multiple purposes.
The basic usage is to select a tensor or a subset of tensors:
```
output = snt.SelectInput(idx=0, name='select')(input0, input1)
==> input0
output = snt.SelectInput(idx=[0, 2], name='select')(input0, input1, input2)
==> (input0, input2)
```
Another usage is to change the orders of the input tensors:
```
output = snt.SelectInput(idx=[1, 0], name='select')(input0, input1)
==> (input1, input0)
```
Another usage is to duplicate an input:
```
output = snt.SelectInput(idx=[0, 0], name='select')(input0)
==> (input0, input0)
```
Another usage is to add arbitrary nesting:
```
output = snt.SelectInput(
idx=[0, [1, [2]]], name='select')(input0, input1, input2)
==> (input0, (input1, (input2,)))
```
"""
def __init__(self, idx, name="select_input"):
"""Module constructor.
Args:
idx: Indexes of the tensors to select. If `idx` is an integer, then
a `Tensor` is returned. If `idx` is a (nested) list/tuple, then a
(nested) tuple of `Tensor` is returned.
name: Name of the module.
Raises:
      TypeError: If `idx` is not a list, tuple or integer.
"""
super(SelectInput, self).__init__(name=name)
self._check_type(idx)
self._idx = idx
def _check_type(self, idx):
if isinstance(idx, (list, tuple)):
for value in idx:
self._check_type(value)
elif not isinstance(idx, int):
raise TypeError("`idx` should be a (nested) array/tuple, or an integer.")
def _select(self, inputs, idx):
if isinstance(idx, (list, tuple)):
return tuple(self._select(inputs, i) for i in idx)
else:
if idx < 0 or idx >= len(inputs):
raise ValueError("`idx` contains out of bound entries (they should be "
"in the range [0, {}))".format(len(inputs)))
# Identity is called otherwise we might get 'placeholder is both fed and
# fetched' errors in some cases when using a feed_dict.
return tf.identity(inputs[idx])
def _build(self, *inputs):
"""Connects the module into the graph.
Args:
*inputs: `Tensor` variables to select.
Returns:
Subset of `inputs` in an arbitrarily nested configuration.
Raises:
ValueError: If any entry of `idx` is out of bounds with respect to the
size of `inputs`.
"""
return self._select(inputs, self._idx)
| mit | -2,524,306,854,411,019,300 | 36.198929 | 80 | 0.644481 | false |
mastizada/kuma | kuma/demos/helpers.py | 1 | 13588 | import datetime
import functools
import hashlib
import random
from babel import localedata
import jinja2
from django.conf import settings
from django.core.cache import cache
from django.utils.tzinfo import LocalTimezone
import jingo
from jingo import register
from tower import ugettext as _
from tower import ugettext, ungettext
from taggit.models import TaggedItem
from threadedcomments.models import ThreadedComment
from threadedcomments.forms import ThreadedCommentForm
from threadedcomments.templatetags import threadedcommentstags
import threadedcomments.views
from kuma.core.urlresolvers import reverse
from .models import Submission
from . import DEMOS_CACHE_NS_KEY, TAG_DESCRIPTIONS, DEMO_LICENSES
threadedcommentstags.reverse = reverse
TEMPLATE_INCLUDE_CACHE_EXPIRES = getattr(settings,
'TEMPLATE_INCLUDE_CACHE_EXPIRES', 300)
def new_context(context, **kw):
c = dict(context.items())
c.update(kw)
return c
# TODO:liberate ?
def register_cached_inclusion_tag(template, key_fn=None,
expires=TEMPLATE_INCLUDE_CACHE_EXPIRES):
"""Decorator for inclusion tags with output caching.
    Accepts a string or function to generate a cache key based on the incoming
    parameters, along with an expiration time configurable via the
    TEMPLATE_INCLUDE_CACHE_EXPIRES setting or an explicit parameter."""
if key_fn is None:
key_fn = template
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kw):
if type(key_fn) is str:
cache_key = key_fn
else:
cache_key = key_fn(*args, **kw)
out = cache.get(cache_key)
if out is None:
context = f(*args, **kw)
t = jingo.env.get_template(template).render(context)
out = jinja2.Markup(t)
cache.set(cache_key, out, expires)
return out
return register.function(wrapper)
return decorator
def submission_key(prefix):
"""Produce a cache key function with a prefix, which generates the rest of
the key based on a submission ID and last-modified timestamp."""
def k(*args, **kw):
submission = args[0]
return 'submission:%s:%s:%s' % (prefix,
submission.id,
submission.modified)
return k
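# Illustrative sketch (prefix and field values are hypothetical): the generated
# key embeds the submission id and last-modified time, so fragments cached via
# register_cached_inclusion_tag are naturally invalidated when a submission is
# edited.
def _submission_cache_key_example(submission):
    key_fn = submission_key('thumb')
    return key_fn(submission)  # e.g. 'submission:thumb:42:2013-05-01 10:20:30'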
# TODO: All of these inclusion tags could probably be generated & registered
# from a dict of function names and inclusion tag args, since the method bodies
# are all identical. Might be astronaut architecture, though.
@register.inclusion_tag('demos/elements/demos_head.html')
def demos_head(request):
return locals()
@register.inclusion_tag('demos/elements/submission_creator.html')
def submission_creator(submission):
return locals()
@register.inclusion_tag('demos/elements/profile_link.html')
def profile_link(user, show_gravatar=False, gravatar_size=48,
gravatar_default='mm'):
return locals()
@register.inclusion_tag('demos/elements/submission_thumb.html')
def submission_thumb(submission, extra_class=None, thumb_width="200",
thumb_height="150", is_homepage=False):
vars = locals()
flags = submission.get_flags()
# Dict of metadata associated with flags for demos
# TODO: Move to a constant or DB table? Too much view stuff here?
flags_meta = {
# flag name thumb class flag description
'firstplace': ('first-place', _('First Place')),
'secondplace': ('second-place', _('Second Place')),
'thirdplace': ('third-place', _('Third Place')),
'finalist': ('finalist', _('Finalist')),
'featured': ('featured', _('Featured')),
}
# If there are any flags, pass them onto the template. Special treatment
# for the first flag, which takes priority over all others for display in
# the thumb.
main_flag = (len(flags) > 0) and flags[0] or None
vars['all_flags'] = flags
vars['main_flag'] = main_flag
if main_flag in flags_meta:
vars['main_flag_class'] = flags_meta[main_flag][0]
vars['main_flag_description'] = flags_meta[main_flag][1]
vars['is_homepage'] = is_homepage
return vars
def submission_listing_cache_key(*args, **kw):
ns_key = cache.get(DEMOS_CACHE_NS_KEY)
if ns_key is None:
ns_key = random.randint(1, 10000)
cache.set(DEMOS_CACHE_NS_KEY, ns_key)
full_path = args[0].get_full_path()
username = args[0].user.username
return 'demos_%s:%s' % (ns_key,
hashlib.md5(full_path + username).hexdigest())
@register_cached_inclusion_tag('demos/elements/submission_listing.html',
submission_listing_cache_key)
def submission_listing(request, submission_list, is_paginated, paginator,
page_obj, feed_title, feed_url,
cols_per_row=3, pagination_base_url='', show_sorts=True,
show_submit=False):
return locals()
@register.inclusion_tag('demos/elements/tech_tags_list.html')
def tech_tags_list():
return locals()
# Not cached, because it's small and changes based on
# current search query string
@register.inclusion_tag('demos/elements/search_form.html')
@jinja2.contextfunction
def search_form(context):
return new_context(**locals())
bitly_api = None
def _get_bitly_api():
"""Get an instance of the bit.ly API class"""
global bitly_api
if bitly_api is None:
import bitly
login = getattr(settings, 'BITLY_USERNAME', '')
apikey = getattr(settings, 'BITLY_API_KEY', '')
bitly_api = bitly.Api(login, apikey)
return bitly_api
@register.filter
def bitly_shorten(url):
"""Attempt to shorten a given URL through bit.ly / mzl.la"""
try:
# TODO:caching
return _get_bitly_api().shorten(url)
except:
# Just in case the bit.ly service fails or the API key isn't
# configured, fall back to using the original URL.
return url
@register.function
def devderby_tag_to_date_url(tag):
"""Turn a devderby tag like challenge:2011:june into a date-based URL"""
# HACK: Not super happy with this, but it works for now
if not tag:
return ''
parts = tag.split(':')
return reverse('demos_devderby_by_date', args=(parts[-2], parts[-1]))
@register.function
def license_link(license_name):
if license_name in DEMO_LICENSES:
return DEMO_LICENSES[license_name]['link']
else:
return license_name
@register.function
def license_title(license_name):
if license_name in DEMO_LICENSES:
return DEMO_LICENSES[license_name]['title']
else:
return license_name
@register.function
def tag_title(tag):
if not tag:
return ''
name = (isinstance(tag, basestring)) and tag or tag.name
if name in TAG_DESCRIPTIONS:
return TAG_DESCRIPTIONS[name]['title']
else:
return name
@register.function
def tag_description(tag):
if not tag:
return ''
name = (isinstance(tag, basestring)) and tag or tag.name
if name in TAG_DESCRIPTIONS and 'description' in TAG_DESCRIPTIONS[name]:
return TAG_DESCRIPTIONS[name]['description']
else:
return name
@register.function
def tag_learn_more(tag):
if not tag:
return ''
if (tag.name in TAG_DESCRIPTIONS and
'learn_more' in TAG_DESCRIPTIONS[tag.name]):
return TAG_DESCRIPTIONS[tag.name]['learn_more']
else:
return []
@register.function
def tag_meta(tag, other_name):
"""Get metadata for a tag or tag name."""
# TODO: Replace usage of tag_{title,description,learn_more}?
if not tag:
return ''
name = (isinstance(tag, basestring)) and tag or tag.name
if name in TAG_DESCRIPTIONS and other_name in TAG_DESCRIPTIONS[name]:
return TAG_DESCRIPTIONS[name][other_name]
else:
return ''
@register.function
def tags_for_object(obj):
tags = obj.taggit_tags.all()
return tags
@register.function
def tech_tags_for_object(obj):
return obj.taggit_tags.all_ns('tech')
@register.function
def tags_used_for_submissions():
return TaggedItem.tags_for(Submission)
@register.filter
def date_diff(timestamp, to=None):
if not timestamp:
return ""
compare_with = to or datetime.date.today()
delta = timestamp - compare_with
if delta.days == 0:
return u"today"
elif delta.days == -1:
return u"yesterday"
elif delta.days == 1:
return u"tomorrow"
chunks = (
(365.0, lambda n: ungettext('year', 'years', n)),
(30.0, lambda n: ungettext('month', 'months', n)),
(7.0, lambda n: ungettext('week', 'weeks', n)),
(1.0, lambda n: ungettext('day', 'days', n)),
)
for i, (chunk, name) in enumerate(chunks):
if abs(delta.days) >= chunk:
count = abs(round(delta.days / chunk, 0))
break
date_str = (ugettext('%(number)d %(type)s') %
{'number': count, 'type': name(count)})
if delta.days > 0:
return "in " + date_str
else:
return date_str + " ago"
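# Illustrative sketch (dates are hypothetical; assumes the default English
# fallback for translations):
def _date_diff_example():
    today = datetime.date(2013, 5, 1)
    return (date_diff(today + datetime.timedelta(days=1), to=today),   # u"tomorrow"
            date_diff(today - datetime.timedelta(days=14), to=today))  # "2 weeks ago"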
# TODO: Maybe just register the template tag functions in the jingo environment
# directly, rather than building adapter functions?
@register.function
def get_threaded_comment_flat(content_object, tree_root=0):
return ThreadedComment.public.get_tree(content_object, root=tree_root)
@register.function
def get_threaded_comment_tree(content_object, tree_root=0):
"""Convert the flat list with depth indices into a true tree structure for
recursive template display"""
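    # For reference, a flat list of comments with depths [0, 1, 1, 2, 0]
    # produces (comment ids are hypothetical):
    #   root children: [c1, c5]; c1 children: [c2, c3]; c3 children: [c4]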
root = dict(children=[])
parent_stack = [root, ]
flat = ThreadedComment.public.get_tree(content_object, root=tree_root)
for comment in flat:
c = dict(comment=comment, children=[])
if (comment.depth > len(parent_stack) - 1 and
len(parent_stack[-1]['children'])):
parent_stack.append(parent_stack[-1]['children'][-1])
while comment.depth < len(parent_stack) - 1:
parent_stack.pop(-1)
parent_stack[-1]['children'].append(c)
return root
@register.inclusion_tag('demos/elements/comments_tree.html')
def comments_tree(request, object, root):
return locals()
@register.function
def get_comment_url(content_object, parent=None):
return threadedcommentstags.get_comment_url(content_object, parent)
@register.function
def get_threaded_comment_form():
return ThreadedCommentForm()
@register.function
def auto_transform_markup(comment):
return threadedcommentstags.auto_transform_markup(comment)
@register.function
def can_delete_comment(comment, user):
return threadedcomments.views.can_delete_comment(comment, user)
@register.filter
def timesince(d, now=None):
"""Take two datetime objects and return the time between d and now as a
nicely formatted string, e.g. "10 minutes". If d is None or occurs after
now, return ''.
Units used are years, months, weeks, days, hours, and minutes. Seconds and
microseconds are ignored. Just one unit is displayed. For example,
"2 weeks" and "1 year" are possible outputs, but "2 weeks, 3 days" and "1
year, 5 months" are not.
Adapted from django.utils.timesince to have better i18n (not assuming
commas as list separators and including "ago" so order of words isn't
assumed), show only one time unit, and include seconds.
"""
if d is None:
return u''
chunks = [
(60 * 60 * 24 * 365, lambda n: ungettext('%(number)d year ago',
'%(number)d years ago', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('%(number)d month ago',
'%(number)d months ago', n)),
(60 * 60 * 24 * 7, lambda n: ungettext('%(number)d week ago',
'%(number)d weeks ago', n)),
(60 * 60 * 24, lambda n: ungettext('%(number)d day ago',
'%(number)d days ago', n)),
(60 * 60, lambda n: ungettext('%(number)d hour ago',
'%(number)d hours ago', n)),
(60, lambda n: ungettext('%(number)d minute ago',
'%(number)d minutes ago', n)),
(1, lambda n: ungettext('%(number)d second ago',
'%(number)d seconds ago', n))]
if not now:
if d.tzinfo:
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
# Ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u''
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
return name(count) % {'number': count}
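# Illustrative sketch (times are hypothetical; assumes the default English
# fallback for translations):
def _timesince_example():
    now = datetime.datetime(2013, 5, 1, 12, 0, 0)
    return timesince(now - datetime.timedelta(hours=3), now=now)  # u'3 hours ago'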
def _babel_locale(locale):
"""Return the Babel locale code, given a normal one."""
# Babel uses underscore as separator.
return locale.replace('-', '_')
def _contextual_locale(context):
"""Return locale from the context, falling back to a default if invalid."""
locale = context['request'].locale
if not localedata.exists(locale):
locale = settings.LANGUAGE_CODE
return locale
| mpl-2.0 | -2,504,698,096,416,612,400 | 30.381062 | 79 | 0.628201 | false |
yongtang/tensorflow | tensorflow/python/tpu/training_loop.py | 9 | 9472 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Library for constructing a training loop, suitable for TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Iterable, List, Optional, Union
from tensorflow.python.compiler.xla import xla
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.types import core as core_types
def while_loop(condition: Callable[..., Any],
body: Callable[..., Any],
inputs: Optional[List[Any]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
name: Any = None) -> Any:
"""Builds a training loop for TPUs.
The set of loop-carried tensors corresponds to `inputs`. Both
`condition` and `body` take the current value of the loop-carried
tensors. 'body' additionally takes a tuple of infeed from
infeed_queue if infeed_queue is not None. `condition` must return a
single boolean value that determines whether iteration
continues. `body` must return an updated list of values for the
loop-carried tensors.
Args:
condition: a Python function that builds the loop condition.
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop, or None
(equivalent to an empty list).
    infeed_queue: if not None, the infeed queue from which to append a tuple of
      arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
TypeError: if body or condition has the wrong signature.
"""
del name
# Converts inputs to Tensors.
inputs = [] if inputs is None else [ops.convert_to_tensor(x) for
x in inputs]
input_types = [x.dtype for x in inputs]
input_arity = len(inputs)
body_arg_error = xla.check_function_argument_count(
body, input_arity, infeed_queue)
if body_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s, but the loop body needs %s" % (
input_arity, str([i.name for i in inputs]), body_arg_error))
else:
raise TypeError(
"Supplied loop body function cannot be called with the specified "
"inputs. You specified %d inputs: %s and %d additional inputs from "
"infeed, but the computation needs %s" % (input_arity, str(
[i.name for i in inputs]), infeed_queue.number_of_tuple_elements,
body_arg_error))
condition_arg_error = xla.check_function_argument_count(
condition, input_arity, None)
if condition_arg_error is not None:
if infeed_queue is None:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s" % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
else:
raise TypeError(
"Supplied loop condition function cannot be called with the "
"specified inputs. You specified %d inputs: %s, but the loop "
"condition needs %s. Note that infeed is not passed to the loop "
"condition." % (input_arity, str([i.name for i in inputs]),
condition_arg_error))
def condition_wrapper(*inputs):
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
return condition(*inputs)
def body_wrapper(*inputs):
"""Wrapper around `body` that handles infeed queues and control deps."""
inputs = list(inputs)
# Discards the dummy output added for arity-0 loops.
if input_arity == 0:
inputs = []
# Runs `body` with the dequeue_ops appended.
if infeed_queue:
number_of_shards = tpu_function.get_tpu_context().number_of_shards
if number_of_shards is None:
raise ValueError("Can't build training loop with infeed when there is "
"no tpu_shard_context. Are you building a loop or "
"graph directly rather than from inside tpu.rewrite, "
"tpu.batch_parallel, tpu.shard, or tpu.replicate?")
infeed_queue.set_number_of_shards(number_of_shards)
dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]
else:
dequeue_ops = []
outputs = body(*(inputs + dequeue_ops))
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, (list, tuple)):
outputs = (outputs,)
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs
if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
"TPU training loop body must return zero or more Tensor values "
"followed by zero or more Operations.")
output_types = [op.dtype for op in output_tensors]
if input_types != output_types:
raise TypeError(
"Mismatch between input types and output types for training loop "
"body: {} vs {}".format(input_types, output_types))
# Add the dequeue operations to output_operations to ensure they are run
# by the loop, even if the programmer's loop body does not use them.
output_operations += dequeue_ops
# Add a dummy output, if needed.
if not output_tensors:
output_tensors = array_ops.constant(0)
if output_operations:
# TODO(phawkins): in principle this is too restrictive since it serializes
# the training loop steps. In practice it does not matter since this loop
# will be compiled by XLA.
output_tensors = control_flow_ops.tuple(output_tensors,
control_inputs=output_operations)
if tensor_tracer.TensorTracer.is_enabled():
num_replicas = tpu_function.get_tpu_context().number_of_shards
if num_replicas is None:
num_replicas = 1
tt = tensor_tracer.TensorTracer()
output_tensors = tt.trace_tpu(ops.get_default_graph(),
output_tensors, None,
num_replicas)
return output_tensors
# If the body has arity 0, add a dummy loop-carried value to which we can add
# control dependencies from any side-effecting operations.
if input_arity == 0:
inputs = [array_ops.constant(0)]
return control_flow_ops.while_loop(
condition_wrapper, body_wrapper, inputs, name="", parallel_iterations=1)
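# Illustrative sketch (the computation is hypothetical): run five iterations,
# carrying a counter and a running total through the loop.
def _example_while_loop():
  def _cond(i, total):
    del total
    return i < 5
  def _body(i, total):
    return i + 1, total + i
  return while_loop(_cond, _body, inputs=[0, 0])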
def repeat(
n: int,
body: Callable[..., Union[core_types.TensorLike, Iterable]], # pylint:disable=g-bare-generic
inputs: Optional[List[core_types.TensorLike]] = None,
infeed_queue: Optional[tpu_feed.InfeedQueue] = None,
name: Any = None) -> List[core_types.TensorLike]:
"""Builds a training loop that executes a fixed number of iterations.
  The set of loop-carried tensors corresponds to `inputs`.
`body` must be a function that takes and returns the values of the
loop-carried tensors.
Args:
n: the number of loop iterations
body: a Python function that builds the loop body.
inputs: a list of initial values passed into the training loop or None
(equivalent to an empty list).
    infeed_queue: if not None, the infeed queue from which to append a tuple of
      arguments as inputs to `body`.
name: (Deprecated) Does nothing.
Returns:
The final values of the loop-carried tensors.
Raises:
ValueError: if there is a type error.
"""
def _convert_to_list(xs):
if not isinstance(xs, (list, tuple)):
return [xs]
else:
return list(xs)
def cond(i, *args):
del args
return i < n
def body_wrapper(i, *args):
return [i + 1] + _convert_to_list(body(*args))
inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)
outputs = while_loop(
cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)
outputs = _convert_to_list(outputs)
if len(outputs) == 1:
# Returns the Op rather than an empty list.
return outputs[0].op
else:
return outputs[1:]
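# Illustrative sketch (the computation is hypothetical): run a fixed number of
# steps, carrying a single accumulator tensor through the loop.
def _example_repeat():
  return repeat(10, lambda acc: acc + 1, inputs=[array_ops.constant(0)])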
| apache-2.0 | 6,007,988,542,291,337,000 | 39.135593 | 97 | 0.655089 | false |
jcshen007/cloudstack | test/integration/component/test_acl_isolatednetwork.py | 3 | 50928 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test cases relating to access checks for createNetwork(), deploying VM in an isolated network and restartNetwork() for Admin, domain admin and regular users
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from marvin.cloudstackException import CloudstackAclException
from marvin.lib.common import *
from marvin.codes import (RUNNING,
ALLOCATED)
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class TestIsolatedNetwork(cloudstackTestCase):
@classmethod
def setUpClass(cls):
"""
        Create the following domain tree and accounts that are required for executing the test cases covering access checks for createNetwork(), deploying a VM in an isolated network and restartNetwork():
        Under ROOT - create 2 domains, D1 and D2
        Under D1 - create 2 subdomains, D11 and D12
        Under D11 - create subdomain D111
        Under each of these domains create 1 admin user and a couple of regular users.
        As each of these users, create an isolated network.
"""
cls.testclient = super(TestIsolatedNetwork, cls).getClsTestClient()
cls.apiclient = cls.testclient.getApiClient()
#cls.acldata = Services().services
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.acldata = cls.testdata["acl"]
cls.domain_1 = None
cls.domain_2 = None
cls.cleanup = []
try:
# backup default apikey and secretkey
cls.default_apikey = cls.apiclient.connection.apiKey
cls.default_secretkey = cls.apiclient.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.apiclient,
cls.acldata["domain1"]
)
cls.domain_11 = Domain.create(
cls.apiclient,
cls.acldata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_111 = Domain.create(
cls.apiclient,
cls.acldata["domain111"],
parentdomainid=cls.domain_11.id,
)
cls.domain_12 = Domain.create(
cls.apiclient,
cls.acldata["domain12"],
parentdomainid=cls.domain_1.id
)
cls.domain_2 = Domain.create(
cls.apiclient,
cls.acldata["domain2"]
)
            # Create 1 admin account and 2 user accounts for domain_1
cls.account_d1 = Account.create(
cls.apiclient,
cls.acldata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.apiclient,
cls.acldata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.apiclient,
cls.acldata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
            # Create 1 admin and 2 user accounts for domain_11
cls.account_d11 = Account.create(
cls.apiclient,
cls.acldata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.apiclient,
cls.acldata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.apiclient,
cls.acldata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
            # Create 2 user accounts and 1 admin account for domain_111
cls.account_d111 = Account.create(
cls.apiclient,
cls.acldata["accountD111"],
admin=True,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d111)
cls.user_d111_apikey = user.apikey
cls.user_d111_secretkey = user.secretkey
cls.account_d111a = Account.create(
cls.apiclient,
cls.acldata["accountD111A"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d111a)
cls.user_d111a_apikey = user.apikey
cls.user_d111a_secretkey = user.secretkey
cls.account_d111b = Account.create(
cls.apiclient,
cls.acldata["accountD111B"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d111b)
cls.user_d111b_apikey = user.apikey
cls.user_d111b_secretkey = user.secretkey
            # Create 2 user accounts for domain_12
cls.account_d12a = Account.create(
cls.apiclient,
cls.acldata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.apiclient,
cls.acldata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account for domain_2
cls.account_d2a = Account.create(
cls.apiclient,
cls.acldata["accountD2"],
admin=False,
domainid=cls.domain_2.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d2a)
cls.user_d2a_apikey = user.apikey
cls.user_d2a_secretkey = user.secretkey
# Create 1 user account and admin account in "ROOT" domain
cls.account_roota = Account.create(
cls.apiclient,
cls.acldata["accountROOTA"],
admin=False,
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_roota)
cls.user_roota_apikey = user.apikey
cls.user_roota_secretkey = user.secretkey
cls.account_root = Account.create(
cls.apiclient,
cls.acldata["accountROOT"],
admin=True,
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_root)
cls.user_root_apikey = user.apikey
cls.user_root_secretkey = user.secretkey
# create service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.acldata["service_offering"]["small"]
)
cls.zone = get_zone(cls.apiclient,cls.testclient.getZoneForTests())
cls.acldata['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, cls.acldata["ostype"])
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
list_isolated_network_offerings_response = NetworkOffering.list(
cls.apiclient,
name="DefaultIsolatedNetworkOfferingWithSourceNatService"
)
cls.isolated_network_offering_id = list_isolated_network_offerings_response[0].id
## Create Network objects for deployVirtualMachine and restartNetwork API related test cases
cls.apiclient.connection.apiKey = cls.user_root_apikey
cls.apiclient.connection.securityKey = cls.user_root_secretkey
cls.network_root = cls.createNetwork(cls.apiclient,cls.account_root,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_root.id)
cls.apiclient.connection.apiKey = cls.user_d1_apikey
cls.apiclient.connection.securityKey = cls.user_d1_secretkey
cls.network_d1 = cls.createNetwork(cls.apiclient,cls.account_d1,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d1.id)
cls.apiclient.connection.apiKey = cls.user_d1a_apikey
cls.apiclient.connection.securityKey = cls.user_d1a_secretkey
cls.network_d1a = cls.createNetwork(cls.apiclient,cls.account_d1a,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d1a.id)
cls.apiclient.connection.apiKey = cls.user_d1b_apikey
cls.apiclient.connection.securityKey = cls.user_d1b_secretkey
cls.network_d1b = cls.createNetwork(cls.apiclient,cls.account_d1b,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d1b.id)
cls.apiclient.connection.apiKey = cls.user_d11a_apikey
cls.apiclient.connection.securityKey = cls.user_d11a_secretkey
cls.network_d11a = cls.createNetwork(cls.apiclient,cls.account_d11a,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d11a.id)
cls.apiclient.connection.apiKey = cls.user_d11b_apikey
cls.apiclient.connection.securityKey = cls.user_d11b_secretkey
cls.network_d11b = cls.createNetwork(cls.apiclient,cls.account_d11b,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d11b.id)
cls.apiclient.connection.apiKey = cls.user_d12a_apikey
cls.apiclient.connection.securityKey = cls.user_d12a_secretkey
cls.network_d12a = cls.createNetwork(cls.apiclient,cls.account_d12a,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d12a.id)
cls.apiclient.connection.apiKey = cls.user_roota_apikey
cls.apiclient.connection.securityKey = cls.user_roota_secretkey
cls.network_roota = cls.createNetwork(cls.apiclient,cls.account_roota,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_roota.id)
cls.apiclient.connection.apiKey = cls.user_d111a_apikey
cls.apiclient.connection.securityKey = cls.user_d111a_secretkey
cls.network_d111a = cls.createNetwork(cls.apiclient,cls.account_d111a,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d111a.id)
cls.apiclient.connection.apiKey = cls.user_d111b_apikey
cls.apiclient.connection.securityKey = cls.user_d111b_secretkey
cls.network_d111b = cls.createNetwork(cls.apiclient,cls.account_d111b,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d111b.id)
cls.apiclient.connection.apiKey = cls.user_d2a_apikey
cls.apiclient.connection.securityKey = cls.user_d2a_secretkey
cls.network_d2a = cls.createNetwork(cls.apiclient,cls.account_d2a,cls.isolated_network_offering_id,cls.zone)
cls.createVM(cls.apiclient,cls.zone.id,cls.service_offering.id,cls.template.id,cls.network_d2a.id)
cls.cleanup = [
cls.account_root,
cls.account_roota,
cls.service_offering,
]
except Exception as e:
cls.domain_1.delete(cls.apiclient,cleanup="true")
cls.domain_2.delete(cls.apiclient,cleanup="true")
cleanup_resources(cls.apiclient, cls.cleanup)
raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestIsolatedNetwork, cls).getClsTestClient().getApiClient()
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
cls.domain_1.delete(cls.apiclient,cleanup="true")
cls.domain_2.delete(cls.apiclient,cleanup="true")
cleanup_resources(cls.apiclient, cls.cleanup)
return
def setUp(cls):
cls.apiclient = cls.testClient.getApiClient()
cls.dbclient = cls.testClient.getDbConnection()
def tearDown(cls):
# restore back default apikey and secretkey
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
return
## Test cases relating to createNetwork as admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_01_createNetwork_admin(self):
"""
        # Validate that Admin should be able to create network for himself
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
self.acldata["network"]["name"] = "root"
self.acldata["network"]["displayname"] = "root"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Admin User is not able to create a network for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_02_createNetwork_admin_foruserinsamedomain(self):
"""
# Validate that Admin should be able to create network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
self.acldata["network"]["name"] = "root_roota"
self.acldata["network"]["displayname"] = "root_roota"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Admin User is not able to create a network for other users in his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_03_createNetwork_admin_foruserinotherdomain(self):
"""
# Validate that Admin should be able to create network for users in his sub domain
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
self.acldata["network"]["name"] = "root_d11a"
self.acldata["network"]["displayname"] = "root_d11a"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Admin User is not able to create a network for for other users in other domain")
## Test cases relating to createNetwork as domain admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_04_createNetwork_domaindmin(self):
"""
        # Validate that Domain admin should be able to create network for himself
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.acldata["network"]["name"] = "d1"
self.acldata["network"]["displayname"] = "d1"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Domain admin User is not able to create a network for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_05_createNetwork_domaindmin_foruserinsamedomain(self):
"""
# Validate that Domain admin should be able to create network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.acldata["network"]["name"] = "d1-d1a"
self.acldata["network"]["displayname"] = "d1-d1a"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Domain admin User is not able to create a network for other users in his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_06_createNetwork_domaindmin_foruserinsubdomain(self):
"""
# Validate that Domain admin should be able to create network for users in his sub domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.acldata["network"]["name"] = "d1_d11a"
self.acldata["network"]["displayname"] = "d1_d11a"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"Domain admin User is not able to create a network for other users in his sub domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_07_createNetwork_domaindmin_forcrossdomainuser(self):
"""
        # Validate that Domain admin should NOT be able to create network for users in other domains
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
self.acldata["network"]["name"] = "d1_d2a"
self.acldata["network"]["displayname"] = "d1_d2a"
try:
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail("Domain admin is allowed to create network for users not in his domain ")
except Exception as e:
self.debug ("When Domain admin tries to create network for users in his sub domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail("Error message validation failed when Domain admin tries to create network for users not in his domain ")
## Test cases relating to createNetwork as regular user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_08_createNetwork_user(self):
"""
        # Validate that Regular user should be able to create network for himself
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
self.acldata["network"]["name"] = "d1a"
self.acldata["network"]["displayname"] = "d1a"
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id
)
self.assertEqual(network.state.lower() == ALLOCATED.lower(),
True,
"User is not able to create a network for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_09_createNetwork_user_foruserinsamedomain(self):
"""
# Validate that Regular user should NOT be able to create network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
self.acldata["network"]["name"] = "d1a_d1b"
self.acldata["network"]["displayname"] = "d1a_d1b"
try:
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d1b.name,
domainid=self.account_d1b.domainid
)
self.fail("User is allowed to create network for other users in his domain ")
except Exception as e:
self.debug ("When user tries to create network for users in his domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT):
self.fail("Error message validation failed when when User tries to create network for other users in his domain ")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_10_createNetwork_user_foruserinotherdomain(self):
"""
        # Validate that Regular user should NOT be able to create network for users in other domains
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
self.acldata["network"]["name"] = "d1a_d11a"
self.acldata["network"]["displayname"] = "d1a_d11a"
try:
network = Network.create(
self.apiclient,
self.acldata["network"],
networkofferingid=self.isolated_network_offering_id,
zoneid=self.zone.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.fail("User is allowed to create network for users not in his domain ")
except Exception as e:
self.debug ("When user tries to create network for users in other domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.UNABLE_TO_LIST_NETWORK_ACCOUNT):
self.fail("Error message validation failed when User tries to create network for users not in his domain ")
## Test cases relating to Deploying VM in a network as admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_11_deployvm_admin(self):
"""
# Validate that Admin should be able to deploy VM in the networks he owns
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
vmData = {"name":"root-root","dispayname":"root-root"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_root.id
)
self.assertEqual(vm.state.lower() == RUNNING.lower(),
True,
"Admin User is not able to deploy VM in his own network")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_12_deployvm_admin_foruserinsamedomain(self):
"""
# Validate that Admin should be able to deploy Vm for users in his domain
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
vmData={"name":"roota-root","displayname":"roota-root"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_roota.id,
accountid=self.account_roota.name,
domainid=self.account_roota.domainid
)
self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account== self.account_roota.name and vm.domainid == self.account_roota.domainid,
True,
"Admin User is not able to deploy VM for users in his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_13_deployvm_admin_foruserinotherdomain(self):
"""
        # Validate that Admin should be able to deploy VM for users in other domains
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
vmData={"name":"d2a-root","displayname":"d2a-root"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d2a.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account== self.account_d2a.name and vm.domainid == self.account_d2a.domainid,
True,
"Admin User is not able to deploy VM for users users in other domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_13_1_deployvm_admin_foruserinotherdomain_crossnetwork(self):
"""
        # Validate that Admin should not be able to deploy VM for a user in a network that does not belong to the user
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
vmData={"name":"d11a-root-invalidnetwork","displayname":"d11a-root-invalidnetwork"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d11b.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.fail("Admin is allowed to deploy VM for a user in a network that does not belong to the user ")
except Exception as e:
self.debug ("When admin tries to deploy vm for users in network that does not belong to the user %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail("Admin tries to deploy VM for a user in a network that does not belong to the user ")
## Test cases relating to deploying VM as domain admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_14_deployvm_domaindmin(self):
"""
        # Validate that Domain admin should be able to deploy vm for himself
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmData={"name":"d1-d1","displayname":"d1-d1"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d1.id,
)
self.assertEqual(vm.state.lower() == RUNNING.lower(),
True,
"Domain admin User is not able to deploy VM for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_15_deployvm_domaindmin_foruserinsamedomain(self):
"""
# Validate that Domain admin should be able to deploy vm for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmData={"name":"d1a-d1","displayname":"d1a-d1"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d1a.id,
accountid=self.account_d1a.name,
domainid=self.account_d1a.domainid
)
self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account== self.account_d1a.name and vm.domainid == self.account_d1a.domainid,
True,
"Domain admin User is not able to deploy VM for other users in his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_16_deployvm_domaindmin_foruserinsubdomain(self):
"""
# Validate that Domain admin should be able to deploy vm for users in his sub domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmData={"name":"d11a-d1","displayname":"d111a-d1"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d11a.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.assertEqual(vm.state.lower() == RUNNING.lower() and vm.account== self.account_d11a.name and vm.domainid == self.account_d11a.domainid,
True,
"Domain admin User is not able to deploy vm for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_17_deployvm_domaindmin_forcrossdomainuser(self):
"""
        # Validate that Domain admin should not be allowed to deploy vm for users not in his domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmData={"name":"d2a-d1","displayname":"d2a-d1"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d2a.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail("Domain admin is allowed to deploy vm for users not in hos domain ")
except Exception as e:
self.debug ("When Domain admin tries to deploy vm for users in his sub domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail("Error message validation failed when Domain admin tries to deploy vm for users not in hos domain ")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_17_1_deployvm_domainadmin_foruserinotherdomain_crossnetwork(self):
"""
        # Validate that Domain admin should not be able to deploy VM for a user in a network that does not belong to the user
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmData={"name":"d1-d11a-invalidnetwork","displayname":"d1-d11a-invalidnetwork"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d11b.id,
accountid=self.account_d11a.name,
domainid=self.account_d11a.domainid
)
self.fail("Domain admin is allowed to deploy vm for users in a network that does not belong to him ")
except Exception as e:
self.debug ("When domain admin tries to deploy vm for users in network that does not belong to the user %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail("Error message validation failed when Domain admin tries to deploy vm for users in a network that does not belong to him ")
## Test cases relating to deploying VM as regular user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_18_deployvm_user(self):
"""
        # Validate that Regular user should be able to deploy vm for himself
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmData={"name":"d1a-d1a","displayname":"d1a-d1a"}
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d1a.id,
)
self.assertEqual(vm.state.lower() == RUNNING.lower(),
True,
"User is not able to deploy vm for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_19_deployvm_user_foruserinsamedomain(self):
"""
# Validate that Regular user should NOT be able to deploy vm for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmData={"name":"d1b-d1a","displayname":"d1b-d1a"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d1b.id,
accountid=self.account_d1b.name,
domainid=self.account_d1b.domainid
)
self.fail("Regular user is allowed to deploy vm for other users in his domain ")
except Exception as e:
self.debug ("When user tries to deploy vm for users in his domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
self.fail("Error message validation failed when Regular user tries to deploy vm for other users in his domain ")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_20_deployvm_user_foruserincrossdomain(self):
"""
        # Validate that Regular user should NOT be able to deploy vm for users in other domains
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmData={"name":"d2a-d1a","displayname":"d2a-d1a"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d2a.id,
accountid=self.account_d2a.name,
domainid=self.account_d2a.domainid
)
self.fail("Regular user is allowed to deploy vm for users not in his domain ")
except Exception as e:
self.debug ("When user tries to deploy vm for users n different domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
self.fail("Error message validation failed when Regular user tries to deploy vm for users not in his domain ")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_20_1_deployvm_user_incrossnetwork(self):
"""
        #Validate that User should not be able to deploy VM in a network that does not belong to him
"""
self.apiclient.connection.apiKey = self.user_d11a_apikey
self.apiclient.connection.securityKey = self.user_d11a_secretkey
vmData={"name":"d11a-invalidnetwork","displayname":"d11a-invalidnetwork"}
try:
vm = VirtualMachine.create(
self.apiclient,
vmData,
zoneid=self.zone.id,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
networkids=self.network_d11b.id,
)
self.fail("User is allowed to deploy VM in a network that does not belong to him ")
except Exception as e:
self.debug ("When user tries to deploy vm in a network that does not belong to him %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.UNABLE_TO_USE_NETWORK):
self.fail("Error message validation failed when User is allowed to deploy VM in a network that does not belong to him ")
## Test cases relating to restart Network as admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_21_restartNetwork_admin(self):
"""
#Validate that Admin should be able to restart network for networks he owns
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
restartResponse = self.network_root.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Admin User is not able to restart network he owns")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_22_restartNetwork_admin_foruserinsamedomain(self):
"""
# Validate that Admin should be able to restart network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
restartResponse = self.network_roota.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Admin User is not able to restart network owned by users his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_23_restartNetwork_admin_foruserinotherdomain(self):
"""
# Validate that Admin should be able to restart network for users in his sub domain
"""
self.apiclient.connection.apiKey = self.user_root_apikey
self.apiclient.connection.securityKey = self.user_root_secretkey
restartResponse = self.network_d11a.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Admin User is not able to restart network owned other users in other domain")
## Test cases relating to restart Network as domain admin user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_24_restartNetwork_domaindmin(self):
"""
        # Validate that Domain admin should be able to restart network for himself
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
restartResponse = self.network_d1.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Domain admin User is not able to restart network for himself")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_25_restartNetwork_domaindmin_foruserinsamedomain(self):
"""
# Validate that Domain admin should be able to restart network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
restartResponse = self.network_d1a.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Domain admin User is not able to restart network for other users in his domain")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_26_restartNetwork_domaindmin_foruserinsubdomain(self):
"""
# Validate that Domain admin should be able to restart network for users in his sub domain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
restartResponse = self.network_d11a.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"Domain admin User is not able to restart network he owns")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_27_restartNetwork_domaindmin_forcrossdomainuser(self):
"""
        # Validate that Domain admin should NOT be able to restart network for users in other domains
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
try:
restartResponse = self.network_d2a.restart(self.apiclient)
self.fail("Domain admin is allowed to restart network for users not in his domain ")
except Exception as e:
self.debug ("When Domain admin tries to restart network for users in his sub domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_DOMAIN):
self.fail("Error message validation failed when Domain admin tries to restart network for users not in his domain ")
## Test cases relating restart network as regular user
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_28_restartNetwork_user(self):
"""
        #Validate that Regular user should be able to restart network for himself
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
restartResponse = self.network_d1a.restart(self.apiclient)
self.assertEqual(restartResponse.success,
True,
"User is not able to restart network he owns")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_29_restartNetwork_user_foruserinsamedomain(self):
"""
#Validate that Regular user should NOT be able to restart network for users in his domain
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
try:
restartResponse = self.network_d1b.restart(self.apiclient)
self.fail("Regular user is allowed to restart network for users in his domain ")
except Exception as e:
self.debug ("When user tries to restart network for users in his domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
self.fail("Error message validation failed when Regular user tries to restart network for users in his domain ")
@attr("simulator_only",tags=[ "advanced"],required_hardware="false")
def test_30_restartNetwork_user_foruserinotherdomain(self):
"""
        #Validate that Regular user should NOT be able to restart network for users in other domains
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
try:
restartResponse = self.network_d11a.restart(self.apiclient)
self.fail("Regular user is allowed to restart network for users not in his domain ")
except Exception as e:
self.debug ("When user tries to restart network for users in other domain %s" %e)
if not CloudstackAclException.verifyMsginException(e,CloudstackAclException.NO_PERMISSION_TO_OPERATE_ACCOUNT):
self.fail("Error message validation failed when Regular user is allowed to restart network for users not in his domain ")
@staticmethod
def generateKeysForUser(apiclient,account):
user = User.list(
apiclient,
account=account.name,
domainid=account.domainid
)[0]
return (User.registerUserKeys(
apiclient,
user.id
))
@staticmethod
def createNetwork(apiclient,account,isolated_network_offering_id,zone):
network= {
"name": "Network-",
"displaytext": "Network-",
"gateway" :"10.223.1.1",
"netmask" :"255.255.255.0",
"startip" :"10.223.1.2",
"endip" :"10.223.1.100",
}
network["name"] = account.name +" -forupdate"
network["displayname"] = account.name + "-forupdate"
network = Network.create(
apiclient,
network,
networkofferingid=isolated_network_offering_id,
zoneid=zone.id
)
return network
@staticmethod
def createVM(apiclient,zoneId,serviceOfferingId,templateId,networkId):
vmData = {"name":"prereq","dispayname":"prereq"}
vm = VirtualMachine.create(
apiclient,
vmData,
zoneid=zoneId,
serviceofferingid=serviceOfferingId,
templateid=templateId,
networkids=networkId
)
return vm
| apache-2.0 | 841,529,715,351,655,700 | 43.246742 | 201 | 0.615084 | false |
gavinandresen/bitcoin-git | share/qt/extract_strings_qt.py | 12 | 2526 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("bitcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit | -8,493,147,293,304,912,000 | 28.034483 | 106 | 0.612035 | false |
mjbrewer/testindex | magnum/tests/unit/api/controllers/v1/test_x509keypair.py | 7 | 13729 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves.urllib import parse as urlparse
from magnum.api.controllers.v1 import x509keypair as api_x509keypair
from magnum.common import utils
from magnum.conductor import api as rpcapi
from magnum import objects
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestX509KeyPairObject(base.TestCase):
def test_x509keypair_init(self):
x509keypair_dict = apiutils.x509keypair_post_data(bay_uuid=None)
x509keypair = api_x509keypair.X509KeyPair(**x509keypair_dict)
self.assertEqual('certificate', x509keypair.certificate)
class TestListX509KeyPair(api_base.FunctionalTest):
def setUp(self):
super(TestListX509KeyPair, self).setUp()
self.bay = obj_utils.create_test_bay(self.context)
def test_empty(self):
response = self.get_json('/x509keypairs')
self.assertEqual([], response['x509keypairs'])
def test_one(self):
x509keypair = obj_utils.create_test_x509keypair(self.context)
response = self.get_json('/x509keypairs')
self.assertEqual(x509keypair.uuid, response['x509keypairs'][0]["uuid"])
self.assertIn('name', response['x509keypairs'][0])
self.assertIn('bay_uuid', response['x509keypairs'][0])
self.assertIn('certificate', response['x509keypairs'][0])
self.assertIn('private_key', response['x509keypairs'][0])
def test_get_one(self):
x509keypair = obj_utils.create_test_x509keypair(self.context)
response = self.get_json('/x509keypairs/%s' % x509keypair['uuid'])
self.assertEqual(x509keypair.uuid, response['uuid'])
self.assertIn('name', response)
self.assertIn('bay_uuid', response)
self.assertIn('certificate', response)
self.assertIn('private_key', response)
def test_get_one_by_name(self):
x509keypair = obj_utils.create_test_x509keypair(self.context)
response = self.get_json('/x509keypairs/%s' % x509keypair['name'])
self.assertEqual(x509keypair.uuid, response['uuid'])
self.assertIn('name', response)
self.assertIn('bay_uuid', response)
self.assertIn('certificate', response)
self.assertIn('private_key', response)
def test_get_one_by_name_not_found(self):
response = self.get_json(
'/x509keypairs/not_found',
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_get_one_by_name_multiple_x509keypair(self):
obj_utils.create_test_x509keypair(self.context,
name='test_x509keypair',
uuid=utils.generate_uuid())
obj_utils.create_test_x509keypair(self.context,
name='test_x509keypair',
uuid=utils.generate_uuid())
response = self.get_json('/x509keypairs/test_x509keypair',
expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_detail(self):
x509keypair = obj_utils.create_test_x509keypair(self.context)
response = self.get_json('/x509keypairs/detail')
self.assertEqual(x509keypair.uuid, response['x509keypairs'][0]["uuid"])
self.assertIn('name', response['x509keypairs'][0])
self.assertIn('bay_uuid', response['x509keypairs'][0])
self.assertIn('certificate', response['x509keypairs'][0])
self.assertIn('private_key', response['x509keypairs'][0])
def test_detail_against_single(self):
x509keypair = obj_utils.create_test_x509keypair(self.context)
response = self.get_json(
'/x509keypairs/%s/detail' % x509keypair['uuid'],
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_many(self):
keypair_list = []
for id_ in range(5):
x509keypair = obj_utils.create_test_x509keypair(
self.context, id=id_,
uuid=utils.generate_uuid())
keypair_list.append(x509keypair.uuid)
response = self.get_json('/x509keypairs')
self.assertEqual(len(keypair_list), len(response['x509keypairs']))
uuids = [b['uuid'] for b in response['x509keypairs']]
self.assertEqual(sorted(keypair_list), sorted(uuids))
def test_links(self):
uuid = utils.generate_uuid()
obj_utils.create_test_x509keypair(self.context, id=1, uuid=uuid)
response = self.get_json('/x509keypairs/%s' % uuid)
self.assertIn('links', response.keys())
self.assertEqual(2, len(response['links']))
self.assertIn(uuid, response['links'][0]['href'])
for l in response['links']:
bookmark = l['rel'] == 'bookmark'
self.assertTrue(self.validate_link(l['href'], bookmark=bookmark))
def test_collection_links(self):
for id_ in range(5):
obj_utils.create_test_x509keypair(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/x509keypairs/?limit=3')
self.assertEqual(3, len(response['x509keypairs']))
next_marker = response['x509keypairs'][-1]['uuid']
self.assertIn(next_marker, response['next'])
def test_collection_links_default_limit(self):
cfg.CONF.set_override('max_limit', 3, 'api')
for id_ in range(5):
obj_utils.create_test_x509keypair(self.context, id=id_,
uuid=utils.generate_uuid())
response = self.get_json('/x509keypairs')
self.assertEqual(3, len(response['x509keypairs']))
next_marker = response['x509keypairs'][-1]['uuid']
self.assertIn(next_marker, response['next'])
class TestPost(api_base.FunctionalTest):
def setUp(self):
super(TestPost, self).setUp()
self.bay = obj_utils.create_test_bay(self.context)
p = mock.patch.object(rpcapi.API, 'x509keypair_create')
self.mock_x509keypair_create = p.start()
self.mock_x509keypair_create.side_effect = \
self._simulate_rpc_x509keypair_create
self.addCleanup(p.stop)
def _simulate_rpc_x509keypair_create(self, x509keypair):
x509keypair.create()
return x509keypair
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_x509keypair(self, mock_utcnow):
cdict = apiutils.x509keypair_post_data()
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
response = self.post_json('/x509keypairs', cdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
# Check location header
self.assertIsNotNone(response.location)
expected_location = '/v1/x509keypairs/%s' % cdict['uuid']
self.assertEqual(urlparse.urlparse(response.location).path,
expected_location)
self.assertEqual(cdict['uuid'], response.json['uuid'])
        self.assertNotIn('updated_at', response.json.keys())
return_created_at = timeutils.parse_isotime(
response.json['created_at']).replace(tzinfo=None)
self.assertEqual(test_time, return_created_at)
def test_create_x509keypair_set_project_id_and_user_id(self):
cdict = apiutils.x509keypair_post_data()
def _simulate_keypair_create(x509keypair):
self.assertEqual(x509keypair.project_id, self.context.project_id)
self.assertEqual(x509keypair.user_id, self.context.user_id)
x509keypair.create()
return x509keypair
self.mock_x509keypair_create.side_effect = _simulate_keypair_create
self.post_json('/x509keypairs', cdict)
def test_create_x509keypair_doesnt_contain_id(self):
with mock.patch.object(self.dbapi, 'create_x509keypair',
wraps=self.dbapi.create_x509keypair) as cc_mock:
cdict = apiutils.x509keypair_post_data(
name='x509keypair_example_A')
response = self.post_json('/x509keypairs', cdict)
self.assertEqual(cdict['name'], response.json['name'])
cc_mock.assert_called_once_with(mock.ANY)
# Check that 'id' is not in first arg of positional args
self.assertNotIn('id', cc_mock.call_args[0][0])
def test_create_x509keypair_generate_uuid(self):
cdict = apiutils.x509keypair_post_data()
del cdict['uuid']
response = self.post_json('/x509keypairs', cdict)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
self.assertEqual(cdict['name'], response.json['name'])
self.assertTrue(utils.is_uuid_like(response.json['uuid']))
def test_create_x509keypair_no_bay_uuid(self):
cdict = apiutils.x509keypair_post_data()
del cdict['bay_uuid']
response = self.post_json('/x509keypairs', cdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
def test_create_x509keypair_with_non_existent_bay_uuid(self):
cdict = apiutils.x509keypair_post_data(bay_uuid=utils.generate_uuid())
response = self.post_json('/x509keypairs', cdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(400, response.status_int)
self.assertTrue(response.json['error_message'])
def test_create_x509keypair_with_bay_name(self):
cdict = apiutils.x509keypair_post_data(bay_uuid=self.bay.name)
response = self.post_json('/x509keypairs', cdict, expect_errors=True)
self.assertEqual('application/json', response.content_type)
self.assertEqual(201, response.status_int)
class TestDelete(api_base.FunctionalTest):
def setUp(self):
super(TestDelete, self).setUp()
self.bay = obj_utils.create_test_bay(self.context)
self.x509keypair = obj_utils.create_test_x509keypair(self.context)
p = mock.patch.object(rpcapi.API, 'x509keypair_delete')
self.mock_x509keypair_delete = p.start()
self.mock_x509keypair_delete.side_effect = \
self._simulate_rpc_x509keypair_delete
self.addCleanup(p.stop)
def _simulate_rpc_x509keypair_delete(self, x509keypair_uuid):
x509keypair = objects.X509KeyPair.get_by_uuid(self.context,
x509keypair_uuid)
x509keypair.destroy()
def test_delete_x509keypair(self):
self.delete('/x509keypairs/%s' % self.x509keypair.uuid)
response = self.get_json('/x509keypairs/%s' % self.x509keypair.uuid,
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_x509keypair_not_found(self):
uuid = utils.generate_uuid()
response = self.delete('/x509keypairs/%s' % uuid, expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_x509keypair_with_name_not_found(self):
response = self.delete('/x509keypairs/not_found', expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
def test_delete_x509keypair_with_name(self):
response = self.delete('/x509keypairs/%s' % self.x509keypair.name,
expect_errors=True)
self.assertEqual(204, response.status_int)
def test_delete_multiple_x509keypair_by_name(self):
obj_utils.create_test_x509keypair(self.context,
name='test_x509keypair',
uuid=utils.generate_uuid())
obj_utils.create_test_x509keypair(self.context,
name='test_x509keypair',
uuid=utils.generate_uuid())
response = self.delete('/x509keypairs/test_x509keypair',
expect_errors=True)
self.assertEqual(409, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['error_message'])
| apache-2.0 | -3,933,161,691,942,718,500 | 44.460265 | 79 | 0.641853 | false |
Castronova/EMIT | api_old/ODM2/LikeODM1/services/record_service.py | 1 | 8238 |
from wx.lib.pubsub import pub as Publisher
class RecordService():
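    """
    Wraps an EditService: editing and filtering calls are forwarded to the
    underlying service and, when recording is enabled, are also written as
    lines of a replayable Python script through the supplied 'script'
    callable, after which GUI listeners are notified via the pubsub
    "scroll" message.
    """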
# Script header (imports etc.) will be set up in Main when record is clicked.
def __init__(self, script, edit_service, connection_string, record=False):
self._script = script
self._edit_service = edit_service
self._connection_string = connection_string
self._record = record
###################
# Filters
###################
def filter_value(self, value, operator):
self._edit_service.filter_value(value, operator)
if self._record:
self._script("edit_service.filter_value(%s, '%s')\n" % (value, operator), 'black')
Publisher.sendMessage("scroll")
def filter_date(self, before, after):
self._edit_service.filter_date(before, after)
if self._record:
self._script("edit_service.filter_date(%s, %s)\n" % (repr(before), repr(after)), 'black')
Publisher.sendMessage("scroll")
def data_gaps(self, value, time_period):
self._edit_service.data_gaps(value, time_period)
if self._record:
self._script("edit_service.data_gaps(%s, '%s')\n" % (value, time_period), 'black')
Publisher.sendMessage("scroll")
def value_change_threshold(self, value):
self._edit_service.value_change_threshold(value)
if self._record:
self._script("edit_service.value_change_threshold(%s)\n" % (value), 'black')
Publisher.sendMessage("scroll")
def toggle_filter_previous(self):
self._edit_service.toggle_filter_previous()
def select_points_tf(self, tf_list):
self._edit_service.select_points_tf(tf_list)
if self._record:
#print [x[2] for x in self._edit_service.get_filtered_points()]
self._script("edit_service.select_points({list})\n".format(
list=[x[2] for x in self._edit_service.get_filtered_points()]))
Publisher.sendMessage("scroll")
def select_points(self, id_list=[], datetime_list=[]):
self._edit_service.select_points(id_list, datetime_list)
if self._record:
#print [x[2] for x in self._edit_service.get_filtered_points()]
#self._script("edit_service.select_points({list})\n".format(list=[x[2] for x in self._edit_service.get_filtered_points()]))
self._script("edit_service.select_points({id}, {list})\n".format(id=id_list, list=[x[2] for x in
self._edit_service.get_filtered_points()]))
Publisher.sendMessage("scroll")
#print self._edit_service.get_filtered_points()
###################
# Editing
###################
def add_points(self, points):
self._edit_service.add_points(points)
if self._record:
self._script("# add points\n")
Publisher.sendMessage("scroll")
def delete_points(self):
self._edit_service.delete_points()
if self._record:
self._script("edit_service.delete_points()\n", 'black')
Publisher.sendMessage("scroll")
def change_value(self, operator, value):
self._edit_service.change_value(operator, value)
if self._record:
self._script("edit_service.change_value(%s, '%s')\n" % (operator, value), 'black')
Publisher.sendMessage("scroll")
def interpolate(self):
self._edit_service.interpolate()
if self._record:
self._script("edit_service.interpolate()\n", 'black')
Publisher.sendMessage("scroll")
def drift_correction(self, gap_width):
ret = self._edit_service.drift_correction(gap_width)
if self._record:
self._script("edit_service.drift_correction(%s)\n" % (gap_width), 'black')
Publisher.sendMessage("scroll")
return ret
def reset_filter(self):
self._edit_service.reset_filter()
if self._record:
self._script("edit_service.reset_filter()\n", 'black')
Publisher.sendMessage("scroll")
def flag(self, qualifier_id):
self._edit_service.flag(qualifier_id)
if self._record:
self._script("edit_service.flag(%s)\n" % qualifier_id, 'black')
Publisher.sendMessage("scroll")
def restore(self):
self._edit_service.restore()
if self._record:
self._script("edit_service.restore()\n", 'black')
Publisher.sendMessage("scroll")
def save(self, var=None, method=None, qcl=None, isSave=False):
self._edit_service.save(var=var, method=method, qcl=qcl, isSave=isSave)
if self._record:
self._script("edit_service.save()\n", 'black')
Publisher.sendMessage("scroll")
###################
# Gets
###################
def get_series(self):
return self._edit_service.get_series()
def get_series_points(self):
return self._edit_service.get_series_points()
def get_filtered_points(self):
return self._edit_service.get_filtered_points()
def get_filter_list(self):
return self._edit_service.get_filter_list()
def get_filtered_dates(self):
return self._edit_service.get_filtered_dates()
def get_selection_groups(self):
return self._edit_service.get_selection_groups()
def get_qcl(self, q):
qcl = self._edit_service.get_qcl(q.id)
if self._record:
self._script('new_qcl = series_service.get_qcl_by_id(%s)\n' % (qcl.id))
Publisher.sendMessage("scroll")
return qcl
def get_method(self, m):
method = self._edit_service.get_method(m.id)
if self._record:
self._script('new_method = series_service.get_method_by_id(%s)\n' % (method.id))
Publisher.sendMessage("scroll")
return method
def get_variable(self, v):
var = self._edit_service.get_variable(v.id)
if self._record:
self._script('new_variable = series_service.get_variable_by_id(%s)\n' % (var.id))
Publisher.sendMessage("scroll")
return var
def toggle_record(self):
if self._record:
self._record = False
else:
self._record = True
###################
# Creates
###################
def create_qcl(self, code, definition, explanation):
qcl = self._edit_service.create_qcl(code, definition, explanation)
if self._record:
self._script('new_qcl = series_service.get_qcl_by_id(%s)\n' % (qcl.id))
Publisher.sendMessage("scroll")
return qcl
def create_method(self, m):
method = self._edit_service.create_method(m.description, m.link)
if self._record:
self._script('new_method = series_service.get_method_by_id(%s)\n' % (method.id))
Publisher.sendMessage("scroll")
return method
def create_variable(self, v):
var = self._edit_service.create_variable(v.code, v.name, v.speciation, v.variable_unit_id, v.sample_medium,
v.value_type, v.is_regular, v.time_support, v.time_unit_id,
v.data_type, v.general_category, v.no_data_value)
if self._record:
self._script('new_variable = series_service.get_variable_by_id(%s)\n' % (var.id))
Publisher.sendMessage("scroll")
return var
def write_header(self):
self._script("from odmservices import EditService\n", 'black')
self._script("from odmservices import SeriesService\n", 'black')
self._script("edit_service = EditService(series_id={id}, connection_string='{con}')\n".format(
id=self._edit_service._series_id, con=self._connection_string), 'black')
self._script("series_service = SeriesService(connection_string='%s')\n" % (self._connection_string), 'black')
self._script("## To run commands from the python console uncomment and run the following command ##\n", 'black')
self._script("#edit_service = Tools\n", 'black')
Publisher.sendMessage("scroll")
| gpl-2.0 | 2,349,401,841,005,918,000 | 35.451327 | 138 | 0.580117 | false |
dpwrussell/openmicroscopy | components/tools/OmeroWeb/omeroweb/webgateway/marshal.py | 1 | 19351 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import omero
import time
import re
import logging
import traceback
logger = logging.getLogger(__name__)
from omero.rtypes import unwrap
# OMERO.insight point list regular expression
INSIGHT_POINT_LIST_RE = re.compile(r'points\[([^\]]+)\]')
# OME model point list regular expression
OME_MODEL_POINT_LIST_RE = re.compile(r'([\d.]+),([\d.]+)')
def channelMarshal(channel):
"""
return a dict with all there is to know about a channel
@param channel: L{omero.gateway.ChannelWrapper}
@return: Dict
"""
chan = {'emissionWave': channel.getEmissionWave(),
'label': channel.getLabel(),
'color': channel.getColor().getHtml(),
'reverseIntensity': channel.isReverseIntensity(),
'window': {'min': channel.getWindowMin(),
'max': channel.getWindowMax(),
'start': channel.getWindowStart(),
'end': channel.getWindowEnd()},
'active': channel.isActive()}
lut = channel.getLut()
if lut and len(lut) > 0:
chan['lut'] = lut
return chan
def imageMarshal(image, key=None, request=None):
"""
return a dict with pretty much everything we know and care about an image,
all wrapped in a pretty structure.
@param image: L{omero.gateway.ImageWrapper}
@param key: key of specific attributes to select
@return: Dict
"""
image.loadRenderOptions()
pr = image.getProject()
ds = None
wellsample = None
well = None
try:
# Replicating the functionality of the deprecated
# ImageWrapper.getDataset() with shares in mind.
# -- Tue Sep 6 10:48:47 BST 2011 (See #6660)
parents = image.listParents()
if parents is not None and len(parents) == 1:
if parents[0].OMERO_CLASS == 'Dataset':
ds = parents[0]
elif parents[0].OMERO_CLASS == 'WellSample':
wellsample = parents[0]
if wellsample.well is not None:
well = wellsample.well
except omero.SecurityViolation, e:
# We're in a share so the Image's parent Dataset cannot be loaded
# or some other permissions related issue has tripped us up.
logger.warn('Security violation while retrieving Dataset when '
'marshaling image metadata: %s' % e.message)
rv = {
'id': image.id,
'meta': {
'imageName': image.name or '',
'imageDescription': image.description or '',
'imageAuthor': image.getAuthor(),
'projectName': pr and pr.name or 'Multiple',
'projectId': pr and pr.id or None,
'projectDescription': pr and pr.description or '',
'datasetName': ds and ds.name or 'Multiple',
'datasetId': ds and ds.id or None,
'datasetDescription': ds and ds.description or '',
'wellSampleId': wellsample and wellsample.id or '',
'wellId': well and well.id.val or '',
'imageTimestamp': time.mktime(
image.getDate().timetuple()),
'imageId': image.id,
'pixelsType': image.getPixelsType(),
},
'perms': {
'canAnnotate': image.canAnnotate(),
'canEdit': image.canEdit(),
'canDelete': image.canDelete(),
'canLink': image.canLink()
}
}
try:
reOK = image._prepareRenderingEngine()
if not reOK:
logger.debug(
"Failed to prepare Rendering Engine for imageMarshal")
return rv
except omero.ConcurrencyException, ce:
backOff = ce.backOff
rv = {
'ConcurrencyException': {
'backOff': backOff
}
}
return rv
except Exception, ex: # Handle everything else.
rv['Exception'] = ex.message
logger.error(traceback.format_exc())
return rv # Return what we have already, in case it's useful
# big images
tiles = image._re.requiresPixelsPyramid()
rv['tiles'] = tiles
if (tiles):
width, height = image._re.getTileSize()
levels = image._re.getResolutionLevels()
zoomLevelScaling = image.getZoomLevelScaling()
rv.update({'tile_size': {'width': width,
'height': height},
'levels': levels})
if zoomLevelScaling is not None:
rv['zoomLevelScaling'] = zoomLevelScaling
nominalMagnification = image.getObjectiveSettings() is not None \
and image.getObjectiveSettings().getObjective().getNominalMagnification() \
or None
try:
server_settings = request.session.get('server_settings', {}) \
.get('viewer', {})
except:
server_settings = {}
init_zoom = server_settings.get('initial_zoom_level', 0)
if init_zoom < 0:
init_zoom = levels + init_zoom
interpolate = server_settings.get('interpolate_pixels', True)
try:
def pixel_size_in_microns(method):
try:
size = method('MICROMETER')
return size.getValue() if size else None
except:
logger.debug(
'Unable to convert physical pixel size to microns',
exc_info=True
)
return None
rv.update({
'interpolate': interpolate,
'size': {'width': image.getSizeX(),
'height': image.getSizeY(),
'z': image.getSizeZ(),
't': image.getSizeT(),
'c': image.getSizeC()},
'pixel_size': {'x': pixel_size_in_microns(image.getPixelSizeX),
'y': pixel_size_in_microns(image.getPixelSizeY),
'z': pixel_size_in_microns(image.getPixelSizeZ)},
})
if init_zoom is not None:
rv['init_zoom'] = init_zoom
if nominalMagnification is not None:
rv.update({'nominalMagnification': nominalMagnification})
try:
rv['pixel_range'] = image.getPixelRange()
rv['channels'] = map(lambda x: channelMarshal(x),
image.getChannels())
rv['split_channel'] = image.splitChannelDims()
rv['rdefs'] = {'model': (image.isGreyscaleRenderingModel() and
'greyscale' or 'color'),
'projection': image.getProjection(),
'defaultZ': image._re.getDefaultZ(),
'defaultT': image._re.getDefaultT(),
'invertAxis': image.isInvertedAxis()}
except TypeError:
# Will happen if an image has bad or missing pixel data
logger.error('imageMarshal', exc_info=True)
rv['pixel_range'] = (0, 0)
rv['channels'] = ()
rv['split_channel'] = ()
rv['rdefs'] = {'model': 'color',
'projection': image.getProjection(),
'defaultZ': 0,
'defaultT': 0,
'invertAxis': image.isInvertedAxis()}
except AttributeError:
rv = None
raise
if key is not None and rv is not None:
for k in key.split('.'):
rv = rv.get(k, {})
if rv == {}:
rv = None
return rv
def shapeMarshal(shape):
"""
return a dict with all there is to know about a shape
@param channel: L{omero.model.ShapeI}
@return: Dict
"""
rv = {}
def set_if(k, v, func=lambda a: a is not None):
"""
Sets the key L{k} with the value of L{v} if the unwrapped value L{v}
passed to L{func} evaluates to True. In the default case this is
True if the unwrapped value L{v} is not None.
"""
v = unwrap(v)
if func(v):
rv[k] = v
rv['id'] = shape.getId().getValue()
set_if('theT', shape.getTheT())
set_if('theZ', shape.getTheZ())
shape_type = type(shape)
if shape_type == omero.model.RectangleI:
rv['type'] = 'Rectangle'
rv['x'] = shape.getX().getValue()
rv['y'] = shape.getY().getValue()
rv['width'] = shape.getWidth().getValue()
rv['height'] = shape.getHeight().getValue()
elif shape_type == omero.model.MaskI:
rv['type'] = 'Mask'
rv['x'] = shape.getX().getValue()
rv['y'] = shape.getY().getValue()
rv['width'] = shape.getWidth().getValue()
rv['height'] = shape.getHeight().getValue()
# TODO: support for mask
elif shape_type == omero.model.EllipseI:
rv['type'] = 'Ellipse'
rv['x'] = shape.getX().getValue()
rv['y'] = shape.getY().getValue()
rv['radiusX'] = shape.getRadiusX().getValue()
rv['radiusY'] = shape.getRadiusY().getValue()
elif shape_type == omero.model.PolylineI:
rv['type'] = 'PolyLine'
rv['points'] = stringToSvg(shape.getPoints().getValue())
elif shape_type == omero.model.LineI:
rv['type'] = 'Line'
rv['x1'] = shape.getX1().getValue()
rv['x2'] = shape.getX2().getValue()
rv['y1'] = shape.getY1().getValue()
rv['y2'] = shape.getY2().getValue()
elif shape_type == omero.model.PointI:
rv['type'] = 'Point'
rv['x'] = shape.getX().getValue()
rv['y'] = shape.getY().getValue()
elif shape_type == omero.model.PolygonI:
rv['type'] = 'Polygon'
# z = closed line
rv['points'] = stringToSvg(shape.getPoints().getValue()) + " z"
elif shape_type == omero.model.LabelI:
rv['type'] = 'Label'
rv['x'] = shape.getX().getValue()
rv['y'] = shape.getY().getValue()
else:
logger.debug("Shape type not supported: %s" % str(shape_type))
text_value = unwrap(shape.getTextValue())
if text_value is not None:
# only populate json with font styles if we have some text
rv['textValue'] = text_value
# FIXME: units ignored for font size
if shape.getFontSize() is not None:
set_if('fontSize', shape.getFontSize().getValue())
set_if('fontStyle', shape.getFontStyle())
set_if('fontFamily', shape.getFontFamily())
if shape.getTransform() is not None:
transform = shape.getTransform()
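        # Serialise the affine transform in SVG order:
        # matrix(a00 a10 a01 a11 a02 a12)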
tm = [unwrap(transform.a00),
unwrap(transform.a10),
unwrap(transform.a01),
unwrap(transform.a11),
unwrap(transform.a02),
unwrap(transform.a12)]
rv['transform'] = 'matrix(%s)' % (' '.join([str(t) for t in tm]))
fill_color = unwrap(shape.getFillColor())
if fill_color is not None:
rv['fillColor'], rv['fillAlpha'] = rgb_int2css(fill_color)
stroke_color = unwrap(shape.getStrokeColor())
if stroke_color is not None:
rv['strokeColor'], rv['strokeAlpha'] = rgb_int2css(stroke_color)
if shape.getStrokeWidth() is not None:
# FIXME: units ignored for stroke width
set_if('strokeWidth', shape.getStrokeWidth().getValue())
if hasattr(shape, 'getMarkerStart') and shape.getMarkerStart() is not None:
rv['markerStart'] = shape.getMarkerStart().getValue()
if hasattr(shape, 'getMarkerEnd') and shape.getMarkerEnd() is not None:
rv['markerEnd'] = shape.getMarkerEnd().getValue()
return rv
def stringToSvg(string):
"""
Method for converting the string returned from
omero.model.ShapeI.getPoints() into an SVG for display on web.
E.g: "points[309,427, 366,503, 190,491] points1[309,427, 366,503, 190,491]
points2[309,427, 366,503, 190,491]"
To: M 309 427 L 366 503 L 190 491 z
"""
point_list = string.strip()
match = INSIGHT_POINT_LIST_RE.search(point_list)
if match is not None:
point_list = match.group(1)
point_list = OME_MODEL_POINT_LIST_RE.findall(point_list)
if len(point_list) == 0:
logger.error("Unrecognised ROI shape 'points' string: %r" % string)
return ""
point_list = ' L '.join([' '.join(point) for point in point_list])
return "M %s" % point_list
def rgb_int2css(rgbint):
"""
converts a bin int number into css colour and alpha fraction.
E.g. -1006567680 to '#00ff00', 0.5
"""
alpha = rgbint // 256 // 256 // 256 % 256
alpha = float(alpha) / 256
r, g, b = (rgbint // 256 // 256 % 256, rgbint // 256 % 256, rgbint % 256)
return "#%02x%02x%02x" % (r, g, b), alpha
def rgb_int2rgba(rgbint):
"""
converts a bin int number into (r, g, b, alpha) tuple.
E.g. 1694433280 to (255, 0, 0, 0.390625)
"""
alpha = rgbint // 256 // 256 // 256 % 256
alpha = float(alpha) / 256
r, g, b = (rgbint // 256 // 256 % 256, rgbint // 256 % 256, rgbint % 256)
return (r, g, b, alpha)
def chgrpMarshal(conn, rsp):
"""
Helper for marshalling a Chgrp response.
Uses conn to lookup unlinked objects.
Returns dict of e.g.
{'includedObjects': {'Datasets':[1,2,3]},
'unlinkedDetails': {'Tags':[{'id':1, 'name':'t'}]}
}
"""
rv = {}
if isinstance(rsp, omero.cmd.ERR):
rsp_params = ", ".join(["%s: %s" % (k, v) for k, v in
rsp.parameters.items()])
rv['error'] = rsp.message
rv['report'] = "%s %s" % (rsp.name, rsp_params)
else:
included = rsp.responses[0].includedObjects
deleted = rsp.responses[0].deletedObjects
# Included: just simplify the key, e.g. -> Projects, Datasets etc
includedObjects = {}
objKeys = ['ome.model.containers.Project',
'ome.model.containers.Dataset',
'ome.model.core.Image',
'ome.model.screen.Screen',
'ome.model.screen.Plate',
'ome.model.screen.Well']
for k in objKeys:
if k in included:
otype = k.split(".")[-1]
oids = included[k]
oids.sort() # makes testing easier
includedObjects[otype + 's'] = oids
rv['includedObjects'] = includedObjects
# Annotation links - need to get info on linked objects
tags = {}
files = {}
comments = 0
others = 0
annotationLinks = ['ome.model.annotations.ProjectAnnotationLink',
'ome.model.annotations.DatasetAnnotationLink',
'ome.model.annotations.ImageAnnotationLink',
'ome.model.annotations.ScreenAnnotationLink',
'ome.model.annotations.PlateAnnotationLink',
'ome.model.annotations.WellAnnotationLink']
for l in annotationLinks:
if l in deleted:
linkType = l.split(".")[-1]
params = omero.sys.ParametersI()
params.addIds(deleted[l])
query = ("select annLink from %s as annLink "
"join fetch annLink.child as ann "
"left outer join fetch ann.file "
"where annLink.id in (:ids)" % linkType)
links = conn.getQueryService().findAllByQuery(
query, params, conn.SERVICE_OPTS)
for lnk in links:
ann = lnk.child
if isinstance(ann, omero.model.FileAnnotationI):
name = unwrap(ann.getFile().getName())
files[ann.id.val] = {'id': ann.id.val,
'name': name}
elif isinstance(ann, omero.model.TagAnnotationI):
name = unwrap(ann.getTextValue())
tags[ann.id.val] = {'id': ann.id.val,
'name': name}
elif isinstance(ann, omero.model.CommentAnnotationI):
comments += 1
else:
others += 1
# sort tags & comments
tags = tags.values()
tags.sort(key=lambda x: x['name'])
files = files.values()
files.sort(key=lambda x: x['name'])
rv['unlinkedDetails'] = {'Tags': tags,
'Files': files,
'Comments': comments,
'Others': others
}
# Container links - only report these if we are moving the *parent*,
# E.g. DatasetImageLinks are only reported if we are moving the Dataset
# (and the image is left behind). If we were moving the Image then we'd
# expect the link to be broken (can ignore)
objects = {}
containerLinks = {
'ome.model.containers.ProjectDatasetLink': 'Datasets',
'ome.model.containers.DatasetImageLink': 'Images',
'ome.model.screen.ScreenPlateLink': 'Screens'}
for l, ch in containerLinks.items():
if l in deleted:
linkType = l.split(".")[-1]
params = omero.sys.ParametersI()
params.addIds(deleted[l])
query = ("select conLink from %s as conLink "
"join fetch conLink.child as ann "
"where conLink.id in (:ids)" % linkType)
links = conn.getQueryService().findAllByQuery(
query, params, conn.SERVICE_OPTS)
for lnk in links:
child = lnk.child
if (ch not in includedObjects or
child.id.val not in includedObjects[ch]):
name = unwrap(child.getName())
# Put objects in a dictionary to avoid duplicates
if ch not in objects:
objects[ch] = {}
# E.g. objects['Dataset']['1'] = {}
objects[ch][child.id.val] = {'id': child.id.val,
'name': name}
# sort objects
for otype, objs in objects.items():
objs = objs.values()
objs.sort(key=lambda x: x['name'])
# E.g. 'Dataset' objects in 'Datasets'
rv['unlinkedDetails'][otype] = objs
return rv
| gpl-2.0 | 7,000,183,557,897,163,000 | 38.491837 | 83 | 0.538163 | false |
xqliu/coursera | 6.00.1x_IntroductionToComputerScienceAndProgramming/ProblemSet4/ps4b.py | 1 | 6470 | from ps4a import *
import time
#
#
# Problem #6: Computer chooses a word
#
#
def compChooseWord(hand, wordList, n):
"""
Given a hand and a wordList, find the word that gives
the maximum value score, and return it.
This word should be calculated by considering all the words
in the wordList.
If no words in the wordList can be made from the hand, return None.
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: string or None
"""
# Create a new variable to store the maximum score seen so far (initially 0)
maxScore = 0
# Create a new variable to store the best word seen so far (initially None)
bestWord = None
possibleMaxScore = 0
for key in hand.keys():
possibleMaxScore += SCRABBLE_LETTER_VALUES[key] * hand[key]
possibleMaxScore *= len(hand)
possibleMaxScore += 50
# For each word in the wordList
for word in wordList:
# If you can construct the word from your hand
# (hint: you can use isValidWord, or - since you don't really need to test if the word is in the wordList - you can make a similar function that omits that test)
if (isValidWord(word, hand, wordList)):
# Find out how much making that word is worth
score = getWordScore(word, n)
# If the score for that word is higher than your best score
if (score > maxScore):
# Update your best score, and best word accordingly
maxScore = score
bestWord = word
if (maxScore == possibleMaxScore):
break
# return the best word you found.
return bestWord
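# Illustrative call (hypothetical hand; assumes wordList was loaded via ps4a.loadWords()):
#   hand = {'a': 1, 'p': 2, 'l': 1, 'e': 1, 's': 1}
#   compChooseWord(hand, wordList, 6)  # returns the highest-scoring valid word, or None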
#
# Problem #7: Computer plays a hand
#
def compPlayHand(hand, wordList, n):
"""
Allows the computer to play the given hand, following the same procedure
as playHand, except instead of the user choosing a word, the computer
chooses it.
1) The hand is displayed.
2) The computer chooses a word.
3) After every valid word: the word and the score for that word is
displayed, the remaining letters in the hand are displayed, and the
computer chooses another word.
4) The sum of the word scores is displayed when the hand finishes.
5) The hand finishes when the computer has exhausted its possible
choices (i.e. compChooseWord returns None).
hand: dictionary (string -> int)
wordList: list (string)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# TO DO ... <-- Remove this comment when you code this function
totalPoints = 0
while (len(hand) > 0):
# Display the hand
print "Current Hand: ",
displayHand(hand)
# Ask user for input
word = compChooseWord(hand, wordList, n)
if (None != word):
# Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
wordScore = getWordScore(word, n)
totalPoints += wordScore
print '"' + word + '"' + " earned " + str(wordScore) + " points. Total: " + str(totalPoints) + " points"
# Update the hand
for c in word:
current = hand.get(c,0)
if (current == 1):
del hand[c]
else:
hand[c] = current - 1
if (len(hand) == 0 or None == word):
print "Total score: " + str(totalPoints) + " points."
break
#
# Problem #8: Playing a game
#
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'e', immediately exit the game.
* If the user inputs anything that's not 'n', 'r', or 'e', keep asking them again.
2) Asks the user to input a 'u' or a 'c'.
* If the user inputs anything that's not 'c' or 'u', keep asking them again.
3) Switch functionality based on the above choices:
* If the user inputted 'n', play a new (random) hand.
* Else, if the user inputted 'r', play the last hand again.
* If the user inputted 'u', let the user play the game
with the selected hand, using playHand.
* If the user inputted 'c', let the computer play the
game with the selected hand, using compPlayHand.
4) After the computer or user has played the hand, repeat from step 1
wordList: list (string)
"""
originalHand = None
while True:
command = raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game:")
if 'n' == command:
finished = False
while (finished != None):
userOrCompu = raw_input("Enter u to have yourself play, c to have the computer play:")
if 'u' == userOrCompu or 'c' == userOrCompu:
hand = dealHand(HAND_SIZE)
originalHand = hand.copy()
if 'u' == userOrCompu:
finished = playHand(hand, wordList, HAND_SIZE)
elif 'c' == userOrCompu:
finished = compPlayHand(hand, wordList, HAND_SIZE)
else:
print "Invalid command."
elif 'r' == command:
if (None == originalHand):
print "You have not played a hand yet. Please play a new hand first!"
else:
finished = False
while (finished != None):
userOrCompu = raw_input("Enter u to have yourself play, c to have the computer play:")
if 'u' == userOrCompu or 'c' == userOrCompu:
hand = originalHand.copy()
if 'u' == userOrCompu:
finished = playHand(hand, wordList, HAND_SIZE)
elif 'c' == userOrCompu:
finished = compPlayHand(hand, wordList, HAND_SIZE)
else:
print "Invalid command."
elif 'e' == command:
return
else:
print "Invalid command."
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
| mit | 3,226,312,288,263,787,000 | 36.183908 | 169 | 0.574034 | false |
tobias47n9e/social-core | social_core/backends/vend.py | 5 | 1227 | """
Vend OAuth2 backend:
"""
from .oauth import BaseOAuth2
class VendOAuth2(BaseOAuth2):
name = 'vend'
AUTHORIZATION_URL = 'https://secure.vendhq.com/connect'
ACCESS_TOKEN_URL = 'https://{0}.vendhq.com/api/1.0/token'
ACCESS_TOKEN_METHOD = 'POST'
REDIRECT_STATE = False
EXTRA_DATA = [
('refresh_token', 'refresh_token'),
('domain_prefix', 'domain_prefix')
]
def access_token_url(self):
return self.ACCESS_TOKEN_URL.format(self.data['domain_prefix'])
def get_user_details(self, response):
email = response['email']
username = response.get('username') or email.split('@', 1)[0]
return {
'username': username,
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''
}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
prefix = kwargs['response']['domain_prefix']
url = 'https://{0}.vendhq.com/api/users'.format(prefix)
data = self.get_json(url, headers={
'Authorization': 'Bearer {0}'.format(access_token)
})
return data['users'][0] if data.get('users') else {}
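# Illustrative Django settings for enabling this backend (an assumption based on the
# usual python-social-auth conventions, not taken from this file):
#   AUTHENTICATION_BACKENDS = ('social_core.backends.vend.VendOAuth2', ...)
#   SOCIAL_AUTH_VEND_KEY = '<client id>'
#   SOCIAL_AUTH_VEND_SECRET = '<client secret>'
# Note that the token and user-data URLs are built from the 'domain_prefix' value
# supplied by Vend during the OAuth flow.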
| bsd-3-clause | -1,724,783,308,075,950,600 | 30.461538 | 71 | 0.566422 | false |
HyperBaton/ansible | lib/ansible/modules/network/fortios/fortios_spamfilter_dnsbl.py | 7 | 11149 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_spamfilter_dnsbl
short_description: Configure AntiSpam DNSBL/ORBL in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify spamfilter feature and dnsbl category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
spamfilter_dnsbl:
description:
- Configure AntiSpam DNSBL/ORBL.
default: null
type: dict
suboptions:
comment:
description:
- Optional comments.
type: str
entries:
description:
- Spam filter DNSBL and ORBL server.
type: list
suboptions:
action:
description:
- Reject connection or mark as spam email.
type: str
choices:
- reject
- spam
id:
description:
- DNSBL/ORBL entry ID.
required: true
type: int
server:
description:
- DNSBL or ORBL server name.
type: str
status:
description:
- Enable/disable status.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
name:
description:
- Name of table.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure AntiSpam DNSBL/ORBL.
fortios_spamfilter_dnsbl:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
spamfilter_dnsbl:
comment: "Optional comments."
entries:
-
action: "reject"
id: "6"
server: "192.168.100.40"
status: "enable"
id: "9"
name: "default_name_10"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_spamfilter_dnsbl_data(json):
option_list = ['comment', 'entries', 'id',
'name']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
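# Illustrative example (not part of the module): underscore_to_hyphen maps
#   {'domain_prefix': 'x', 'entries': [{'id': 1}]}
# to
#   {'domain-prefix': 'x', 'entries': [{'id': 1}]}
# i.e. only dictionary keys are rewritten; values are left untouched.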
def spamfilter_dnsbl(data, fos):
vdom = data['vdom']
state = data['state']
spamfilter_dnsbl_data = data['spamfilter_dnsbl']
filtered_data = underscore_to_hyphen(filter_spamfilter_dnsbl_data(spamfilter_dnsbl_data))
if state == "present":
return fos.set('spamfilter',
'dnsbl',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('spamfilter',
'dnsbl',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_spamfilter(data, fos):
if data['spamfilter_dnsbl']:
resp = spamfilter_dnsbl(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"spamfilter_dnsbl": {
"required": False, "type": "dict", "default": None,
"options": {
"comment": {"required": False, "type": "str"},
"entries": {"required": False, "type": "list",
"options": {
"action": {"required": False, "type": "str",
"choices": ["reject", "spam"]},
"id": {"required": True, "type": "int"},
"server": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}},
"id": {"required": True, "type": "int"},
"name": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_spamfilter(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_spamfilter(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,198,966,180,892,313,000 | 29.545205 | 97 | 0.552247 | false |
adoosii/edx-platform | lms/startup.py | 2 | 5390 | """
Module for code that should run during LMS startup
"""
# pylint: disable=unused-argument
from django.conf import settings
# Force settings to run so that the python path is modified
settings.INSTALLED_APPS # pylint: disable=pointless-statement
from openedx.core.lib.django_startup import autostartup
import edxmako
import logging
from monkey_patch import django_utils_translation
import analytics
log = logging.getLogger(__name__)
def run():
"""
Executed during django startup
"""
django_utils_translation.patch()
autostartup()
add_mimetypes()
if settings.FEATURES.get('USE_CUSTOM_THEME', False):
enable_stanford_theme()
if settings.FEATURES.get('USE_MICROSITES', False):
enable_microsites()
if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):
enable_third_party_auth()
# Initialize Segment analytics module by setting the write_key.
if settings.LMS_SEGMENT_KEY:
analytics.write_key = settings.LMS_SEGMENT_KEY
# register any dependency injections that we need to support in edx_proctoring
# right now edx_proctoring is dependent on the openedx.core.djangoapps.credit
# as well as the instructor dashboard (for deleting student attempts)
if settings.FEATURES.get('ENABLE_PROCTORED_EXAMS'):
# Import these here to avoid circular dependencies of the form:
# edx-platform app --> DRF --> django translation --> edx-platform app
from edx_proctoring.runtime import set_runtime_service
from instructor.services import InstructorService
from openedx.core.djangoapps.credit.services import CreditService
set_runtime_service('credit', CreditService())
set_runtime_service('instructor', InstructorService())
def add_mimetypes():
"""
Add extra mimetypes. Used in xblock_resource.
If you add a mimetype here, be sure to also add it in cms/startup.py.
"""
import mimetypes
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-opentype', '.otf')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
def enable_stanford_theme():
"""
Enable the settings for a custom theme, whose files should be stored
in ENV_ROOT/themes/THEME_NAME (e.g., edx_all/themes/stanford).
"""
# Workaround for setting THEME_NAME to an empty
# string which is the default due to this ansible
# bug: https://github.com/ansible/ansible/issues/4812
if getattr(settings, "THEME_NAME", "") == "":
settings.THEME_NAME = None
return
assert settings.FEATURES['USE_CUSTOM_THEME']
settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format(
name=settings.THEME_NAME
)
# Calculate the location of the theme's files
theme_root = settings.ENV_ROOT / "themes" / settings.THEME_NAME
# Include the theme's templates in the template search paths
settings.TEMPLATE_DIRS.insert(0, theme_root / 'templates')
edxmako.paths.add_lookup('main', theme_root / 'templates', prepend=True)
# Namespace the theme's static files to 'themes/<theme_name>' to
# avoid collisions with default edX static files
settings.STATICFILES_DIRS.append(
(u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static')
)
# Include theme locale path for django translations lookup
settings.LOCALE_PATHS = (theme_root / 'conf/locale',) + settings.LOCALE_PATHS
def enable_microsites():
"""
Enable the use of microsites, which are websites that allow
for subdomains for the edX platform, e.g. foo.edx.org
"""
microsites_root = settings.MICROSITE_ROOT_DIR
microsite_config_dict = settings.MICROSITE_CONFIGURATION
for ms_name, ms_config in microsite_config_dict.items():
# Calculate the location of the microsite's files
ms_root = microsites_root / ms_name
ms_config = microsite_config_dict[ms_name]
# pull in configuration information from each
# microsite root
if ms_root.isdir():
# store the path on disk for later use
ms_config['microsite_root'] = ms_root
template_dir = ms_root / 'templates'
ms_config['template_dir'] = template_dir
ms_config['microsite_name'] = ms_name
log.info('Loading microsite %s', ms_root)
else:
# not sure if we have application logging at this stage of
# startup
log.error('Error loading microsite %s. Directory does not exist', ms_root)
# remove from our configuration as it is not valid
del microsite_config_dict[ms_name]
# if we have any valid microsites defined, let's wire in the Mako and STATIC_FILES search paths
if microsite_config_dict:
settings.TEMPLATE_DIRS.append(microsites_root)
edxmako.paths.add_lookup('main', microsites_root)
settings.STATICFILES_DIRS.insert(0, microsites_root)
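# A minimal sketch of the expected configuration, assuming the usual edX settings
# layout (names and values here are illustrative, not taken from this file):
#   MICROSITE_ROOT_DIR = ENV_ROOT / "microsites"
#   MICROSITE_CONFIGURATION = {
#       "foo": {...},   # per-microsite overrides; "foo" must match a directory
#   }                   # under MICROSITE_ROOT_DIR containing a templates/ folder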
def enable_third_party_auth():
"""
Enable the use of third_party_auth, which allows users to sign in to edX
using other identity providers. For configuration details, see
common/djangoapps/third_party_auth/settings.py.
"""
from third_party_auth import settings as auth_settings
auth_settings.apply_settings(settings)
| agpl-3.0 | -2,560,384,727,025,535,000 | 33.774194 | 99 | 0.684972 | false |
jmartinezchaine/OpenERP | openerp/addons/point_of_sale/wizard/pos_sales_user.py | 9 | 2221 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
class pos_sale_user(osv.osv_memory):
_name = 'pos.sale.user'
_description = 'Sale by User'
_columns = {
'date_start': fields.date('Date Start', required=True),
'date_end': fields.date('Date End', required=True),
'user_id': fields.many2many('res.users', 'sale_user_rel', 'user_id', 'uid', 'Salesman'),
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return : return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['date_start', 'date_end', 'user_id'], context=context)
res = res and res[0] or {}
datas['form'] = res
return {
'type': 'ir.actions.report.xml',
'report_name': 'pos.sales.user',
'datas': datas,
}
pos_sale_user()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,650,896,322,884,492,000 | 35.409836 | 96 | 0.580369 | false |
dancingdan/tensorflow | tensorflow/python/kernel_tests/substr_op_test.py | 6 | 20111 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Substr op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class SubstrOpTest(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testScalarString(self, dtype, pos, unit):
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"He\xc3\xc3\U0001f604".encode("utf-8"),
}[unit]
expected_value = {
"BYTE": b"ell",
"UTF8_CHAR": u"e\xc3\xc3".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testScalarString_EdgeCases(self, dtype, unit):
# Empty string
test_string = {
"BYTE": b"",
"UTF8_CHAR": u"".encode("utf-8"),
}[unit]
expected_value = b""
position = np.array(0, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Full string
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(0, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, test_string)
# Full string (Negative)
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-5, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, test_string)
# Length is larger in magnitude than a negative position
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
expected_string = {
"BYTE": b"ello",
"UTF8_CHAR": u"\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(-4, dtype)
length = np.array(5, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_string)
@parameterized.parameters(
(np.int32, 1, "BYTE"),
(np.int64, 1, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 1, "UTF8_CHAR"),
(np.int64, 1, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testVectorStrings(self, dtype, pos, unit):
test_string = {
"BYTE": [b"Hello", b"World"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"H\xc3llo",
u"W\U0001f604rld"]],
}[unit]
expected_value = {
"BYTE": [b"ell", b"orl"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"\xc3ll", u"\U0001f604rl"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testMatrixStrings(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]]],
}[unit]
position = np.array(1, dtype)
length = np.array(4, dtype)
expected_value = {
"BYTE": [[b"en", b"leve", b"welv"], [b"hirt", b"ourt", b"ifte"],
[b"ixte", b"even", b"ight"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u053c\u025bv\u025b",
u"w\u0c1dlv"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"\U0001f604rld",
u"\xfcd\xea"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
position = np.array(-3, dtype)
length = np.array(2, dtype)
expected_value = {
"BYTE": [[b"te", b"ve", b"lv"], [b"ee", b"ee", b"ee"],
[b"ee", b"ee", b"ee"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227",
u"v\u025b", u"lv"]],
[x.encode("utf-8") for x in [u"\xc3\xc3", u"rl",
u"\xfcd"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testElementWisePosLen(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"He\xc3\xc3o",
u"W\U0001f604rld",
u"d\xfcd\xea"]],
[x.encode("utf-8") for x in [u"sixt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, -4, 3], [1, 2, -4], [-5, 2, 3]], dtype)
length = np.array([[2, 2, 4], [4, 3, 2], [5, 5, 5]], dtype)
expected_value = {
"BYTE": [[b"en", b"ev", b"lve"], [b"hirt", b"urt", b"te"],
[b"xteen", b"vente", b"hteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227n",
u"\u025bv",
u"lv\u025b"]],
[x.encode("utf-8") for x in [u"e\xc3\xc3o",
u"rld",
u"d\xfc"]],
[x.encode("utf-8") for x in [u"xt\xea\xean",
u"\U00010299ente",
u"h\x86een"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testBroadcast(self, dtype, unit):
# Broadcast pos/len onto input string
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"],
[b"nineteen", b"twenty", b"twentyone"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]],
[x.encode("utf-8") for x in [u"nineteen",
u"twenty",
u"twentyone"]]],
}[unit]
position = np.array([1, -4, 3], dtype)
length = np.array([1, 2, 3], dtype)
expected_value = {
"BYTE": [[b"e", b"ev", b"lve"], [b"h", b"te", b"tee"],
[b"i", b"te", b"hte"], [b"i", b"en", b"nty"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d227",
u"\u025bv", u"lv\u025b"]],
[x.encode("utf-8") for x in [u"h", u"t\xea", u"tee"]],
[x.encode("utf-8") for x in [u"\xcd", u"te", u"h\x86e"]],
[x.encode("utf-8") for x in [u"i", u"en", u"nty"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Broadcast input string onto pos/len
test_string = {
"BYTE": [b"thirteen", b"fourteen", b"fifteen"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
}[unit]
position = np.array([[1, -2, 3], [-3, 2, 1], [5, 5, -5]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
expected_value = {
"BYTE": [[b"hir", b"en", b"t"], [b"e", b"ur", b"ift"],
[b"ee", b"ee", b"ft"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"h\xcdr", u"\xean", u"t"]],
[x.encode("utf-8") for x in [u"\xea", u"ur",
u"\xcd\ua09ct"]],
[x.encode("utf-8") for x in [u"\xea\xea", u"\xea\xea",
u"\ua09ct"]]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
# Test 1D broadcast
test_string = {
"BYTE": b"thirteen",
"UTF8_CHAR": u"th\xcdrt\xea\xean".encode("utf-8"),
}[unit]
position = np.array([1, -4, 7], dtype)
length = np.array([3, 2, 1], dtype)
expected_value = {
"BYTE": [b"hir", b"te", b"n"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"h\xcdr", u"t\xea", u"n"]],
}[unit]
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
substr = substr_op.eval()
self.assertAllEqual(substr, expected_value)
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testBadBroadcast(self, dtype, unit):
test_string = [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]]
position = np.array([1, 2, -3, 4], dtype)
length = np.array([1, 2, 3, 4], dtype)
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length, unit=unit)
@parameterized.parameters(
(np.int32, 6, "BYTE"),
(np.int64, 6, "BYTE"),
(np.int32, -6, "BYTE"),
(np.int64, -6, "BYTE"),
(np.int32, 6, "UTF8_CHAR"),
(np.int64, 6, "UTF8_CHAR"),
(np.int32, -6, "UTF8_CHAR"),
(np.int64, -6, "UTF8_CHAR"),
)
def testOutOfRangeError_Scalar(self, dtype, pos, unit):
# Scalar/Scalar
test_string = {
"BYTE": b"Hello",
"UTF8_CHAR": u"H\xc3ll\U0001f604".encode("utf-8"),
}[unit]
position = np.array(pos, dtype)
length = np.array(3, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
@parameterized.parameters(
(np.int32, 4, "BYTE"),
(np.int64, 4, "BYTE"),
(np.int32, -4, "BYTE"),
(np.int64, -4, "BYTE"),
(np.int32, 4, "UTF8_CHAR"),
(np.int64, 4, "UTF8_CHAR"),
(np.int32, -4, "UTF8_CHAR"),
(np.int64, -4, "UTF8_CHAR"),
)
def testOutOfRangeError_VectorScalar(self, dtype, pos, unit):
# Vector/Scalar
test_string = {
"BYTE": [b"good", b"good", b"bad", b"good"],
"UTF8_CHAR": [x.encode("utf-8") for x in [u"g\xc3\xc3d", u"b\xc3d",
u"g\xc3\xc3d"]],
}[unit]
position = np.array(pos, dtype)
length = np.array(1, dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testOutOfRangeError_MatrixMatrix(self, dtype, unit):
# Matrix/Matrix
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"],
[b"good", b"good", b"good"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]]],
}[unit]
position = np.array([[1, 2, 3], [1, 2, 4], [1, 2, 3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
# Matrix/Matrix (with negative)
position = np.array([[1, 2, -3], [1, 2, -4], [1, 2, -3]], dtype)
length = np.array([[3, 2, 1], [1, 2, 3], [2, 2, 2]], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testOutOfRangeError_Broadcast(self, dtype, unit):
# Broadcast
test_string = {
"BYTE": [[b"good", b"good", b"good"], [b"good", b"good", b"bad"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"g\xc3\xc3d"]],
[x.encode("utf-8") for x in [u"g\xc3\xc3d", u"g\xc3\xc3d",
u"b\xc3d"]]],
}[unit]
position = np.array([1, 2, 4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
# Broadcast (with negative)
position = np.array([-1, -2, -4], dtype)
length = np.array([1, 2, 3], dtype)
substr_op = string_ops.substr(test_string, position, length, unit=unit)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
substr_op.eval()
@parameterized.parameters(
(np.int32, "BYTE"),
(np.int64, "BYTE"),
(np.int32, "UTF8_CHAR"),
(np.int64, "UTF8_CHAR"),
)
def testMismatchPosLenShapes(self, dtype, unit):
test_string = {
"BYTE": [[b"ten", b"eleven", b"twelve"],
[b"thirteen", b"fourteen", b"fifteen"],
[b"sixteen", b"seventeen", b"eighteen"]],
"UTF8_CHAR": [[x.encode("utf-8") for x in [u"\U0001d229\U0001d227n",
u"\xc6\u053c\u025bv\u025bn",
u"tw\u0c1dlv\u025b"]],
[x.encode("utf-8") for x in [u"th\xcdrt\xea\xean",
u"f\U0001f604urt\xea\xean",
u"f\xcd\ua09ctee\ua0e4"]],
[x.encode("utf-8") for x in [u"s\xcdxt\xea\xean",
u"se\U00010299enteen",
u"ei\U0001e920h\x86een"]]],
}[unit]
position = np.array([[1, 2, 3]], dtype)
length = np.array([2, 3, 4], dtype)
# Should fail: position/length have different rank
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
position = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]], dtype)
length = np.array([[2, 3, 4]], dtype)
# Should fail: position/length have different dimensionality
with self.assertRaises(ValueError):
string_ops.substr(test_string, position, length)
def testWrongDtype(self):
with self.cached_session():
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3.0, 1)
with self.assertRaises(TypeError):
string_ops.substr(b"test", 3, 1.0)
def testInvalidUnit(self):
with self.cached_session():
with self.assertRaises(ValueError):
string_ops.substr(b"test", 3, 1, unit="UTF8")
if __name__ == "__main__":
test.main()
| apache-2.0 | -5,872,511,195,430,174,000 | 40.211066 | 80 | 0.496196 | false |
joachimmetz/plaso | tests/parsers/zsh_extended_history.py | 3 | 1922 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Zsh extended_history parser."""
import unittest
from plaso.parsers import zsh_extended_history
from tests.parsers import test_lib
class ZshExtendedHistoryTest(test_lib.ParserTestCase):
"""Tests for the Zsh extended_history parser."""
def testParse(self):
"""Tests for the Parse method."""
parser = zsh_extended_history.ZshExtendedHistoryParser()
storage_writer = self._ParseFile(['zsh_extended_history.txt'], parser)
self.assertEqual(storage_writer.number_of_events, 4)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'command': 'cd plaso',
'date_time': '2016-03-12 08:26:50',
'data_type': 'shell:zsh:history',
'elapsed_seconds': 0}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
expected_event_values = {
'command': 'echo dfgdfg \\\\\n& touch /tmp/afile',
'date_time': '2016-03-26 11:54:53',
'data_type': 'shell:zsh:history',
'elapsed_seconds': 0}
self.CheckEventValues(storage_writer, events[2], expected_event_values)
expected_event_values = {
'date_time': '2016-03-26 11:54:57',
'data_type': 'shell:zsh:history'}
self.CheckEventValues(storage_writer, events[3], expected_event_values)
def testVerification(self):
"""Tests for the VerifyStructure method"""
mediator = None
parser = zsh_extended_history.ZshExtendedHistoryParser()
valid_lines = ': 1457771210:0;cd plaso'
self.assertTrue(parser.VerifyStructure(mediator, valid_lines))
invalid_lines = ': 2016-03-26 11:54:53;0;cd plaso'
self.assertFalse(parser.VerifyStructure(mediator, invalid_lines))
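# For reference: the verifier above accepts zsh EXTENDED_HISTORY lines of the form
# ": <start timestamp>:<elapsed seconds>;<command>".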
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,767,368,761,887,004,000 | 30.508197 | 75 | 0.676899 | false |
MjAbuz/flask | flask/testsuite/testing.py | 1 | 7411 | # -*- coding: utf-8 -*-
"""
flask.testsuite.testing
~~~~~~~~~~~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
from flask._compat import text_type
class TestToolsTestCase(FlaskTestCase):
def test_environ_defaults_from_config(self):
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://example.com:1234/foo/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://example.com:1234/foo/')
def test_environ_defaults(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
self.assert_equal(ctx.request.url, 'http://localhost/')
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'http://localhost/')
def test_redirect_keep_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
self.assert_equal(len(sess), 0)
sess['foo'] = [42]
self.assert_equal(len(sess), 1)
rv = c.get('/')
self.assert_equal(rv.data, b'[42]')
with c.session_transaction() as sess:
self.assert_equal(len(sess), 1)
self.assert_equal(sess['foo'], [42])
def test_session_transactions_no_null_sessions(self):
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
self.assert_in('Session backend did not open a session', str(e))
else:
self.fail('Expected runtime error')
def test_session_transactions_keep_context(self):
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
self.assert_true(req is not None)
with c.session_transaction():
self.assert_true(req is flask.request._get_current_object())
def test_session_transaction_needs_cookies(self):
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
self.assert_in('cookies', str(e))
else:
self.fail('Expected runtime error')
def test_test_client_context_binding(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
self.assert_equal(flask.g.value, 42)
self.assert_equal(resp.data, b'Hello World!')
self.assert_equal(resp.status_code, 200)
resp = c.get('/other')
self.assert_false(hasattr(flask.g, 'value'))
self.assert_in(b'Internal Server Error', resp.data)
self.assert_equal(resp.status_code, 500)
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client(self):
app = flask.Flask(__name__)
c = app.test_client()
with c:
self.assert_equal(c.get('/').status_code, 404)
with c:
self.assert_equal(c.get('/').status_code, 404)
def test_test_client_calls_teardown_handlers(self):
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
self.assert_equal(called, [None])
del called[:]
with app.test_client() as c:
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [])
c.get('/')
self.assert_equal(called, [None])
self.assert_equal(called, [None, None])
class SubdomainTestCase(FlaskTestCase):
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['SERVER_NAME'] = 'example.com'
self.client = self.app.test_client()
self._ctx = self.app.test_request_context()
self._ctx.push()
def tearDown(self):
if self._ctx is not None:
self._ctx.pop()
def test_subdomain(self):
@self.app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def test_nosubdomain(self):
@self.app.route('/<company_id>')
def view(company_id):
return company_id
url = flask.url_for('view', company_id='xxx')
response = self.client.get(url)
self.assert_equal(200, response.status_code)
self.assert_equal(b'xxx', response.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestToolsTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
| bsd-3-clause | -4,079,166,665,988,013,000 | 29.623967 | 80 | 0.538254 | false |
massimo-zaniboni/netrobots | server/rest_api/configuration.py | 2 | 7174 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import base64
import urllib3
try:
import httplib
except ImportError:
# for python3
import http.client as httplib
import sys
import logging
from six import iteritems
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class Configuration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""
Constructor
"""
# Default Base url
self.host = "http://localhost"
# Default api client
self.api_client = None
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("swagger_client")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
@property
def logger_file(self):
"""
Gets the logger_file.
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""
Sets the logger_file.
If the logger_file is None, then add stream handler and remove file handler.
Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""
Gets the debug status.
"""
return self.__debug
@debug.setter
def debug(self, value):
"""
Sets the debug status.
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""
Gets the logger_format.
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""
Sets the logger_format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
elif self.api_key.get(identifier):
return self.api_key[identifier]
def get_basic_auth_token(self):
"""
Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
.get('authorization')
def auth_settings(self):
"""
Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
}
def to_debug_report(self):
"""
Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 2.0.0\n"\
"SDK Package Version: 1.0.0".\
format(env=sys.platform, pyversion=sys.version)
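# Illustrative usage (not part of the generated client):
#   config = Configuration()            # @singleton: repeated calls return the same object
#   config.host = "http://localhost"    # default shown above; override as needed
#   config.debug = True                 # also raises urllib3/httplib log verbosity
#   assert Configuration() is config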
| gpl-3.0 | -3,508,052,409,785,501,700 | 30.056277 | 97 | 0.596181 | false |
nwjs/chromium.src | third_party/blink/tools/blinkpy/style/patchreader.py | 2 | 2840 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
# Copyright (C) 2010 ProFUSION embedded systems
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from blinkpy.common.checkout.diff_parser import DiffParser
_log = logging.getLogger(__name__)
class PatchReader(object):
"""Supports checking style in patches."""
def __init__(self, text_file_reader):
"""Create a PatchReader instance.
Args:
text_file_reader: A TextFileReader instance.
"""
self._text_file_reader = text_file_reader
def check(self, patch_string):
"""Checks style in the given patch."""
patch_files = DiffParser(patch_string.splitlines()).files
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s', len(line_numbers), path)
if not line_numbers:
# Don't check files which contain only deleted lines
# as they can never add style errors. However, mark them as
# processed so that we count up number of such files.
self._text_file_reader.count_delete_only_file()
continue
self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
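# Illustrative usage (hypothetical; any TextFileReader-compatible object works):
#   reader = PatchReader(text_file_reader)
#   reader.check(patch_text)   # patch_text is a unified diff as a single string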
| bsd-3-clause | -975,206,121,481,361,300 | 42.692308 | 90 | 0.713732 | false |
idaholab/raven | framework/Databases/__init__.py | 1 | 1355 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Databases module includes efficient ways to serialize data to file.
"""
from __future__ import absolute_import
from utils import InputData
from .Database import DateBase as Database
from .HDF5 import HDF5
from .NetCDF import NetCDF
from .Factory import factory
class DatabasesCollection(InputData.ParameterInput):
"""
Class for reading in a collection of databases
"""
DatabasesCollection.createClass("Databases")
DatabasesCollection.addSub(HDF5.getInputSpecification())
DatabasesCollection.addSub(NetCDF.getInputSpecification())
def returnInputParameter():
"""
Returns the input specs for the desired classes
@ In, None
@ Out, returnInputParameter, InputData.ParameterInput, parsing class
"""
return DatabasesCollection()
| apache-2.0 | 1,328,735,218,116,621,600 | 30.511628 | 74 | 0.772694 | false |
jhreinholdt/collatz-conjecture | collatz_conjecture.py | 1 | 1190 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 16:03:24 2017
@author: jorei
"""
#Collatz Conjecture - Start with a number n > 1.
#Find the number of steps it takes to reach one using the following process:
#If n is even, divide it by 2. If n is odd, multiply it by 3 and add 1.
import matplotlib.pyplot as plt
import numpy as np
def collatzconjecture(number):
iterations = 0
while number != 1:
if number % 2 == 0:
number = number / 2
iterations += 1
else:
number = 3 * number + 1
iterations += 1
return int(iterations)
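# Example: collatzconjecture(6) == 8 (6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1)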
def main():
maxno = 1000000
x = np.arange(2,maxno)
y = np.arange(0,maxno-2)
for number in x:
y[number-2] = collatzconjecture(number)
dots = plt.plot(x,y)
plt.setp(dots, marker='.', linewidth='0.0', markersize='1.5')
plt.xscale('log')
plt.grid(True, which="both", ls="-", color='0.65')
plt.ylabel('Number of iterations')
plt.xlabel('Starting number')
plt.savefig('CollatzIterations.png', bbox_inches='tight')
plt.show()
if __name__ == '__main__':
main() | mit | -716,338,628,596,278,000 | 25.72093 | 77 | 0.562185 | false |
herove/dotfiles | sublime/Packages/SublimeCodeIntel/libs/inflector/Rules/Base.py | 2 | 6613 | #!/usr/bin/env python
# Copyright (c) 2006 Bermi Ferrer Martinez
# info at bermi dot org
# See the end of this file for the free software, open source license
# (BSD-style).
import re
class Base:
'''Locale inflectors must inherit from this base class inorder to provide
the basic Inflector functionality'''
def conditionalPlural(self, numer_of_records, word):
'''Returns the plural form of a word if first parameter is greater than 1'''
if numer_of_records > 1:
return self.pluralize(word)
else:
return word
def titleize(self, word, uppercase=''):
        '''Converts an underscored or CamelCase word into an English sentence.
        The titleize function converts text like "WelcomePage",
        "welcome_page" or "welcome page" into "Welcome Page".
If second parameter is set to 'first' it will only
capitalize the first character of the title.'''
if(uppercase == 'first'):
return self.humanize(self.underscore(word)).capitalize()
else:
return self.humanize(self.underscore(word)).title()
def camelize(self, word):
''' Returns given word as CamelCased
Converts a word like "send_email" to "SendEmail". It
will remove non alphanumeric character from the word, so
"who's online" will be converted to "WhoSOnline"'''
return ''.join(w[0].upper() + w[1:] for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))
def underscore(self, word):
''' Converts a word "into_it_s_underscored_version"
Convert any "CamelCased" or "ordinary Word" into an
"underscored_word".
This can be really useful for creating friendly URLs.'''
return re.sub('[^A-Z^a-z^0-9^\/]+', '_',
re.sub('([a-z\d])([A-Z])', '\\1_\\2',
re.sub('([A-Z]+)([A-Z][a-z])', '\\1_\\2', re.sub('::', '/', word)))).lower()
def humanize(self, word, uppercase=''):
'''Returns a human-readable string from word
        Returns a human-readable string from word, by replacing
        underscores with a space and title-casing every word by default;
        a trailing '_id' is removed. If you only need to capitalize the
        first character, pass 'first' as the second parameter.'''
if(uppercase == 'first'):
return re.sub('_id$', '', word).replace('_', ' ').capitalize()
else:
return re.sub('_id$', '', word).replace('_', ' ').title()
def variablize(self, word):
'''Same as camelize but first char is lowercased
Converts a word like "send_email" to "sendEmail". It
will remove non alphanumeric character from the word, so
"who's online" will be converted to "whoSOnline"'''
word = self.camelize(word)
return word[0].lower()+word[1:]
def tableize(self, class_name):
''' Converts a class name to its table name according to rails
naming conventions. Example. Converts "Person" to "people" '''
return self.pluralize(self.underscore(class_name))
def classify(self, table_name):
'''Converts a table name to its class name according to rails
naming conventions. Example: Converts "people" to "Person" '''
return self.camelize(self.singularize(table_name))
def ordinalize(self, number):
'''Converts number to its ordinal English form.
This method converts 13 to 13th, 2 to 2nd ...'''
tail = 'th'
if number % 100 == 11 or number % 100 == 12 or number % 100 == 13:
tail = 'th'
elif number % 10 == 1:
tail = 'st'
elif number % 10 == 2:
tail = 'nd'
elif number % 10 == 3:
tail = 'rd'
return str(number)+tail
def unaccent(self, text):
'''Transforms a string to its unaccented version.
This might be useful for generating "friendly" URLs'''
find = u'\u00C0\u00C1\u00C2\u00C3\u00C4\u00C5\u00C6\u00C7\u00C8\u00C9\u00CA\u00CB\u00CC\u00CD\u00CE\u00CF\u00D0\u00D1\u00D2\u00D3\u00D4\u00D5\u00D6\u00D8\u00D9\u00DA\u00DB\u00DC\u00DD\u00DE\u00DF\u00E0\u00E1\u00E2\u00E3\u00E4\u00E5\u00E6\u00E7\u00E8\u00E9\u00EA\u00EB\u00EC\u00ED\u00EE\u00EF\u00F0\u00F1\u00F2\u00F3\u00F4\u00F5\u00F6\u00F8\u00F9\u00FA\u00FB\u00FC\u00FD\u00FE\u00FF'
replace = u'AAAAAAACEEEEIIIIDNOOOOOOUUUUYTsaaaaaaaceeeeiiiienoooooouuuuyty'
return self.string_replace(text, find, replace)
def string_replace(self, word, find, replace):
'''This function returns a copy of word, translating
all occurrences of each character in find to the
corresponding character in replace'''
for k in range(0, len(find)):
word = re.sub(find[k], replace[k], word)
return word
def urlize(self, text):
'''Transform a string its unaccented and underscored
version ready to be inserted in friendly URLs'''
return re.sub('^_|_$', '', self.underscore(self.unaccent(text)))
def demodulize(self, module_name):
return self.humanize(self.underscore(re.sub('^.*::', '', module_name)))
def modulize(self, module_description):
return self.camelize(self.singularize(module_description))
def foreignKey(self, class_name, separate_class_name_and_id_with_underscore=1):
''' Returns class_name in underscored form, with "_id" tacked on at the end.
This is for use in dealing with the database.'''
if separate_class_name_and_id_with_underscore:
tail = '_id'
else:
tail = 'id'
return self.underscore(self.demodulize(class_name))+tail
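

# Illustrative sketch (not part of the original module): expected outputs for
# a few of the helpers above, using Base directly. Locale subclasses normally
# add pluralize()/singularize(), which are therefore not exercised here.
def _example_usage():
    inflector = Base()
    assert inflector.camelize('send_email') == 'SendEmail'
    assert inflector.underscore('SendEmail') == 'send_email'
    assert inflector.ordinalize(12) == '12th'
    assert inflector.ordinalize(22) == '22nd'
    assert inflector.foreignKey('App::User') == 'user_id'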
# Copyright (c) 2006 Bermi Ferrer Martinez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software to deal in this software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this software, and to permit
# persons to whom this software is furnished to do so, subject to the following
# condition:
#
# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THIS SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THIS SOFTWARE.
| mit | -3,599,438,972,841,421,000 | 43.086667 | 390 | 0.642522 | false |
helenwarren/pied-wagtail | wagtail/wagtaildocs/wagtail_hooks.py | 1 | 1464 | from django.conf import settings
from django.conf.urls import include, url
from django.core import urlresolvers
from django.utils.html import format_html, format_html_join
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin import hooks
from wagtail.wagtailadmin.menu import MenuItem
from wagtail.wagtaildocs import admin_urls
def register_admin_urls():
return [
url(r'^documents/', include(admin_urls)),
]
hooks.register('register_admin_urls', register_admin_urls)
def construct_main_menu(request, menu_items):
if request.user.has_perm('wagtaildocs.add_document'):
menu_items.append(
MenuItem(_('Documents'), urlresolvers.reverse('wagtaildocs_index'), classnames='icon icon-doc-full-inverse', order=400)
)
hooks.register('construct_main_menu', construct_main_menu)
def editor_js():
js_files = [
'wagtaildocs/js/hallo-plugins/hallo-wagtaildoclink.js',
'wagtaildocs/js/document-chooser.js',
]
js_includes = format_html_join('\n', '<script src="{0}{1}"></script>',
((settings.STATIC_URL, filename) for filename in js_files)
)
return js_includes + format_html(
"""
<script>
window.chooserUrls.documentChooser = '{0}';
registerHalloPlugin('hallowagtaildoclink');
</script>
""",
urlresolvers.reverse('wagtaildocs_chooser')
)
hooks.register('insert_editor_js', editor_js)
| bsd-3-clause | -6,834,067,793,087,028,000 | 31.533333 | 131 | 0.681011 | false |
zak-k/iris | tools/gen_stash_refs.py | 14 | 4827 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import json
import urllib
import urllib2
from iris.fileformats.pp import STASH
import gen_helpers
HEADER = '''
"""
Auto-generated from iris/tools/gen_stash_refs.py
Relates grid code and field code to the stash code.
"""
'''
CODE_PREAMBLE = ("\nfrom __future__ import "
"(absolute_import, division, print_function)\n"
"from six.moves import "
"(filter, input, map, range, zip) # noqa\n\n"
"from collections import namedtuple\n\n\n"
"Stash = namedtuple('Stash', "
"'grid_code field_code pseudo_level_type')\n\n\n")
def _value_from_xref(xref, name):
"""Return the value for the key name from xref.
    Will return 0 if the value does not look like an integer.
"""
result = xref.get(name)
try:
int(result)
except (ValueError, TypeError):
result = 0
return result
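

# Illustrative examples (not part of the original script) of the behaviour of
# _value_from_xref: the stored value is returned unchanged when it parses as
# an integer, otherwise 0 is returned.
#
#   _value_from_xref({'lbfcn': '23'}, 'lbfcn') -> '23'
#   _value_from_xref({'lbfcn': 'n/a'}, 'lbfcn') -> 0
#   _value_from_xref({}, 'lbfcn') -> 0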
def write_cross_reference_module(module_path, xrefs):
gen_helpers.prep_module_file(module_path)
with open(module_path, 'a') as module_file:
module_file.write(HEADER)
module_file.write(CODE_PREAMBLE)
module_file.write('STASH_TRANS = {\n')
for xref in xrefs:
stash = xref.get('stash')
try:
STASH.from_msi(stash.replace('"', ''))
except ValueError:
msg = ('stash code is not of a recognised'
'"m??s??i???" form: {}'.format(stash))
print(msg)
grid = xref.get('grid')
if grid is not None:
try:
int(grid)
except ValueError:
msg = ('grid code retrieved from STASH lookup'
'is not an integer: {}'.format(grid))
print(msg)
else:
grid = 0
lbfc = _value_from_xref(xref, 'lbfcn')
pseudT = _value_from_xref(xref, 'pseudT')
module_file.write(
' "{}": Stash({}, {}, {}),\n'.format(stash,
grid,
lbfc,
pseudT))
module_file.write('}\n')
def stash_grid_retrieve():
"""return a dictionary of stash codes and rel;ated information from
the Met Office Reference Registry
"""
baseurl = 'http://reference.metoffice.gov.uk/system/query?query='
query = '''prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
prefix skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?stash ?grid ?lbfcn ?pseudT
WHERE {
?stashcode rdf:type <http://reference.metoffice.gov.uk/um/c4/stash/Stash> ;
skos:notation ?stash ;
<http://reference.metoffice.gov.uk/um/c4/stash/grid> ?gridcode .
OPTIONAL { ?gridcode skos:notation ?grid .}
OPTIONAL {?stashcode <http://reference.metoffice.gov.uk/um/c4/stash/ppfc> ?lbfc .
?lbfc skos:notation ?lbfcn .}
OPTIONAL {?stashcode <http://reference.metoffice.gov.uk/um/c4/stash/pseudT> ?pseudT_id .
?pseudT_id skos:notation ?pseudT . }
}
order by ?stash'''
encquery = urllib.quote_plus(query)
out_format = '&output=json'
url = baseurl + encquery + out_format
response = urllib2.urlopen(url)
stash = json.loads(response.read())
## heads will be of the form [u'stash', u'grid', u'lbfcn', u'pseudT']
## as defined in the query string
heads = stash['head']['vars']
stashcodes = []
for result in stash['results']['bindings']:
res = {}
for head in heads:
if head in result:
res[head] = result[head]['value']
stashcodes.append(res)
return stashcodes
if __name__ == '__main__':
xrefs = stash_grid_retrieve()
outfile = '../lib/iris/fileformats/_ff_cross_references.py'
write_cross_reference_module(outfile, xrefs)
| gpl-3.0 | 1,901,862,876,571,535,400 | 32.289655 | 88 | 0.588357 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20151214A.py | 1 | 2774 | """
[2015-12-14] Challenge # 245 [Easy] Date Dilemma
https://www.reddit.com/r/dailyprogrammer/comments/3wshp7/20151214_challenge_245_easy_date_dilemma/
# Description
Yesterday, Devon the developer made an awesome webform, which the sales team would use to record the results from
today's big new marketing campaign, but now he realised he forgot to add a validator to the "delivery_date" field! He
proceeds to open the generated spreadsheet but, as he expected, the dates are all but normalized... Some of them use `M
D Y` and others `Y M D`, and even arbitrary separators are used! Can you help him parse all the messy text into
properly ISO 8601
(`YYYY-MM-DD`) formatted dates before beer o'clock?
Assume only dates starting with 4 digits use `Y M D`, and others use `M D Y`.
# Sample Input
2/13/15
1-31-10
5 10 2015
2012 3 17
2001-01-01
2008/01/07
# Sample Output
2015-02-13
2010-01-31
2015-05-10
2012-03-17
2001-01-01
2008-01-07
# Extension challenge [Intermediate]
Devon's nemesis, Sally, is by far the best salesperson in the team, but her writing is also the most idiosyncratic! Can
you parse all of her dates? Guidelines:
- Use `2014-12-24` as the base for relative dates.
- When adding **days**, account for the different number of days in each month; ignore leap years.
- When adding **months** and **years**, use whole units, so that:
- one month before october 10 is september 10
- one year after 2001-04-02 is 2002-04-02
- one month after january 30 is february 28 (not march 1)
**Sally's inputs**:
tomorrow
2010-dec-7
OCT 23
1 week ago
next Monday
last sunDAY
1 year ago
1 month ago
last week
LAST MONTH
10 October 2010
an year ago
2 years from tomoRRow
1 month from 2016-01-31
4 DAYS FROM today
9 weeks from yesterday
**Sally's expected outputs**:
2014-12-25
2010-12-01
2014-10-23
2014-12-17
2014-12-29
2014-12-21
2013-12-24
2014-11-24
2014-12-15
2014-11-24
2010-10-10
2013-12-24
2016-12-25
2016-02-28
2014-12-28
2015-02-25
# Notes and Further Reading
- https://en.wikipedia.org/wiki/ISO_8601
- http://php.net/manual/en/function.strtotime.php
- https://xkcd.com/1179/
- *Moderator note:* If you would like to solve the general case to absolutely work on all possible input dates, check
out this video first: https://www.youtube.com/watch?v=-5wpm-gesOY
PS: Using `<?php echo strftime('%Y-%m-%d', strtotime($s));` is cheating! :\^)
------
This challenge is here thanks to /u/alfred300p proposing it in /r/dailyprogrammer_ideas.
Do you a good challenge idea? Consider submitting it to /r/dailyprogrammer_ideas!
"""
def main():
pass
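

# Illustrative sketch (not part of the original stub): one possible normalizer
# for the basic challenge only, assuming the separators are '-', '/' or ' ',
# that dates starting with four digits are Y M D, and that two-digit years
# mean 20xx (consistent with the sample output above). Relative dates from
# the extension challenge are not handled.
def normalize_date(raw):
    import re
    parts = re.split(r"[-/ ]+", raw.strip())
    if len(parts[0]) == 4:
        year, month, day = parts
    else:
        month, day, year = parts
        if len(year) == 2:
            year = "20" + year
    return "{:04d}-{:02d}-{:02d}".format(int(year), int(month), int(day))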
if __name__ == "__main__":
main()
| mit | 5,571,221,075,342,877,000 | 30.168539 | 119 | 0.69106 | false |
TonyShield/Whatever | parse.py | 1 | 1531 | #encoding=utf-8
import jieba
import stem
import stop_words
class Parser(object):
def __init__(self):
self.__stemmer = stem.PorterStemmer()
s = stop_words.get_stop_words('en')
self.__stopwords = []
for word in s:
if word.isalpha():
self.__stopwords.append(self.__stemmer.stem(word.lower(),0,len(word)-1))
self.__stopwords = set(self.__stopwords)
__punctuation = set([
u'!',u'@',u'#',u'$',u'%',u'^',u'&',u'*',u'(',u')',u'_',u'+',u'-',u'=',u'~',u'`',
u',',u'.',u'/',u';',u'\'',u'[',u']',u'\\',u'<',u'>',u'?',u':',u'\"',u'{',u'}',u'|'])#,
# u',',u'。',u'、',u';',u'‘',u'’',u'【',u'】',u'、',u'《',u'》',u'?',u':',u'“',u
# u'”',u'{',u'}',u'|',u'¥',u'!',u'…',u'(',u')',u'——',u'-'])
def normalize(self,str):
a = []
for word in jieba.cut(str):
word = word.strip()
            if word and word not in self.__punctuation:
if word.isalpha():
word = self.__stemmer.stem(word.lower(),0,len(word)-1)
if word not in self.__stopwords:
a.append(word)
else:
if not word.isdigit():
a.append(word)
return a
if __name__ == '__main__':
import sys
p = Parser()
fin = open(sys.argv[1])
for line in fin:
words = p.normalize(line)
for word in words:
print word, | agpl-3.0 | 2,030,494,398,209,831,400 | 35.195122 | 92 | 0.436952 | false |
fernandalavalle/mlab-ns | server/mapreduce/input_readers.py | 2 | 70093 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines input readers for MapReduce."""
__all__ = [
"AbstractDatastoreInputReader",
"ALLOW_CHECKPOINT",
"BadReaderParamsError",
"BlobstoreLineInputReader",
"BlobstoreZipInputReader",
"BlobstoreZipLineInputReader",
"COUNTER_IO_READ_BYTES",
"COUNTER_IO_READ_MSEC",
"ConsistentKeyReader",
"DatastoreEntityInputReader",
"DatastoreInputReader",
"DatastoreKeyInputReader",
"Error",
"InputReader",
"LogInputReader",
"NamespaceInputReader",
"RecordsReader",
]
# pylint: disable-msg=C6409
import base64
import copy
import logging
import StringIO
import time
import zipfile
from google.net.proto import ProtocolBuffer
from google.appengine.api import datastore
from mapreduce.lib import files
from google.appengine.api import logservice
from mapreduce.lib.files import records
from google.appengine.api.logservice import log_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from mapreduce.lib import key_range
from google.appengine.ext.db import metadata
from mapreduce import context
from mapreduce import errors
from mapreduce import model
from mapreduce import namespace_range
from mapreduce import operation
from mapreduce import util
# Classes moved to errors module. Copied here for compatibility.
Error = errors.Error
BadReaderParamsError = errors.BadReaderParamsError
# Counter name for number of bytes read.
COUNTER_IO_READ_BYTES = "io-read-bytes"
# Counter name for milliseconds spent reading data.
COUNTER_IO_READ_MSEC = "io-read-msec"
# Special value that can be yielded by InputReaders if they want to give the
# framework an opportunity to save the state of the mapreduce without having
# to yield an actual value to the handler.
ALLOW_CHECKPOINT = object()
class InputReader(model.JsonMixin):
"""Abstract base class for input readers.
InputReaders have the following properties:
* They are created by using the split_input method to generate a set of
InputReaders from a MapperSpec.
* They generate inputs to the mapper via the iterator interface.
* After creation, they can be serialized and resumed using the JsonMixin
interface.
* They are cast to string for a user-readable description; it may be
valuable to implement __str__.
"""
# When expand_parameters is False, then value yielded by reader is passed
# to handler as is. If it's true, then *value is passed, expanding arguments
# and letting handler be a multi-parameter function.
expand_parameters = False
# Mapreduce parameters.
_APP_PARAM = "_app"
NAMESPACE_PARAM = "namespace"
NAMESPACES_PARAM = "namespaces" # Obsolete.
def __iter__(self):
return self
def next(self):
"""Returns the next input from this input reader as a key, value pair.
Returns:
The next input from this input reader.
"""
raise NotImplementedError("next() not implemented in %s" % self.__class__)
@classmethod
def from_json(cls, input_shard_state):
"""Creates an instance of the InputReader for the given input shard state.
Args:
input_shard_state: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
raise NotImplementedError("from_json() not implemented in %s" % cls)
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
raise NotImplementedError("to_json() not implemented in %s" %
self.__class__)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
raise NotImplementedError("split_input() not implemented in %s" % cls)
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Input reader parameters are expected to be passed as "input_reader"
    subdictionary of mapper_spec.params. To be compatible with the previous
    API, an input reader is advised to check mapper_spec.params and issue
    a warning if the "input_reader" subdictionary is not present.
_get_params helper method can be used to simplify implementation.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
raise NotImplementedError("validate() not implemented in %s" % cls)
def _get_params(mapper_spec, allowed_keys=None):
"""Obtain input reader parameters.
Utility function for input readers implementation. Fetches parameters
from mapreduce specification giving appropriate usage warnings.
Args:
mapper_spec: The MapperSpec for the job
allowed_keys: set of all allowed keys in parameters as strings. If it is not
None, then parameters are expected to be in a separate "input_reader"
subdictionary of mapper_spec parameters.
Returns:
mapper parameters as dict
Raises:
BadReaderParamsError: if parameters are invalid/missing or not allowed.
"""
if "input_reader" not in mapper_spec.params:
message = ("Input reader's parameters should be specified in "
"input_reader subdictionary.")
if allowed_keys:
raise errors.BadReaderParamsError(message)
else:
logging.warning(message)
params = mapper_spec.params
params = dict((str(n), v) for n, v in params.iteritems())
else:
if not isinstance(mapper_spec.params.get("input_reader"), dict):
raise BadReaderParamsError(
"Input reader parameters should be a dictionary")
params = mapper_spec.params.get("input_reader")
params = dict((str(n), v) for n, v in params.iteritems())
if allowed_keys:
params_diff = set(params.keys()) - allowed_keys
if params_diff:
raise errors.BadReaderParamsError(
"Invalid input_reader parameters: %s" % ",".join(params_diff))
return params
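

# Illustrative sketch (not part of the original module): a minimal reader
# following the InputReader contract described above. It yields the integers
# [0, count) of a hypothetical "count" mapper parameter, split evenly across
# shards, and exists only to show how next/to_json/from_json/split_input/
# validate fit together; it is not meant for real pipelines.
class _ExampleCountingReader(InputReader):
  """Yields integers in [start, end) and checkpoints its position."""

  COUNT_PARAM = "count"

  def __init__(self, start, end):
    self._current = start
    self._end = end

  def next(self):
    if self._current >= self._end:
      raise StopIteration()
    value = self._current
    self._current += 1
    return value

  def to_json(self):
    return {"current": self._current, "end": self._end}

  @classmethod
  def from_json(cls, json):
    return cls(json["current"], json["end"])

  @classmethod
  def split_input(cls, mapper_spec):
    count = int(_get_params(mapper_spec)[cls.COUNT_PARAM])
    shards = max(1, mapper_spec.shard_count)
    return [cls(i * count // shards, (i + 1) * count // shards)
            for i in range(shards)]

  @classmethod
  def validate(cls, mapper_spec):
    if cls.COUNT_PARAM not in _get_params(mapper_spec):
      raise BadReaderParamsError("Missing mapper parameter 'count'")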
# TODO(user): This should probably be renamed something like
# "DatastoreInputReader" and DatastoreInputReader should be called
# "DatastoreModelReader".
class AbstractDatastoreInputReader(InputReader):
"""Abstract base class for classes that iterate over datastore entities.
Concrete subclasses must implement _iter_key_range(self, k_range). See the
docstring for that method for details.
"""
# Number of entities to fetch at once while doing scanning.
_BATCH_SIZE = 50
# Maximum number of shards we'll create.
_MAX_SHARD_COUNT = 256
# __scatter__ oversampling factor
_OVERSAMPLING_FACTOR = 32
# The maximum number of namespaces that will be sharded by datastore key
  # before switching to a strategy where sharding is done lexicographically by
# namespace.
MAX_NAMESPACES_FOR_KEY_SHARD = 10
# Mapreduce parameters.
ENTITY_KIND_PARAM = "entity_kind"
KEYS_ONLY_PARAM = "keys_only"
BATCH_SIZE_PARAM = "batch_size"
KEY_RANGE_PARAM = "key_range"
NAMESPACE_RANGE_PARAM = "namespace_range"
CURRENT_KEY_RANGE_PARAM = "current_key_range"
# TODO(user): Add support for arbitrary queries. It's not possible to
# support them without cursors since right now you can't even serialize query
# definition.
def __init__(self,
entity_kind,
key_ranges=None,
ns_range=None,
batch_size=_BATCH_SIZE,
current_key_range=None):
"""Create new AbstractDatastoreInputReader object.
This is internal constructor. Use split_query in a concrete class instead.
Args:
entity_kind: entity kind as string.
key_ranges: a sequence of key_range.KeyRange instances to process. Only
one of key_ranges or ns_range can be non-None.
ns_range: a namespace_range.NamespaceRange to process. Only one of
key_ranges or ns_range can be non-None.
batch_size: size of read batch as int.
current_key_range: the current key_range.KeyRange being processed.
"""
assert key_ranges is not None or ns_range is not None, (
"must specify one of 'key_ranges' or 'ns_range'")
assert key_ranges is None or ns_range is None, (
"can't specify both 'key_ranges ' and 'ns_range'")
self._entity_kind = entity_kind
# Reverse the KeyRanges so they can be processed in order as a stack of
# work items.
self._key_ranges = key_ranges and list(reversed(key_ranges))
self._ns_range = ns_range
self._batch_size = int(batch_size)
self._current_key_range = current_key_range
@classmethod
def _get_raw_entity_kind(cls, entity_kind):
if "." in entity_kind:
logging.warning(
". detected in entity kind %s specified for reader %s."
"Assuming entity kind contains the dot.",
entity_kind, cls.__name__)
return entity_kind
def __iter__(self):
"""Iterates over the given KeyRanges or NamespaceRange.
This method iterates over the given KeyRanges or NamespaceRange and sets
the self._current_key_range to the KeyRange currently being processed. It
then delegates to the _iter_key_range method to yield that actual
results.
Yields:
Forwards the objects yielded by the subclasses concrete _iter_key_range()
method. The caller must consume the result yielded because self.to_json()
will not include it.
"""
if self._key_ranges is not None:
for o in self._iter_key_ranges():
yield o
elif self._ns_range is not None:
for o in self._iter_ns_range():
yield o
else:
assert False, "self._key_ranges and self._ns_range are both None"
def _iter_key_ranges(self):
"""Iterates over self._key_ranges, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
if self._key_ranges:
self._current_key_range = self._key_ranges.pop()
# The most recently popped key_range may be None, so continue here
# to find the next keyrange that's valid.
continue
else:
break
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
self._current_key_range = None
def _iter_ns_range(self):
"""Iterates over self._ns_range, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
query = self._ns_range.make_datastore_query()
namespace_result = query.Get(1)
if not namespace_result:
break
namespace = namespace_result[0].name() or ""
self._current_key_range = key_range.KeyRange(
namespace=namespace, _app=self._ns_range.app)
yield ALLOW_CHECKPOINT
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
if (self._ns_range.is_single_namespace or
self._current_key_range.namespace == self._ns_range.namespace_end):
break
self._ns_range = self._ns_range.with_start_after(
self._current_key_range.namespace)
self._current_key_range = None
def _iter_key_range(self, k_range):
"""Yields a db.Key and the value that should be yielded by self.__iter__().
Args:
k_range: The key_range.KeyRange to iterate over.
Yields:
A 2-tuple containing the last db.Key processed and the value that should
be yielded by __iter__. The returned db.Key will be used to determine the
InputReader's current position in self._current_key_range.
"""
raise NotImplementedError("_iter_key_range() not implemented in %s" %
self.__class__)
def __str__(self):
"""Returns the string representation of this InputReader."""
if self._ns_range is None:
return repr(self._key_ranges)
else:
return repr(self._ns_range)
@classmethod
def _choose_split_points(cls, sorted_keys, shard_count):
"""Returns the best split points given a random set of db.Keys."""
assert len(sorted_keys) >= shard_count
index_stride = len(sorted_keys) / float(shard_count)
return [sorted_keys[int(round(index_stride * i))]
for i in range(1, shard_count)]
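
  # Illustrative example (not part of the original module): with 8 sampled
  # keys and shard_count=4 the stride is 2.0, so the method returns the keys
  # at indexes 2, 4 and 6 -- one split point fewer than the number of shards:
  #
  #   AbstractDatastoreInputReader._choose_split_points(range(8), 4) == [2, 4, 6]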
# TODO(user): use query splitting functionality when it becomes available
# instead.
@classmethod
def _split_input_from_namespace(cls, app, namespace, entity_kind,
shard_count):
"""Return KeyRange objects. Helper for _split_input_from_params.
If there are not enough Entities to make all of the given shards, the
returned list of KeyRanges will include Nones. The returned list will
    contain KeyRanges ordered lexicographically with any Nones appearing at the
end.
"""
raw_entity_kind = cls._get_raw_entity_kind(entity_kind)
if shard_count == 1:
# With one shard we don't need to calculate any splitpoints at all.
return [key_range.KeyRange(namespace=namespace, _app=app)]
# we use datastore.Query instead of ext.db.Query here, because we can't
# erase ordering on db.Query once we set it.
ds_query = datastore.Query(kind=raw_entity_kind,
namespace=namespace,
_app=app,
keys_only=True)
ds_query.Order("__scatter__")
random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)
if not random_keys:
# There are no entities with scatter property. We have no idea
# how to split.
return ([key_range.KeyRange(namespace=namespace, _app=app)] +
[None] * (shard_count - 1))
random_keys.sort()
if len(random_keys) >= shard_count:
# We've got a lot of scatter values. Sample them down.
random_keys = cls._choose_split_points(random_keys, shard_count)
key_ranges = []
key_ranges.append(key_range.KeyRange(
key_start=None,
key_end=random_keys[0],
direction=key_range.KeyRange.ASC,
include_start=False,
include_end=False,
namespace=namespace,
_app=app))
for i in range(0, len(random_keys) - 1):
key_ranges.append(key_range.KeyRange(
key_start=random_keys[i],
key_end=random_keys[i+1],
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
key_ranges.append(key_range.KeyRange(
key_start=random_keys[-1],
key_end=None,
direction=key_range.KeyRange.ASC,
include_start=True,
include_end=False,
namespace=namespace,
_app=app))
if len(key_ranges) < shard_count:
# We need to have as many shards as it was requested. Add some Nones.
key_ranges = key_ranges + [None] * (shard_count - len(key_ranges))
return key_ranges
@classmethod
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
params, shard_count):
"""Return input reader objects. Helper for split_input."""
key_ranges = [] # KeyRanges for all namespaces
for namespace in namespaces:
key_ranges.extend(
cls._split_input_from_namespace(app,
namespace,
entity_kind_name,
shard_count))
# Divide the KeyRanges into shard_count shards. The KeyRanges for different
# namespaces might be very different in size so the assignment of KeyRanges
# to shards is done round-robin.
shared_ranges = [[] for _ in range(shard_count)]
for i, k_range in enumerate(key_ranges):
shared_ranges[i % shard_count].append(k_range)
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
return [cls(entity_kind_name,
key_ranges=key_ranges,
ns_range=None,
batch_size=batch_size)
for key_ranges in shared_ranges if key_ranges]
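
  # Illustrative example (not part of the original module): the round-robin
  # assignment above spreads per-namespace KeyRanges over shards, e.g. six
  # KeyRanges [a, b, c, d, e, f] with shard_count=4 become the shard lists
  # [a, e], [b, f], [c] and [d].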
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
if cls.NAMESPACE_PARAM in params:
if not isinstance(params[cls.NAMESPACE_PARAM],
(str, unicode, type(None))):
raise BadReaderParamsError(
"Expected a single namespace string")
if cls.NAMESPACES_PARAM in params:
raise BadReaderParamsError("Multiple namespaces are no longer supported")
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to difficulty of making the perfect split, resulting shards'
sizes might differ significantly from each other.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May have 'namespace' in the params as a string containing a single
namespace. If specified then the input reader will only yield values
in the given namespace. If 'namespace' is not given then values from
all namespaces will be yielded. May also have 'batch_size' in the params
to specify the number of entities to process in each batch.
Returns:
A list of InputReader objects. If the query results are empty then the
empty list will be returned. Otherwise, the list will always have a length
equal to number_of_shards but may be padded with Nones if there are too
few results for effective sharding.
"""
params = _get_params(mapper_spec)
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
batch_size = int(params.get(cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace = params.get(cls.NAMESPACE_PARAM)
app = params.get(cls._APP_PARAM)
if namespace is None:
# It is difficult to efficiently shard large numbers of namespaces because
# there can be an arbitrary number of them. So the strategy is:
# 1. if there are a small number of namespaces in the datastore then
# generate one KeyRange per namespace per shard and assign each shard a
# KeyRange for every namespace. This should lead to nearly perfect
# sharding.
# 2. if there are a large number of namespaces in the datastore then
# generate one NamespaceRange per worker. This can lead to very bad
# sharding because namespaces can contain very different numbers of
# entities and each NamespaceRange may contain very different numbers
# of namespaces.
namespace_query = datastore.Query("__namespace__",
keys_only=True,
_app=app)
namespace_keys = namespace_query.Get(
limit=cls.MAX_NAMESPACES_FOR_KEY_SHARD+1)
if len(namespace_keys) > cls.MAX_NAMESPACES_FOR_KEY_SHARD:
ns_ranges = namespace_range.NamespaceRange.split(n=shard_count,
contiguous=True,
_app=app)
return [cls(entity_kind_name,
key_ranges=None,
ns_range=ns_range,
batch_size=batch_size)
for ns_range in ns_ranges]
elif not namespace_keys:
return [cls(entity_kind_name,
key_ranges=None,
ns_range=namespace_range.NamespaceRange(),
batch_size=shard_count)]
else:
namespaces = [namespace_key.name() or ""
for namespace_key in namespace_keys]
else:
namespaces = [namespace]
return cls._split_input_from_params(
app, namespaces, entity_kind_name, params, shard_count)
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
if self._key_ranges is None:
key_ranges_json = None
else:
key_ranges_json = []
for k in self._key_ranges:
if k:
key_ranges_json.append(k.to_json())
else:
key_ranges_json.append(None)
if self._ns_range is None:
namespace_range_json = None
else:
namespace_range_json = self._ns_range.to_json_object()
if self._current_key_range is None:
current_key_range_json = None
else:
current_key_range_json = self._current_key_range.to_json()
json_dict = {self.KEY_RANGE_PARAM: key_ranges_json,
self.NAMESPACE_RANGE_PARAM: namespace_range_json,
self.CURRENT_KEY_RANGE_PARAM: current_key_range_json,
self.ENTITY_KIND_PARAM: self._entity_kind,
self.BATCH_SIZE_PARAM: self._batch_size}
return json_dict
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
if json[cls.KEY_RANGE_PARAM] is None:
key_ranges = None
else:
key_ranges = []
for k in json[cls.KEY_RANGE_PARAM]:
if k:
key_ranges.append(key_range.KeyRange.from_json(k))
else:
key_ranges.append(None)
if json[cls.NAMESPACE_RANGE_PARAM] is None:
ns_range = None
else:
ns_range = namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM])
if json[cls.CURRENT_KEY_RANGE_PARAM] is None:
current_key_range = None
else:
current_key_range = key_range.KeyRange.from_json(
json[cls.CURRENT_KEY_RANGE_PARAM])
return cls(
json[cls.ENTITY_KIND_PARAM],
key_ranges,
ns_range,
json[cls.BATCH_SIZE_PARAM],
current_key_range)
class DatastoreInputReader(AbstractDatastoreInputReader):
"""Represents a range in query results.
DatastoreInputReader yields model instances from the entities in a given key
range. Iterating over DatastoreInputReader changes its range past consumed
entries.
The class shouldn't be instantiated directly. Use the split_input class method
instead.
"""
def _iter_key_range(self, k_range):
cursor = None
while True:
query = k_range.make_ascending_query(
util.for_name(self._entity_kind))
if isinstance(query, db.Query):
# Old db version.
if cursor:
query.with_cursor(cursor)
results = query.fetch(limit=self._batch_size)
if not results:
break
for model_instance in results:
key = model_instance.key()
yield key, model_instance
cursor = query.cursor()
else:
# NDB version using fetch_page().
results, cursor, more = query.fetch_page(self._batch_size,
start_cursor=cursor)
for model_instance in results:
key = model_instance.key
yield key, model_instance
if not more:
break
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
super(DatastoreInputReader, cls).validate(mapper_spec)
params = _get_params(mapper_spec)
keys_only = util.parse_bool(params.get(cls.KEYS_ONLY_PARAM, False))
if keys_only:
raise BadReaderParamsError("The keys_only parameter is obsolete. "
"Use DatastoreKeyInputReader instead.")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
# Fail fast if Model cannot be located.
try:
util.for_name(entity_kind_name)
except ImportError, e:
raise BadReaderParamsError("Bad entity kind: %s" % e)
@classmethod
def _get_raw_entity_kind(cls, entity_kind):
"""Returns an entity kind to use with datastore calls."""
entity_type = util.for_name(entity_kind)
if isinstance(entity_kind, db.Model):
return entity_type.kind()
else:
return util.get_short_name(entity_kind)
class DatastoreKeyInputReader(AbstractDatastoreInputReader):
"""An input reader which takes a Kind and yields Keys for that kind."""
def _iter_key_range(self, k_range):
raw_entity_kind = self._get_raw_entity_kind(self._entity_kind)
query = k_range.make_ascending_datastore_query(
raw_entity_kind, keys_only=True)
for key in query.Run(
config=datastore_query.QueryOptions(batch_size=self._batch_size)):
yield key, key
class DatastoreEntityInputReader(AbstractDatastoreInputReader):
"""An input reader which yields low level datastore entities for a kind."""
def _iter_key_range(self, k_range):
raw_entity_kind = self._get_raw_entity_kind(self._entity_kind)
query = k_range.make_ascending_datastore_query(
raw_entity_kind)
for entity in query.Run(
config=datastore_query.QueryOptions(batch_size=self._batch_size)):
yield entity.key(), entity
class BlobstoreLineInputReader(InputReader):
"""Input reader for a newline delimited blob in Blobstore."""
# TODO(user): Should we set this based on MAX_BLOB_FETCH_SIZE?
_BLOB_BUFFER_SIZE = 64000
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
  # Serialization parameters.
INITIAL_POSITION_PARAM = "initial_position"
END_POSITION_PARAM = "end_position"
BLOB_KEY_PARAM = "blob_key"
def __init__(self, blob_key, start_position, end_position):
"""Initializes this instance with the given blob key and character range.
This BlobstoreInputReader will read from the first record starting after
strictly after start_position until the first record ending at or after
end_position (exclusive). As an exception, if start_position is 0, then
this InputReader starts reading at the first record.
Args:
blob_key: the BlobKey that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
"""
self._blob_key = blob_key
self._blob_reader = blobstore.BlobReader(blob_key,
self._BLOB_BUFFER_SIZE,
start_position)
self._end_position = end_position
self._has_iterated = False
self._read_before_start = bool(start_position)
def next(self):
"""Returns the next input from as an (offset, line) tuple."""
self._has_iterated = True
if self._read_before_start:
self._blob_reader.readline()
self._read_before_start = False
start_position = self._blob_reader.tell()
if start_position > self._end_position:
raise StopIteration()
line = self._blob_reader.readline()
if not line:
raise StopIteration()
return start_position, line.rstrip("\n")
def to_json(self):
"""Returns an json-compatible input shard spec for remaining inputs."""
new_pos = self._blob_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.BLOB_KEY_PARAM: self._blob_key,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
def __str__(self):
"""Returns the string representation of this BlobstoreLineInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._blob_reader.tell(), self._end_position)
@classmethod
def from_json(cls, json):
"""Instantiates an instance of this InputReader for the given shard spec."""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'blob_keys' parameter with one or more blob keys.
Returns:
A list of BlobstoreInputReaders corresponding to the specified shards.
"""
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_sizes = {}
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
blob_sizes[blob_key] = blob_info.size
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_blob = shard_count // len(blob_keys)
if shards_per_blob == 0:
shards_per_blob = 1
chunks = []
for blob_key, blob_size in blob_sizes.items():
blob_chunk_size = blob_size // shards_per_blob
for i in xrange(shards_per_blob - 1):
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
cls.END_POSITION_PARAM: blob_size}))
return chunks
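

# Illustrative sketch (not part of the original module): the line-splitting
# contract used by BlobstoreLineInputReader, demonstrated on an in-memory
# buffer instead of a blob. A shard that does not start at offset 0 first
# skips one (possibly partial) line, because that line belongs to the previous
# shard; a line is read as long as it starts at or before the shard's end
# position. For "aaa\nbbb\nccc\n" split at offset 4, the shards (0, 4) and
# (4, 12) yield ["aaa", "bbb"] and ["ccc"] respectively, so every line is
# read exactly once.
def _demo_line_split(data, start_position, end_position):
  stream = StringIO.StringIO(data)
  stream.seek(start_position)
  if start_position:
    stream.readline()  # skip the line owned by the previous shard
  lines = []
  while True:
    position = stream.tell()
    if position > end_position:
      break
    line = stream.readline()
    if not line:
      break
    lines.append(line.rstrip("\n"))
  return lines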
class BlobstoreZipInputReader(InputReader):
"""Input reader for files from a zip archive stored in the Blobstore.
Each instance of the reader will read the TOC, from the end of the zip file,
and then only the contained files which it is responsible for.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
BLOB_KEY_PARAM = "blob_key"
START_INDEX_PARAM = "start_index"
END_INDEX_PARAM = "end_index"
def __init__(self, blob_key, start_index, end_index,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipInputReader will read from the file with index start_index
up to but not including the file with index end_index.
Args:
blob_key: the BlobKey that this input reader is processing.
start_index: the index of the first file to read.
end_index: the index of the first file that will not be read.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_index = start_index
self._end_index = end_index
self._reader = _reader
self._zip = None
self._entries = None
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._read(entry))
def _read(self, entry):
"""Read entry content.
Args:
entry: zip file entry as zipfile.ZipInfo.
Returns:
Entry content as string.
"""
start_time = time.time()
content = self._zip.read(entry.filename)
ctx = context.get()
if ctx:
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx)
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
return content
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
def __str__(self):
"""Returns the string representation of this BlobstoreZipInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._start_index, self._end_index)
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEY_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_key = params[cls.BLOB_KEY_PARAM]
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input shard states for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_key' parameter with one blob key.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning files within the zip.
"""
params = _get_params(mapper_spec)
blob_key = params[cls.BLOB_KEY_PARAM]
zip_input = zipfile.ZipFile(_reader(blob_key))
files = zip_input.infolist()
total_size = sum(x.file_size for x in files)
num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
size_per_shard = total_size // num_shards
# Break the list of files into sublists, each of approximately
# size_per_shard bytes.
shard_start_indexes = [0]
current_shard_size = 0
for i, fileinfo in enumerate(files):
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
shard_start_indexes.append(i + 1)
current_shard_size = 0
if shard_start_indexes[-1] != len(files):
shard_start_indexes.append(len(files))
return [cls(blob_key, start_index, end_index, _reader)
for start_index, end_index
in zip(shard_start_indexes, shard_start_indexes[1:])]
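

# Illustrative sketch (not part of the original module): how split_input above
# turns zip member sizes into shard boundaries. With member sizes
# [10, 10, 30, 10, 40] and two shards (size_per_shard = 50) the start indexes
# become [0, 3, 5], so one reader covers members 0-2 and the other members 3-4.
def _demo_zip_shard_indexes(file_sizes, num_shards):
  size_per_shard = sum(file_sizes) // num_shards
  shard_start_indexes = [0]
  current_shard_size = 0
  for i, file_size in enumerate(file_sizes):
    current_shard_size += file_size
    if current_shard_size >= size_per_shard:
      shard_start_indexes.append(i + 1)
      current_shard_size = 0
  if shard_start_indexes[-1] != len(file_sizes):
    shard_start_indexes.append(len(file_sizes))
  return shard_start_indexes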
class BlobstoreZipLineInputReader(InputReader):
"""Input reader for newline delimited files in zip archives from Blobstore.
This has the same external interface as the BlobstoreLineInputReader, in that
it takes a list of blobs as its input and yields lines to the reader.
However the blobs themselves are expected to be zip archives of line delimited
files instead of the files themselves.
This is useful as many line delimited files gain greatly from compression.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
BLOB_KEY_PARAM = "blob_key"
START_FILE_INDEX_PARAM = "start_file_index"
END_FILE_INDEX_PARAM = "end_file_index"
OFFSET_PARAM = "offset"
def __init__(self, blob_key, start_file_index, end_file_index, offset,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipLineInputReader will read from the file with index
start_file_index up to but not including the file with index end_file_index.
It will return lines starting at offset within file[start_file_index]
Args:
blob_key: the BlobKey that this input reader is processing.
start_file_index: the index of the first file to read within the zip.
end_file_index: the index of the first file that will not be read.
offset: the byte offset within blob_key.zip[start_file_index] to start
reading. The reader will continue to the end of the file.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_file_index = start_file_index
self._end_file_index = end_file_index
self._initial_offset = offset
self._reader = _reader
self._zip = None
self._entries = None
self._filestream = None
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
if not blob_info:
raise BadReaderParamsError("Could not find blobinfo for key %s" %
blob_key)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but it will otherwise
attempt to keep the expanded size even.
"""
params = _get_params(mapper_spec)
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
# We can break on both blob key and file-within-zip boundaries.
# A shard will span at minimum a single blob key, but may only
# handle a few files within a blob.
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
files = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in files:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers
def next(self):
"""Returns the next line from this input reader as (lineinfo, line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (blobkey, filenumber, byteoffset).
The second element of the tuple is the line found at that offset.
"""
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
value = self._zip.read(entry.filename)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
self._filestream.readline()
start_position = self._filestream.tell()
line = self._filestream.readline()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
return self.next()
return ((self._blob_key, self._start_file_index, start_position),
line.rstrip("\n"))
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
@classmethod
def from_json(cls, json, _reader=blobstore.BlobReader):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
_reader: For dependency injection.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM],
_reader)
def __str__(self):
"""Returns the string representation of this reader.
Returns:
string blobkey:[start file num, end file num]:current offset.
"""
return "blobstore.BlobKey(%r):[%d, %d]:%d" % (
self._blob_key, self._start_file_index, self._end_file_index,
self._next_offset())
class ConsistentKeyReader(DatastoreKeyInputReader):
"""A key reader which reads consistent data from datastore.
Datastore might have entities which were written, but not visible through
queries for some time. Typically these entities can be only read inside
transaction until they are 'applied'.
This reader reads all keys even if they are not visible. It might take
significant time to start yielding some data because it has to apply all
modifications created before its start.
"""
START_TIME_US_PARAM = "start_time_us"
UNAPPLIED_LOG_FILTER = "__unapplied_log_timestamp_us__ <"
DUMMY_KIND = "DUMMY_KIND"
DUMMY_ID = 106275677020293L
UNAPPLIED_QUERY_DEADLINE = 270 # Max supported by datastore.
def _get_unapplied_jobs_accross_namespaces(self,
namespace_start,
namespace_end,
app):
filters = {"__key__ >=": db.Key.from_path("__namespace__",
namespace_start or 1,
_app=app),
"__key__ <=": db.Key.from_path("__namespace__",
namespace_end or 1,
_app=app),
self.UNAPPLIED_LOG_FILTER: self.start_time_us}
unapplied_query = datastore.Query(filters=filters, keys_only=True, _app=app)
return unapplied_query.Get(
limit=self._batch_size,
config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE))
def _iter_ns_range(self):
while True:
unapplied_jobs = self._get_unapplied_jobs_accross_namespaces(
self._ns_range.namespace_start,
self._ns_range.namespace_end,
self._ns_range.app)
if not unapplied_jobs:
break
self._apply_jobs(unapplied_jobs)
for o in super(ConsistentKeyReader, self)._iter_ns_range():
yield o
def _iter_key_range(self, k_range):
assert hasattr(self, "start_time_us"), "start_time_us property was not set"
if self._ns_range is None:
# _iter_ns_range will already have dealt with unapplied jobs, so only
# handle the case where it would not have been called.
self._apply_key_range(k_range)
for o in super(ConsistentKeyReader, self)._iter_key_range(k_range):
yield o
def _apply_key_range(self, k_range):
"""Apply all jobs in the given KeyRange."""
# The strategy used here will not work if the entire key range cannot be
# applied before the task times out, because the results of incremental work
# are not checkpointed. It also assumes that the entire key range can be
# queried without timing out, which may not be the case.
# See b/5201059.
apply_range = copy.deepcopy(k_range)
while True:
# Creates an unapplied query and fetches unapplied jobs in the result
# range. self.split() ensures that the generated KeyRanges cover the
# entire possible key range.
unapplied_query = self._make_unapplied_query(apply_range)
unapplied_jobs = unapplied_query.Get(
limit=self._batch_size,
config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE))
if not unapplied_jobs:
break
self._apply_jobs(unapplied_jobs)
# Avoid requerying parts of the key range that have already been
# applied.
apply_range.advance(unapplied_jobs[-1])
def _make_unapplied_query(self, k_range):
"""Returns a datastore.Query that finds the unapplied keys in k_range."""
unapplied_query = k_range.make_ascending_datastore_query(
kind=None, keys_only=True)
unapplied_query[
ConsistentKeyReader.UNAPPLIED_LOG_FILTER] = self.start_time_us
return unapplied_query
def _apply_jobs(self, unapplied_jobs):
"""Apply all jobs implied by the given keys."""
# There were some unapplied jobs. Roll them forward.
keys_to_apply = []
for key in unapplied_jobs:
# To apply the entity group we need to read something from it.
# We use dummy kind and id because we don't actually need any data.
path = key.to_path() + [ConsistentKeyReader.DUMMY_KIND,
ConsistentKeyReader.DUMMY_ID]
keys_to_apply.append(
db.Key.from_path(_app=key.app(), namespace=key.namespace(), *path))
db.get(keys_to_apply, config=datastore_rpc.Configuration(
deadline=self.UNAPPLIED_QUERY_DEADLINE,
read_policy=datastore_rpc.Configuration.APPLY_ALL_JOBS_CONSISTENCY))
@classmethod
def _split_input_from_namespace(cls,
app,
namespace,
entity_kind_name,
shard_count):
key_ranges = super(ConsistentKeyReader, cls)._split_input_from_namespace(
app, namespace, entity_kind_name, shard_count)
assert len(key_ranges) == shard_count
# The KeyRanges calculated by the base class may not include keys for
# entities that have unapplied jobs. So use an open key range for the first
# and last KeyRanges to ensure that they will be processed.
try:
last_key_range_index = key_ranges.index(None) - 1
except ValueError:
last_key_range_index = shard_count - 1
if last_key_range_index != -1:
key_ranges[0].key_start = None
key_ranges[0].include_start = False
key_ranges[last_key_range_index].key_end = None
key_ranges[last_key_range_index].include_end = False
return key_ranges
@classmethod
def _split_input_from_params(cls, app, namespaces, entity_kind_name,
params, shard_count):
readers = super(ConsistentKeyReader, cls)._split_input_from_params(
app,
namespaces,
entity_kind_name,
params,
shard_count)
# We always produce at least one namespace range because:
# a) there might be unapplied entities
# b) it simplifies mapper code
if not readers:
readers = [cls(entity_kind_name,
key_ranges=None,
ns_range=namespace_range.NamespaceRange(),
batch_size=shard_count)]
return readers
@classmethod
def split_input(cls, mapper_spec):
"""Splits input into key ranges."""
readers = super(ConsistentKeyReader, cls).split_input(mapper_spec)
start_time_us = _get_params(mapper_spec).get(
cls.START_TIME_US_PARAM, long(time.time() * 1e6))
for reader in readers:
reader.start_time_us = start_time_us
return readers
def to_json(self):
"""Serializes all the data in this reader into json form.
Returns:
all the data in json-compatible map.
"""
json_dict = super(DatastoreKeyInputReader, self).to_json()
json_dict[self.START_TIME_US_PARAM] = self.start_time_us
return json_dict
@classmethod
def from_json(cls, json):
"""Create new ConsistentKeyReader from the json, encoded by to_json.
Args:
json: json map representation of ConsistentKeyReader.
Returns:
an instance of ConsistentKeyReader with all data deserialized from json.
"""
reader = super(ConsistentKeyReader, cls).from_json(json)
reader.start_time_us = json[cls.START_TIME_US_PARAM]
return reader
# TODO(user): This reader always produces only one shard, because
# namespace entities use the mix of ids/names, and KeyRange-based splitting
# doesn't work satisfactorily in this case.
# It's possible to implement specific splitting functionality for the reader
# instead of reusing generic one. Meanwhile 1 shard is enough for our
# applications.
class NamespaceInputReader(InputReader):
"""An input reader to iterate over namespaces.
This reader yields namespace names as string.
It will always produce only one shard.
"""
NAMESPACE_RANGE_PARAM = "namespace_range"
BATCH_SIZE_PARAM = "batch_size"
_BATCH_SIZE = 10
def __init__(self, ns_range, batch_size=_BATCH_SIZE):
self.ns_range = ns_range
self._batch_size = batch_size
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
return {self.NAMESPACE_RANGE_PARAM: self.ns_range.to_json_object(),
self.BATCH_SIZE_PARAM: self._batch_size}
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
return cls(
namespace_range.NamespaceRange.from_json_object(
json[cls.NAMESPACE_RANGE_PARAM]),
json[cls.BATCH_SIZE_PARAM])
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if cls.BATCH_SIZE_PARAM in params:
try:
batch_size = int(params[cls.BATCH_SIZE_PARAM])
if batch_size < 1:
raise BadReaderParamsError("Bad batch size: %s" % batch_size)
except ValueError, e:
raise BadReaderParamsError("Bad batch size: %s" % e)
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
batch_size = int(_get_params(mapper_spec).get(
cls.BATCH_SIZE_PARAM, cls._BATCH_SIZE))
shard_count = mapper_spec.shard_count
namespace_ranges = namespace_range.NamespaceRange.split(shard_count,
contiguous=True)
return [NamespaceInputReader(ns_range, batch_size)
for ns_range in namespace_ranges]
def __iter__(self):
while True:
keys = self.ns_range.make_datastore_query().Get(limit=self._batch_size)
if not keys:
break
for key in keys:
namespace = metadata.Namespace.key_to_namespace(key)
self.ns_range = self.ns_range.with_start_after(namespace)
yield namespace
def __str__(self):
return repr(self.ns_range)
class RecordsReader(InputReader):
"""Reader to read a list of Files API file in records format.
The number of input shards can be specified by the SHARDS_PARAM
mapper parameter. Input files cannot be split, so each input file is read by
at most one shard. The number of shards is never reduced to match the number
of input files, so the number of shards in always equals the number of shards
out.
"""
FILE_PARAM = "file"
FILES_PARAM = "files"
def __init__(self, filenames, position):
"""Constructor.
Args:
filenames: list of filenames.
position: file position to start reading from as int.
"""
self._filenames = filenames
if self._filenames:
self._reader = records.RecordsReader(
files.BufferedFile(self._filenames[0]))
self._reader.seek(position)
else:
self._reader = None
def __iter__(self):
"""Iterate over records in file.
Yields records as strings.
"""
ctx = context.get()
while self._reader:
try:
start_time = time.time()
record = self._reader.read()
if ctx:
operation.counters.Increment(
COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
operation.counters.Increment(COUNTER_IO_READ_BYTES, len(record))(ctx)
yield record
except EOFError:
self._filenames.pop(0)
if not self._filenames:
self._reader = None
else:
self._reader = records.RecordsReader(
files.BufferedFile(self._filenames[0]))
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json["filenames"], json["position"])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
result = {
"filenames": self._filenames,
"position": 0,
}
if self._reader:
result["position"] = self._reader.tell()
return result
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
params = _get_params(mapper_spec)
shard_count = mapper_spec.shard_count
if cls.FILES_PARAM in params:
filenames = params[cls.FILES_PARAM]
if isinstance(filenames, basestring):
filenames = filenames.split(",")
else:
filenames = [params[cls.FILE_PARAM]]
batch_list = [[] for _ in xrange(shard_count)]
for index, filename in enumerate(filenames):
# Simplest round robin so we don't have any short shards.
batch_list[index % shard_count].append(filenames[index])
# Sort from most shards to least shards so the short shard is last.
batch_list.sort(reverse=True, key=lambda x: len(x))
return [cls(batch, 0) for batch in batch_list]
@classmethod
def validate(cls, mapper_spec):
"""Validates mapper spec and all mapper parameters.
Args:
mapper_spec: The MapperSpec for this InputReader.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec)
if (cls.FILES_PARAM not in params and
cls.FILE_PARAM not in params):
raise BadReaderParamsError(
"Must specify '%s' or '%s' parameter for mapper input" %
(cls.FILES_PARAM, cls.FILE_PARAM))
def __str__(self):
position = 0
if self._reader:
position = self._reader.tell()
return "%s:%s" % (self._filenames, position)
class LogInputReader(InputReader):
"""Input reader for a time range of logs via the Logs Reader API.
The number of input shards may be specified by the SHARDS_PARAM mapper
parameter. A starting and ending time (in seconds since the Unix epoch) are
required to generate time ranges over which to shard the input.
"""
# Parameters directly mapping to those available via logservice.fetch().
START_TIME_PARAM = "start_time"
END_TIME_PARAM = "end_time"
MINIMUM_LOG_LEVEL_PARAM = "minimum_log_level"
INCLUDE_INCOMPLETE_PARAM = "include_incomplete"
INCLUDE_APP_LOGS_PARAM = "include_app_logs"
VERSION_IDS_PARAM = "version_ids"
# Semi-hidden parameters used only internally or for privileged applications.
_OFFSET_PARAM = "offset"
_PROTOTYPE_REQUEST_PARAM = "prototype_request"
_PARAMS = frozenset([START_TIME_PARAM, END_TIME_PARAM, _OFFSET_PARAM,
MINIMUM_LOG_LEVEL_PARAM, INCLUDE_INCOMPLETE_PARAM,
INCLUDE_APP_LOGS_PARAM, VERSION_IDS_PARAM,
_PROTOTYPE_REQUEST_PARAM])
_KWARGS = frozenset([_OFFSET_PARAM, _PROTOTYPE_REQUEST_PARAM])
def __init__(self,
start_time=None,
end_time=None,
minimum_log_level=None,
include_incomplete=False,
include_app_logs=True,
version_ids=None,
**kwargs):
"""Constructor.
Args:
start_time: The earliest request completion or last-update time of logs
that should be mapped over, in seconds since the Unix epoch.
end_time: The latest request completion or last-update time that logs
should be mapped over, in seconds since the Unix epoch.
minimum_log_level: An application log level which serves as a filter on
the requests mapped over--requests with no application log at or above
the specified level will be omitted, even if include_app_logs is False.
include_incomplete: Whether or not to include requests that have started
but not yet finished, as a boolean. Defaults to False.
include_app_logs: Whether or not to include application level logs in the
mapped logs, as a boolean. Defaults to True.
version_ids: A list of version ids whose logs should be mapped against.
"""
InputReader.__init__(self)
# The rule for __params is that its contents will always be suitable as
# input to logservice.fetch().
self.__params = dict(kwargs)
if start_time is not None:
self.__params[self.START_TIME_PARAM] = start_time
if end_time is not None:
self.__params[self.END_TIME_PARAM] = end_time
if minimum_log_level is not None:
self.__params[self.MINIMUM_LOG_LEVEL_PARAM] = minimum_log_level
if include_incomplete is not None:
self.__params[self.INCLUDE_INCOMPLETE_PARAM] = include_incomplete
if include_app_logs is not None:
self.__params[self.INCLUDE_APP_LOGS_PARAM] = include_app_logs
if version_ids:
self.__params[self.VERSION_IDS_PARAM] = version_ids
# Any submitted prototype_request will be in encoded form.
if self._PROTOTYPE_REQUEST_PARAM in self.__params:
prototype_request = log_service_pb.LogReadRequest(
self.__params[self._PROTOTYPE_REQUEST_PARAM])
self.__params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request
def __iter__(self):
"""Iterates over logs in a given range of time.
Yields:
A RequestLog containing all the information for a single request.
"""
for log in logservice.fetch(**self.__params):
self.__params[self._OFFSET_PARAM] = log.offset
yield log
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard's state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the given JSON parameters.
"""
# Strip out unrecognized parameters, as introduced by b/5960884.
params = dict((str(k), v) for k, v in json.iteritems()
if k in cls._PARAMS)
# This is not symmetric with to_json() wrt. PROTOTYPE_REQUEST_PARAM because
# the constructor parameters need to be JSON-encodable, so the decoding
# needs to happen there anyways.
if cls._OFFSET_PARAM in params:
params[cls._OFFSET_PARAM] = base64.b64decode(params[cls._OFFSET_PARAM])
return cls(**params)
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A JSON serializable version of the remaining input to read.
"""
params = dict(self.__params) # Shallow copy.
if self._PROTOTYPE_REQUEST_PARAM in params:
prototype_request = params[self._PROTOTYPE_REQUEST_PARAM]
params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request.Encode()
if self._OFFSET_PARAM in params:
params[self._OFFSET_PARAM] = base64.b64encode(params[self._OFFSET_PARAM])
return params
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the given input specification.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
"""
params = _get_params(mapper_spec)
shard_count = mapper_spec.shard_count
# Pick out the overall start and end times and time step per shard.
start_time = params[cls.START_TIME_PARAM]
end_time = params[cls.END_TIME_PARAM]
seconds_per_shard = (end_time - start_time) / shard_count
# Create a LogInputReader for each shard, modulating the params as we go.
shards = []
for _ in xrange(shard_count - 1):
params[cls.END_TIME_PARAM] = (params[cls.START_TIME_PARAM] +
seconds_per_shard)
shards.append(LogInputReader(**params))
params[cls.START_TIME_PARAM] = params[cls.END_TIME_PARAM]
# Create a final shard to complete the time range.
params[cls.END_TIME_PARAM] = end_time
return shards + [LogInputReader(**params)]
@classmethod
def validate(cls, mapper_spec):
"""Validates the mapper's specification and all necessary parameters.
Args:
mapper_spec: The MapperSpec to be used with this InputReader.
Raises:
BadReaderParamsError: If the user fails to specify both a starting time
and an ending time, or if the starting time is later than the ending
time.
"""
if mapper_spec.input_reader_class() != cls:
raise errors.BadReaderParamsError("Input reader class mismatch")
params = _get_params(mapper_spec, allowed_keys=cls._PARAMS)
if cls.VERSION_IDS_PARAM not in params:
raise errors.BadReaderParamsError("Must specify a list of version ids "
"for mapper input")
if (cls.START_TIME_PARAM not in params or
params[cls.START_TIME_PARAM] is None):
raise errors.BadReaderParamsError("Must specify a starting time for "
"mapper input")
if cls.END_TIME_PARAM not in params or params[cls.END_TIME_PARAM] is None:
params[cls.END_TIME_PARAM] = time.time()
if params[cls.START_TIME_PARAM] >= params[cls.END_TIME_PARAM]:
raise errors.BadReaderParamsError("The starting time cannot be later "
"than or the same as the ending time.")
if cls._PROTOTYPE_REQUEST_PARAM in params:
try:
params[cls._PROTOTYPE_REQUEST_PARAM] = log_service_pb.LogReadRequest(
params[cls._PROTOTYPE_REQUEST_PARAM])
except (TypeError, ProtocolBuffer.ProtocolBufferDecodeError):
raise errors.BadReaderParamsError("The prototype request must be "
"parseable as a LogReadRequest.")
# Pass the parameters to logservice.fetch() to verify any underlying
# constraints on types or values. This only constructs an iterator, it
# doesn't trigger any requests for actual log records.
try:
logservice.fetch(**params)
except logservice.InvalidArgumentError, e:
raise errors.BadReaderParamsError("One or more parameters are not valid "
"inputs to logservice.fetch(): %s" % e)
def __str__(self):
"""Returns the string representation of this LogInputReader."""
params = []
for key, value in self.__params.iteritems():
if key == self._PROTOTYPE_REQUEST_PARAM:
params.append("%s='%s'" % (key, value))
elif key == self._OFFSET_PARAM:
params.append("%s='%s'" % (key, value))
else:
params.append("%s=%s" % (key, value))
return "LogInputReader(%s)" % ", ".join(params)
| apache-2.0 | 3,572,940,923,067,458,000 | 35.186371 | 80 | 0.654973 | false |
CenterForOpenScience/SHARE | share/schema/shapes.py | 2 | 1146 | from enum import Enum
from typing import Set, NamedTuple, Optional, Tuple
RelationShape = Enum('RelationShape', ['MANY_TO_MANY', 'MANY_TO_ONE', 'ONE_TO_MANY'])
AttributeDataType = Enum('AttributeDataType', ['BOOLEAN', 'STRING', 'INTEGER', 'DATETIME', 'OBJECT'])
AttributeDataFormat = Enum('AttributeDataFormat', ['URI'])
class ShareV2SchemaType(NamedTuple):
name: str
concrete_type: str
explicit_fields: Set[str]
type_lineage: Tuple[str] = ()
@property
def distance_from_concrete_type(self):
return len(self.type_lineage)
class ShareV2SchemaAttribute(NamedTuple):
name: str
data_type: AttributeDataType
data_format: Optional[AttributeDataFormat]
is_required: bool = False
is_relation: bool = False
class ShareV2SchemaRelation(NamedTuple):
name: str
relation_shape: RelationShape
related_concrete_type: str
inverse_relation: str
through_concrete_type: Optional[str] = None
incoming_through_relation: Optional[str] = None
outgoing_through_relation: Optional[str] = None
is_required: bool = False
is_implicit: bool = False
is_relation: bool = True
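# Illustrative sketch, not part of the original module: constructing the shapes
# above by hand. The field values below are invented for demonstration only.
EXAMPLE_ATTRIBUTE = ShareV2SchemaAttribute(
    name='date_published',
    data_type=AttributeDataType.DATETIME,
    data_format=None,
)
EXAMPLE_RELATION = ShareV2SchemaRelation(
    name='agent_relations',
    relation_shape=RelationShape.ONE_TO_MANY,
    related_concrete_type='abstractagentworkrelation',
    inverse_relation='creative_work',
)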
| apache-2.0 | 8,417,613,205,347,932,000 | 28.384615 | 101 | 0.706806 | false |
nikmolnar/python-sld | sld/run_tests.py | 2 | 1347 | #!/usr/bin/env python
"""
Testing fixture for StyledLayerDescriptor library.
License
=======
Copyright 2011-2014 David Zwarg <U{[email protected]}>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
U{http://www.apache.org/licenses/LICENSE-2.0}
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: David Zwarg
@contact: [email protected]
@copyright: 2011-2014, Azavea
@license: Apache 2.0
@version: 1.0.10
"""
import unittest
import sys
import logging
from optparse import OptionParser
if __name__ == "__main__":
parser = OptionParser()
parser.add_option('-v', '--verbose', dest='verbosity',
help='Logging verbosity.', action='store_true', default=False)
(options, args) = parser.parse_args()
loglevel = logging.WARNING
if options.verbosity:
loglevel = logging.DEBUG
logging.basicConfig(format='%(message)s', level=loglevel)
sys.path.insert(0, '..')
import sld.test
unittest.main(sld.test)
| apache-2.0 | -6,708,301,877,772,915,000 | 27.0625 | 84 | 0.717149 | false |
dodger487/MIST | data/magnetak_plot.py | 1 | 8973 | """
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Chris Riederer
# Google, Inc
# 2014-08-26
"""Plotting module for MagnetAK, the Magnetometer Android toolKit"""
import magnetak_util
import pylab as pl
import numpy as np
TRUE_COLOR = 'green'
INPUT_COLOR = 'red'
def PlotData(runData, optPlotData=False, inputLabels=[]):
"""Plots the data from a run"""
pl.figure()
pl.title(runData['systemInfo']['Build.MODEL'] + " " + runData['filename'])
magData = np.array(runData['magnetometer'])
magDomain = magData[:,0] # first index is time, second is accuracy
accuracyData = magData[:,1]
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
# pl.scatter(magDomain, X, color='red')
# pl.scatter(magDomain, Y, color='blue')
# pl.scatter(magDomain, Z, color='green')
# pl.scatter(magDomain, mag, color='black')
pl.plot(magDomain, X, color='red')
pl.plot(magDomain, Y, color='blue')
pl.plot(magDomain, Z, color='green')
pl.plot(magDomain, mag, color='black')
pl.xlabel("Time (ns)") # show axes labels
pl.ylabel("Magnetometer Data ($\mu$T)")
pl.legend(["X","Y","Z","Magnitude"], loc="lower left")
accuracyColors = ['red','blue','green','black']
if optPlotData:
for index in xrange(1,len(accuracyData)-1):
if accuracyData[index] != accuracyData[index-1]:
pl.scatter(magDomain[index], 0,
color=accuracyColors[int(accuracyData[index])])
if 'labels' in runData.keys() and len(runData['labels']):
labelTime = np.array(runData['labels'])[:,0]
for t in labelTime:
pl.axvline(t, color=TRUE_COLOR)
for inputLabel in inputLabels:
pl.axvline(inputLabel, color=INPUT_COLOR)
def format_coord(x, y): # let us see the full time coordinate in the display
return 'x=%16f, y=%16f' % (x / 1e6, y)
ax = pl.gca()
ax.format_coord = format_coord
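# Illustrative sketch, not part of the original module: the minimal runData
# dictionary shape that PlotData expects, filled with made-up values. Each
# magnetometer row is [time_ns, accuracy, x, y, z].
def example_run_data():
  return {
      'systemInfo': {'Build.MODEL': 'ExamplePhone'},
      'filename': 'example_run.json',
      'magnetometer': [[t * 1e7, 3, 0.0, 0.0, 40.0 + 0.1 * t]
                       for t in range(200)],
      'labels': [],
  }
# PlotData(example_run_data()) would draw the X, Y, Z traces plus magnitude.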
def PlotList(runDataList, optPlotData=True):
"""In separate figures, plot the data for each run"""
for runData in runDataList:
PlotData(runData, optPlotData=optPlotData)
pl.show() # shows all the plots from above
def PlotFeatures(runDataList):
"""Plot X,Y,Z and magnitude of snippet in separate plots"""
f, axarr = pl.subplots(2, 4, sharex=True)
for runData in runDataList:
SubPlotFeature(runData, axarr)
positives = [rd for rd in runDataList if len(rd['labels']) > 0]
negatives = [rd for rd in runDataList if len(rd['labels']) == 0]
xp, yp, zp, mp = magnetak_util.CreateTemplates(positives)
newT = range(0,450000000,1000000)
axarr[0, 0].plot(newT, [xp(t) for t in newT], color='red')
axarr[0, 1].plot(newT, [yp(t) for t in newT], color='red')
axarr[0, 2].plot(newT, [zp(t) for t in newT], color='red')
axarr[0, 3].plot(newT, [mp(t) for t in newT], color='red')
xp, yp, zp, mp = magnetak_util.CreateTemplates(negatives)
newT = range(0,450000000,1000000)
axarr[1, 0].plot(newT, [xp(t) for t in newT], color='red')
axarr[1, 1].plot(newT, [yp(t) for t in newT], color='red')
axarr[1, 2].plot(newT, [zp(t) for t in newT], color='red')
axarr[1, 3].plot(newT, [mp(t) for t in newT], color='red')
pl.show()
def SubPlotFeature(runData, axarr):
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize based on the first row
# magData = magData - magData[-1,:] # normalize based on the last value
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
magDomain = magDomain - magDomain[0] # put in same timescale
X = magnetak_util.scale(X)
Y = magnetak_util.scale(Y)
Z = magnetak_util.scale(Z)
mag = magnetak_util.scale(mag)
row = 0 if len(runData['labels']) > 0 else 1
axarr[row, 0].plot(magDomain, X, alpha=0.2)
axarr[row, 1].plot(magDomain, Y, alpha=0.2)
axarr[row, 2].plot(magDomain, Z, alpha=0.2)
axarr[row, 3].plot(magDomain, mag, alpha=0.2)
if row == 0:
axarr[row, 0].set_ylabel('True Positive')
axarr[row, 0].set_title('X')
axarr[row, 1].set_title('Y')
axarr[row, 2].set_title('Z')
axarr[row, 3].set_title('Magnitude')
else:
axarr[row, 0].set_ylabel('True Negative')
axarr[row, 0].set_ylim(axarr[0, 0].get_ylim())
axarr[row, 1].set_ylim(axarr[0, 1].get_ylim())
axarr[row, 2].set_ylim(axarr[0, 2].get_ylim())
axarr[row, 3].set_ylim(axarr[0, 3].get_ylim())
def PlotSnip(runData):
"""Plot magnitude of snippet in the same plot,
blue if positive, red otherwise
"""
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize data based on first row
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
magDomain = magDomain - magDomain[0] # put in same timescale
color = 'blue' if len(runData['labels']) > 0 else 'red'
pl.plot(magDomain, mag, color=color, alpha=0.1)
def PlotSnips(runDataList):
pl.figure()
pl.title("Snips")
for s in runDataList:
PlotSnip(s)
pl.show()
def PlotInterpolatedSnips(runDataList):
fcns = []
for runData in runDataList:
if len(runData['labels']) == 0:
continue
magData = np.array(runData['magnetometer'])
magData = magData - magData[0,:] # normalize data based on first row
# magData = magData - magData[-1,:] # normalize data based on last row
magDomain = magData[:,0] # first index is time, second is accuracy
X = magData[:,2]
Y = magData[:,3]
Z = magData[:,4]
mag = np.sqrt(X**2 + Y**2 + Z**2)
magDomain = magDomain - magDomain[0] # put in same timescale
mag = magnetak_util.scale(mag)
fcns.append(scipy.interpolate.interp1d(magDomain, mag, kind='cubic'))
pl.plot(magDomain, mag, alpha=0.2)
numFcns = float(len(fcns))
BigF = lambda x: sum([f(x) for f in fcns]) / numFcns
newX = range(0,450000000,1000000)
newY = [BigF(x) for x in newX]
pl.plot(newX, newY, color='red')
def PlotFeatureHistograms(snipList, featurizer, featureIndex=0, samePlot=True):
"""Plots two histograms of features, one for positive examples and one for
negative examples. This is used to help engineer good features."""
positives = [rd for rd in snipList if len(rd['labels']) > 0]
negatives = [rd for rd in snipList if len(rd['labels']) == 0]
pos_features = np.array([featurizer.featurize(rd['magnetometer']) for rd in positives])
neg_features = np.array([featurizer.featurize(rd['magnetometer']) for rd in negatives])
if samePlot:
n, bins, patches = pl.hist(pos_features[:,featureIndex], color='red', alpha=0.4)
pl.hist(neg_features[:,featureIndex], color='blue', bins=bins, alpha=0.4)
pl.show()
else:
pl.figure()
pl.title("Positive examples feature distribution")
pl.hist(pos_features[:,featureIndex], color='red')
pl.figure()
pl.title("Negative examples feature distribution")
pl.hist(neg_features[:,featureIndex], color='blue')
pl.show()
def PlotThresholds(runData, T1=30, T2=130, segment_size=200):
pl.figure()
pl.title(runData['systemInfo']['Build.MODEL'] + " " + runData['filename'] + " Thresholds")
data = np.array(runData['magnetometer'])
domain = data[:,0] # first index is time, second is accuracy
# domain = domain * 1e9
min_seg1 = []
max_seg2 = []
segment_time_ns = segment_size * 1e6
window_size = segment_time_ns * 2
newDomain = domain[domain > domain[0] + window_size]
newDomain = map(long, newDomain)
for sensorTime in newDomain:
segment1 = data[(domain > sensorTime - window_size) & (domain <= sensorTime - segment_time_ns)]
segment2 = data[(domain > sensorTime - segment_time_ns) & (domain <= sensorTime)]
# For each window, calculate the baseline.
# Get the baseline S0, the last value before we start the segmentation.
S0 = segment2[-1, 2:5]
offsets1 = segment1[:, 2:5] - S0
offsets2 = segment2[:, 2:5] - S0
norms1 = [np.linalg.norm(row) for row in offsets1]
norms2 = [np.linalg.norm(row) for row in offsets2]
min_seg1.append(min(norms1))
max_seg2.append(max(norms2))
# Plot the thresholds.
pl.plot(newDomain, min_seg1, color='red')
pl.plot(newDomain, max_seg2, color='blue')
pl.plot(newDomain, np.ones(len(newDomain)) * T1, color='#aadddd') # Minimum must be lower
pl.plot(newDomain, np.ones(len(newDomain)) * T2, color='#ddaadd') # Maximum must be higher
pl.show()
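# Illustrative sketch, not part of the original module: the comparison that
# PlotThresholds visualises. A window passes when the smallest offset norm in
# the older segment falls below T1 and the largest offset norm in the newer
# segment rises above T2, matching the "Minimum must be lower" and
# "Maximum must be higher" annotations above. The function name is an
# assumption for this example only.
def example_threshold_test(min_offset_seg1, max_offset_seg2, T1=30, T2=130):
  return min_offset_seg1 < T1 and max_offset_seg2 > T2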
| apache-2.0 | 1,342,547,384,469,074,400 | 33.914397 | 99 | 0.663881 | false |
jskurka/PyChess-Learning-Module | setup.py | 1 | 5135 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from imp import load_module, find_module
pychess = load_module("pychess", *find_module("pychess",["lib"]))
from distutils.core import setup
from glob import glob
from os import listdir
from os.path import isdir, isfile
import os
import sys
# To run "setup.py register" change name to "NAME+VERSION_NAME"
# because pychess from another author already exist in pypi.
NAME = "pychess"
VERSION = pychess.VERSION
DESC = "Gnome chess game"
LONG_DESC = """PyChess is a Gtk chess client, originally developed for Gnome, but running well under all other Linux desktops.
(Which we know of, at least). PyChess is 100% python code, from the top of the UI to the bottom of the chess engine, and all code
is licensed under the GNU General Public License.
The goal of PyChess is to provide an advanced chess client for Linux following the Gnome Human Interface Guidelines. The client should be usable
by those new to chess, who just want to play a short game and get back to their work, as well as by those who want to use the computer to further
enhance their play."""
CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: X11 Applications :: Gnome',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Desktop Environment :: Gnome',
'Topic :: Games/Entertainment :: Board Games',
'Natural Language :: Arabic',
'Natural Language :: Bulgarian',
'Natural Language :: Catalan',
'Natural Language :: Czech',
'Natural Language :: Danish',
'Natural Language :: Dutch',
'Natural Language :: English',
'Natural Language :: Finnish',
'Natural Language :: French',
'Natural Language :: German',
'Natural Language :: Greek',
'Natural Language :: Hebrew',
'Natural Language :: Hindi',
'Natural Language :: Hungarian',
'Natural Language :: Italian',
'Natural Language :: Japanese',
'Natural Language :: Norwegian',
'Natural Language :: Persian',
'Natural Language :: Polish',
'Natural Language :: Portuguese',
'Natural Language :: Portuguese (Brazilian)',
'Natural Language :: Romanian',
'Natural Language :: Russian',
'Natural Language :: Slovak',
'Natural Language :: Spanish',
'Natural Language :: Swedish',
'Natural Language :: Turkish',
]
os.chdir(os.path.abspath(os.path.dirname(__file__)))
DATA_FILES = [("share/pychess",
["README", "AUTHORS", "ARTISTS", "DOCUMENTERS", "LICENSE", "TRANSLATORS", "open.db"])]
# UI
DATA_FILES += [("share/pychess/glade", glob('glade/*.glade'))]
DATA_FILES += [("share/pychess/glade", glob('glade/*.png'))]
DATA_FILES += [("share/pychess/glade", glob('glade/*.svg'))]
DATA_FILES += [("share/pychess/flags", glob('flags/*.png'))]
# Sidepanel (not a package)
DATA_FILES += [("share/pychess/sidepanel", glob('sidepanel/*.glade'))]
DATA_FILES += [("share/pychess/sidepanel", glob('sidepanel/*.py'))]
# Data
DATA_FILES += [('share/applications', ['pychess.desktop'])]
DATA_FILES += [('share/icons/hicolor/scalable/apps', ['pychess.svg'])]
DATA_FILES += [('share/pixmaps', ['pychess.svg'])]
DATA_FILES += [("share/pychess/sounds", glob('sounds/*.ogg'))]
DATA_FILES += [('share/icons/hicolor/24x24/apps', ['pychess.png'])]
DATA_FILES += [('share/gtksourceview-1.0/language-specs', ['gtksourceview-1.0/language-specs/pgn.lang'])]
# Manpages
DATA_FILES += [('share/man/man1', ['manpages/pychess.1.gz'])]
# Language
pofile = "LC_MESSAGES/pychess"
if sys.platform == "win32":
argv0_path = os.path.dirname(os.path.abspath(sys.executable))
sys.path.append(argv0_path + "\\tools\\i18n")
import msgfmt
for dir in [d for d in listdir("lang") if d.find(".svn") < 0 and isdir("lang/"+d)]:
if sys.platform == "win32":
file = "lang/%s/%s" % (dir,pofile)
msgfmt.make(file+".po", file+".mo")
else:
os.popen("msgfmt lang/%s/%s.po -o lang/%s/%s.mo" % (dir,pofile,dir,pofile))
DATA_FILES += [("share/locale/"+dir+"/LC_MESSAGES", ["lang/"+dir+"/"+pofile+".mo"])]
# Packages
PACKAGES = ["pychess", "pychess.gfx", "pychess.ic", "pychess.ic.managers",
"pychess.Players", "pychess.Savers", "pychess.System",
"pychess.Utils", "pychess.Utils.lutils", "pychess.Variants",
"pychess.widgets", "pychess.widgets.pydock" ]
# Setup
setup (
name = NAME,
version = VERSION,
author = 'Pychess team',
author_email = 'pychess-people at googlegroups com',
maintainer = 'Thomas Dybdahl Ahle',
classifiers = CLASSIFIERS,
keywords = 'python gtk chess xboard gnuchess game pgn epd board linux',
description = DESC,
long_description = LONG_DESC,
license = 'GPL3',
url = 'http://pychess.googlepages.com',
download_url = 'http://code.google.com/p/pychess/downloads/list',
package_dir = {'': 'lib'},
packages = PACKAGES,
data_files = DATA_FILES,
scripts = ['pychess']
)
| gpl-3.0 | -1,360,995,317,106,970,000 | 37.609023 | 144 | 0.644791 | false |
Duke-NSOE/GeoHAT | GeoHat_V10/Scripts/networkx/algorithms/tests/test_core.py | 16 | 4195 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestCore:
def setUp(self):
# G is the example graph in Figure 1 from Batagelj and
# Zaversnik's paper titled An O(m) Algorithm for Cores
# Decomposition of Networks, 2003,
# http://arXiv.org/abs/cs/0310049. With nodes labeled as
# shown, the 3-core is given by nodes 1-8, the 2-core by nodes
# 9-16, the 1-core by nodes 17-20 and node 21 is in the
# 0-core.
t1=nx.convert_node_labels_to_integers(nx.tetrahedral_graph(),1)
t2=nx.convert_node_labels_to_integers(t1,5)
G=nx.union(t1,t2)
G.add_edges_from( [(3,7), (2,11), (11,5), (11,12), (5,12), (12,19),
(12,18), (3,9), (7,9), (7,10), (9,10), (9,20),
(17,13), (13,14), (14,15), (15,16), (16,13)])
G.add_node(21)
self.G=G
# Create the graph H resulting from the degree sequence
# [0,1,2,2,2,2,3] when using the Havel-Hakimi algorithm.
degseq=[0,1,2,2,2,2,3]
self.H=nx.havel_hakimi_graph(degseq)
def test_trivial(self):
"""Empty graph"""
G = nx.Graph()
assert_equal(nx.find_cores(G),{})
def test_find_cores(self):
cores=nx.find_cores(self.G)
nodes_by_core=[]
for val in [0,1,2,3]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[21])
assert_equal(nodes_by_core[1],[17, 18, 19, 20])
assert_equal(nodes_by_core[2],[9, 10, 11, 12, 13, 14, 15, 16])
assert_equal(nodes_by_core[3], [1, 2, 3, 4, 5, 6, 7, 8])
def test_core_number(self):
# smoke test real name
cores=nx.core_number(self.G)
def test_find_cores2(self):
cores=nx.find_cores(self.H)
nodes_by_core=[]
for val in [0,1,2]:
nodes_by_core.append( sorted([k for k in cores if cores[k]==val]))
assert_equal(nodes_by_core[0],[0])
assert_equal(nodes_by_core[1],[1, 3])
assert_equal(nodes_by_core[2],[2, 4, 5, 6])
def test_main_core(self):
main_core_subgraph=nx.k_core(self.H)
assert_equal(sorted(main_core_subgraph.nodes()),[2,4,5,6])
def test_k_core(self):
# k=0
k_core_subgraph=nx.k_core(self.H,k=0)
assert_equal(sorted(k_core_subgraph.nodes()),sorted(self.H.nodes()))
# k=1
k_core_subgraph=nx.k_core(self.H,k=1)
assert_equal(sorted(k_core_subgraph.nodes()),[1,2,3,4,5,6])
# k=2
k_core_subgraph=nx.k_core(self.H,k=2)
assert_equal(sorted(k_core_subgraph.nodes()),[2,4,5,6])
def test_main_crust(self):
main_crust_subgraph=nx.k_crust(self.H)
assert_equal(sorted(main_crust_subgraph.nodes()),[0,1,3])
def test_k_crust(self):
# k=2
k_crust_subgraph=nx.k_crust(self.H,k=2)
assert_equal(sorted(k_crust_subgraph.nodes()),sorted(self.H.nodes()))
# k=1
k_crust_subgraph=nx.k_crust(self.H,k=1)
assert_equal(sorted(k_crust_subgraph.nodes()),[0,1,3])
# k=0
k_crust_subgraph=nx.k_crust(self.H,k=0)
assert_equal(sorted(k_crust_subgraph.nodes()),[0])
def test_main_shell(self):
main_shell_subgraph=nx.k_shell(self.H)
assert_equal(sorted(main_shell_subgraph.nodes()),[2,4,5,6])
def test_k_shell(self):
# k=2
k_shell_subgraph=nx.k_shell(self.H,k=2)
assert_equal(sorted(k_shell_subgraph.nodes()),[2,4,5,6])
# k=1
k_shell_subgraph=nx.k_shell(self.H,k=1)
assert_equal(sorted(k_shell_subgraph.nodes()),[1,3])
# k=0
k_shell_subgraph=nx.k_shell(self.H,k=0)
assert_equal(sorted(k_shell_subgraph.nodes()),[0])
def test_k_corona(self):
# k=2
k_corona_subgraph=nx.k_corona(self.H,k=2)
assert_equal(sorted(k_corona_subgraph.nodes()),[2,4,5,6])
# k=1
k_corona_subgraph=nx.k_corona(self.H,k=1)
assert_equal(sorted(k_corona_subgraph.nodes()),[1])
# k=0
k_corona_subgraph=nx.k_corona(self.H,k=0)
assert_equal(sorted(k_corona_subgraph.nodes()),[0])
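# Illustrative sketch, not part of the original test module: core numbers on a
# tiny graph, using the same networkx calls exercised by the tests above.
def example_core_numbers():
    G = nx.complete_graph(4)   # nodes 0-3 form a clique, each of degree 3
    G.add_edge(3, 4)           # node 4 hangs off the clique by a single edge
    cores = nx.core_number(G)
    assert all(cores[n] == 3 for n in range(4))  # the clique is the 3-core
    assert cores[4] == 1                         # the pendant node is 1-core
    return cores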
| cc0-1.0 | 7,264,925,824,296,135,000 | 35.798246 | 78 | 0.56472 | false |
kriswuollett/grpc | src/python/grpcio/grpc/beta/_server_adaptations.py | 19 | 14757 | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
# pylint: disable=too-many-return-statements
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _common.to_cygrpc_metadata(
self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(initial_metadata)
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(terminal_metadata)
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
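# Illustrative sketch, not part of the original module: how _Callback behaves
# when driven from a single thread. The function name is an assumption made
# for this example only.
def _example_callback_usage():
    callback = _Callback()
    callback.consume('first response')
    callback.consume_and_terminate('second response')
    # Values come back in FIFO order; None signals termination.
    assert callback.draw_one_value() == 'first response'
    assert callback.draw_one_value() == 'second response'
    assert callback.draw_one_value() is None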
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
def stop_request_pipe(timeout): # pylint: disable=unused-argument
thread_joined.set()
request_pipe_thread = _common.CleanupThread(
stop_request_pipe, target=pipe_requests)
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming', 'response_streaming', 'request_deserializer',
'response_serializer', 'unary_unary', 'unary_stream',
'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_request_inline(implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_request_inline(implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_unary_event(implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_stream_event(implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(
True, False, request_deserializer, response_serializer, None,
None,
_adapt_stream_unary_event(implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_stream_event(implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(
method_implementation,
self._request_deserializers.get(handler_call_details.method),
self._response_serializers.get(handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, grpc_server):
self._grpc_server = grpc_server
def add_insecure_port(self, address):
return self._grpc_server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._grpc_server.add_secure_port(address, server_credentials)
def start(self):
self._grpc_server.start()
def stop(self, grace):
return self._grpc_server.stop(grace)
def __enter__(self):
self._grpc_server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._grpc_server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(effective_thread_pool, handlers=(generic_rpc_handler,)))
| bsd-3-clause | -1,663,827,651,052,878,600 | 36.549618 | 82 | 0.640984 | false |
UTSDataArena/examples | earth/LoadKML.py | 1 | 1342 | # TODO: append GeoLoader package to python search path in omegalib: workaround
import os.path, sys
basePath = os.path.dirname(os.path.abspath(__file__)) # for current dir of file
modulePath = os.path.dirname(basePath) # for GeoLoader packages - '/local/examples'
sys.path.append(modulePath)
from omega import setEventFunction, setUpdateFunction
from pipelines.objects import KML
from pipelines.handler import GeometryHandler
modelFile = basePath + "/mapquest_osm.earth"
geo = KML(modelFile)
geo.addKml(basePath + "/polygon.kml")
# Polygon and Path working, Placemark Points, GroundOverlays not (no HTML)
# Polygon needs to be elevated
# TODO KMZ
geo.yRotClamp = 360
geo.xRotClamp = geo.zRotClamp = 0
geo.xMoveClamp = geo.yMoveClamp = geo.zMoveClamp = 0.07
geo.initialRotation = [-90, 122, 0]
# scale model down from earth dimensions to 0-1
geo.model.setScale(0.1**7, 0.1**7, 0.1**7)
geo.reset()
handler = GeometryHandler()
handler.initialCamPosition = [0, -0.36, 0.6]
handler.yRotSensitivity /= 4
handler.xMoveSensitivity /= 40
handler.yMoveSensitivity /= 40
handler.zMoveSensitivity /= 4
handler.spaceNavMoveSensitivity /= 5
handler.spaceNavRotSensitivity /= 8
handler.allowXRot = False
handler.allowZRot = False
handler.allowXMove = False
handler.addGeo(geo)
setEventFunction(handler.onEvent)
setUpdateFunction(handler.onUpdate)
| bsd-2-clause | 7,411,220,163,943,094,000 | 29.5 | 83 | 0.771237 | false |
hackebrot/pytest | testing/test_helpconfig.py | 3 | 2111 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
def test_version(testdir, pytestconfig):
result = testdir.runpytest("--version")
assert result.ret == 0
# p = py.path.local(py.__file__).dirpath()
result.stderr.fnmatch_lines(
["*pytest*{}*imported from*".format(pytest.__version__)]
)
if pytestconfig.pluginmanager.list_plugin_distinfo():
result.stderr.fnmatch_lines(["*setuptools registered plugins:", "*at*"])
def test_help(testdir):
result = testdir.runpytest("--help")
assert result.ret == 0
result.stdout.fnmatch_lines(
"""
*-v*verbose*
*setup.cfg*
*minversion*
*to see*markers*pytest --markers*
*to see*fixtures*pytest --fixtures*
"""
)
def test_hookvalidation_unknown(testdir):
testdir.makeconftest(
"""
def pytest_hello(xyz):
pass
"""
)
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(["*unknown hook*pytest_hello*"])
def test_hookvalidation_optional(testdir):
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(optionalhook=True)
def pytest_hello(xyz):
pass
"""
)
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_traceconfig(testdir):
result = testdir.runpytest("--traceconfig")
result.stdout.fnmatch_lines(["*using*pytest*py*", "*active plugins*"])
def test_debug(testdir, monkeypatch):
result = testdir.runpytest_subprocess("--debug")
assert result.ret == EXIT_NOTESTSCOLLECTED
p = testdir.tmpdir.join("pytestdebug.log")
assert "pytest_sessionstart" in p.read()
def test_PYTEST_DEBUG(testdir, monkeypatch):
monkeypatch.setenv("PYTEST_DEBUG", "1")
result = testdir.runpytest_subprocess()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines(
["*pytest_plugin_registered*", "*manager*PluginManager*"]
)
| mit | -7,620,794,595,243,463,000 | 26.415584 | 80 | 0.646139 | false |
RipcordSoftware/replication-monitor | ui/new_replications_window.py | 2 | 2058 | from gi.repository import Gtk, Gdk, GObject
from src.gtk_helper import GtkHelper
class NewReplicationsWindow:
def __init__(self, builder, hide_callback=None):
self._win = builder.get_object('window_new_replications', target=self, include_children=True)
self._hide_callback = hide_callback
self._model = Gtk.ListStore(str, str, str, str)
self.treeview_new_replications_queue.set_model(self._model)
def get_title(self):
return self._win.get_title()
def set_title(self, title):
return self._win.set_title(title)
def show(self):
self._win.show()
def hide(self):
self._win.hide()
def add(self, repl):
itr = self._model.append([repl.source, repl.target, 'image-loading', ''])
path = self._model.get_path(itr)
return Gtk.TreeRowReference.new(self._model, path)
def update_success(self, reference):
assert isinstance(reference, Gtk.TreeRowReference)
if reference.valid():
def func():
path = reference.get_path()
self._model[path][2] = 'emblem-default'
GtkHelper.invoke(func)
def update_failed(self, reference, err=None):
assert isinstance(reference, Gtk.TreeRowReference)
if reference.valid():
def func():
path = reference.get_path()
self._model[path][2] = 'emblem-important'
if err:
if isinstance(err, Exception):
self._model[path][3] = '{}: {}'.format(type(err).__name__, str(err))
else:
self._model[path][3] = str(err)
GtkHelper.invoke(func)
# region Events
def on_window_new_replications_show(self, widget):
pass
def on_window_new_replications_delete_event(self, widget, user_data):
if self._hide_callback and callable(self._hide_callback):
self._hide_callback()
else:
self._win.hide()
return True
# endregion
| mit | -8,508,211,357,497,981,000 | 32.737705 | 101 | 0.578717 | false |
sumerc/yappi | tests/test_hooks.py | 1 | 7136 | import re
import subprocess
import sys
import unittest
import time
import yappi
import utils
def a():
pass
class ContextIdCallbackTest(utils.YappiUnitTestCase):
"""Test yappi.set_context_id_callback()."""
def test_profile_single_context(self):
def id_callback():
return self.callback_count
def a():
pass
self.callback_count = 1
yappi.set_context_id_callback(id_callback)
yappi.start(profile_threads=False)
a() # context-id:1
self.callback_count = 2
a() # context-id:2
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.ncall, 1)
yappi.stop()
yappi.clear_stats()
self.callback_count = 1
yappi.start() # profile_threads=True
a() # context-id:1
self.callback_count = 2
a() # context-id:2
stats = yappi.get_func_stats()
fsa = utils.find_stat_by_name(stats, "a")
self.assertEqual(fsa.ncall, 2)
def test_bad_input(self):
self.assertRaises(TypeError, yappi.set_context_id_callback, 1)
def test_clear_callback(self):
self.callback_count = 0
def callback():
self.callback_count += 1
return 1
yappi.set_context_id_callback(callback)
yappi.start()
a()
yappi.set_context_id_callback(None)
old_callback_count = self.callback_count
a()
yappi.stop()
self.assertEqual(old_callback_count, self.callback_count)
def test_callback_error(self):
self.callback_count = 0
def callback():
self.callback_count += 1
raise Exception('callback error')
yappi.set_context_id_callback(callback)
yappi.start()
a()
a()
yappi.stop()
# Callback was cleared after first error.
self.assertEqual(1, self.callback_count)
def test_callback_non_integer(self):
self.callback_count = 0
def callback():
self.callback_count += 1
return None # Supposed to return an integer.
yappi.set_context_id_callback(callback)
yappi.start()
a()
a()
yappi.stop()
# Callback was cleared after first error.
self.assertEqual(1, self.callback_count)
def test_callback(self):
self.context_id = 0
yappi.set_context_id_callback(lambda: self.context_id)
yappi.start()
a()
self.context_id = 1
a()
self.context_id = 2
a()
# Re-schedule context 1.
self.context_id = 1
a()
yappi.stop()
threadstats = yappi.get_thread_stats().sort('id', 'ascending')
self.assertEqual(3, len(threadstats))
self.assertEqual(0, threadstats[0].id)
self.assertEqual(1, threadstats[1].id)
self.assertEqual(2, threadstats[2].id)
self.assertEqual(1, threadstats[0].sched_count)
self.assertEqual(2, threadstats[1].sched_count) # Context 1 ran twice.
self.assertEqual(1, threadstats[2].sched_count)
funcstats = yappi.get_func_stats()
self.assertEqual(4, utils.find_stat_by_name(funcstats, 'a').ncall)
def test_pause_resume(self):
yappi.set_context_id_callback(lambda: self.context_id)
yappi.set_clock_type('wall')
# Start in context 0.
self.context_id = 0
yappi.start()
time.sleep(0.08)
# Switch to context 1.
self.context_id = 1
time.sleep(0.05)
# Switch back to context 0.
self.context_id = 0
time.sleep(0.07)
yappi.stop()
t_stats = yappi.get_thread_stats().sort('id', 'ascending')
self.assertEqual(2, len(t_stats))
self.assertEqual(0, t_stats[0].id)
self.assertEqual(2, t_stats[0].sched_count)
self.assertTrue(0.15 < t_stats[0].ttot < 0.7, t_stats[0].ttot)
self.assertEqual(1, t_stats[1].id)
self.assertEqual(1, t_stats[1].sched_count)
# Context 1 was scheduled for 0.05 seconds during the run of the
# profiler
self.assert_almost_equal(t_stats[1].ttot, 0.05)
class ContextNameCallbackTest(utils.YappiUnitTestCase):
""" Test yappi.set_context_name_callback(). """
def tearDown(self):
yappi.set_context_name_callback(None)
super(ContextNameCallbackTest, self).tearDown()
def test_bad_input(self):
self.assertRaises(TypeError, yappi.set_context_name_callback, 1)
def test_clear_callback(self):
self.callback_count = 0
def callback():
self.callback_count += 1
return 'name'
yappi.set_context_name_callback(callback)
yappi.start()
a()
yappi.set_context_name_callback(None)
old_callback_count = self.callback_count
a()
yappi.stop()
self.assertEqual(old_callback_count, self.callback_count)
def test_callback_error(self):
self.callback_count = 0
def callback():
self.callback_count += 1
raise Exception('callback error')
yappi.set_context_name_callback(callback)
yappi.start()
a()
a()
yappi.stop()
# Callback was cleared after first error.
self.assertEqual(1, self.callback_count)
def test_callback_none_return(self):
self.callback_count = 0
def callback():
self.callback_count += 1
if self.callback_count < 3:
return None # yappi will call again
else:
return "name"
yappi.set_context_name_callback(callback)
yappi.start()
a()
a()
yappi.stop()
# yappi tried again until a string was returned
self.assertEqual(3, self.callback_count)
def test_callback_non_string(self):
self.callback_count = 0
def callback():
self.callback_count += 1
return 1 # Supposed to return a string.
yappi.set_context_name_callback(callback)
yappi.start()
a()
a()
yappi.stop()
# Callback was cleared after first error.
self.assertEqual(1, self.callback_count)
def test_callback(self):
self.context_id = 0
self.context_name = 'a'
yappi.set_context_id_callback(lambda: self.context_id)
yappi.set_context_name_callback(lambda: self.context_name)
yappi.start()
a()
self.context_id = 1
self.context_name = 'b'
a()
# Re-schedule context 0.
self.context_id = 0
self.context_name = 'a'
a()
yappi.stop()
threadstats = yappi.get_thread_stats().sort('name', 'ascending')
self.assertEqual(2, len(threadstats))
self.assertEqual(0, threadstats[0].id)
self.assertEqual('a', threadstats[0].name)
self.assertEqual(1, threadstats[1].id)
self.assertEqual('b', threadstats[1].name)
if __name__ == '__main__':
unittest.main()
| mit | -4,016,871,694,571,817,000 | 26.13308 | 79 | 0.578055 | false |
endlessm/chromium-browser | third_party/catapult/firefighter/default/handlers/trace.py | 7 | 1172 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import urllib
import webapp2
from common import jinja
from common import query_filter
class Trace(webapp2.RequestHandler):
def get(self):
try:
filters = query_filter.Filters(self.request)
except ValueError as e:
self.response.headers['Content-Type'] = 'application/json'
      self.response.out.write(json.dumps({'error': str(e)}))
return
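    # Re-encode the validated filters as query string parameters for the trace page template.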
query_parameters = []
for filter_name, filter_values in filters.iteritems():
if filter_name == 'start_time':
query_parameters.append(('start_time', filter_values))
elif filter_name == 'end_time':
query_parameters.append(('end_time', filter_values))
else:
for filter_value in filter_values:
query_parameters.append((filter_name, filter_value))
template_values = {
'query_string': urllib.urlencode(query_parameters),
}
template = jinja.ENVIRONMENT.get_template('trace.html')
# pylint: disable=no-member
self.response.out.write(template.render(template_values))
| bsd-3-clause | -5,157,119,744,680,215,000 | 29.842105 | 72 | 0.683447 | false |
rrahn/gdf_tools | include/seqan/apps/tree_recon/tests/run_tests.py | 13 | 3621 | #!/usr/bin/env python
"""Execute the tests for the tree_recomb program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
    print 'Executing test for tree_recon'
print '=============================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'apps/tree_recon/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'apps/tree_recon', 'tree_recon')
# ============================================================
# Built TestConf list.
# ============================================================
# Build list with TestConf objects, analoguely to how the output
# was generated in generate_outputs.sh.
conf_list = []
for i in [1, 2, 3]:
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.dot' % i)],
to_diff=[(ph.inFile('example%d.dot' % i),
ph.outFile('example%d.dot' % i))])
conf_list.append(conf)
for i in [1, 2, 3]:
for b in ['nj', 'min', 'max', 'avg', 'wavg']:
if i == 1 and b == 'avg':
continue # Skip, rounding problems MSVC vs GCC.
conf = app_tests.TestConf(
program=path_to_program,
args=['-b', b,
'-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.%s.dot' % (i, b))],
to_diff=[(ph.inFile('example%d.%s.dot' % (i, b)),
ph.outFile('example%d.%s.dot' % (i, b)))])
conf_list.append(conf)
for i in [1, 2, 3]:
for f in ['dot', 'newick']:
conf = app_tests.TestConf(
program=path_to_program,
args=['-m', ph.inFile('example%d.dist' % i),
'-o', ph.outFile('example%d.%s' % (i, f))],
to_diff=[(ph.inFile('example%d.%s' % (i, f)),
ph.outFile('example%d.%s' % (i, f)))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
        print ' '.join(['tree_recon'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
# Compute and return return code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
| gpl-3.0 | 3,598,783,130,950,774,300 | 34.5 | 79 | 0.481911 | false |
patjouk/djangogirls | organize/tests/test_models.py | 1 | 3370 | import vcr
from django.core import mail
from django.core.exceptions import ValidationError
from django.test import TestCase
from organize.constants import DEPLOYED, ON_HOLD, REJECTED
from organize.models import EventApplication
from core.models import Event, User
class EventApplicationTest(TestCase):
fixtures = ['event_application_testdata.json', 'users_testdata.json',]
def test_comment_required_for_on_hold_application(self):
event_application = EventApplication.objects.get(pk=1)
event_application.status = ON_HOLD
with self.assertRaises(ValidationError):
event_application.clean()
event_application.comment = "Comment"
try:
event_application.clean()
except ValidationError:
self.fail("Event application should be valid.")
def test_all_recipients(self):
event_application = EventApplication.objects.get(pk=1)
assert len(event_application.get_organizers_emails()) == \
event_application.coorganizers.count() + 1
def test_reject_method(self):
event_application = EventApplication.objects.get(pk=1)
event_application.reject()
        assert event_application.status == REJECTED
assert len(mail.outbox) == 1
email = mail.outbox[0]
assert email.to == event_application.get_organizers_emails()
@vcr.use_cassette('organize/tests/vcr/deploy_from_previous_event.yaml')
def test_deploy_event_from_previous_event(self):
event_application = EventApplication.objects.get(pk=1)
Event.objects.create(
city=event_application.city,
country=event_application.country
)
event_application.deploy()
        assert event_application.status == DEPLOYED
assert len(mail.outbox) == 4
email_subjects = [e.subject for e in mail.outbox]
self.assertTrue("Access to Django Girls website" in email_subjects)
self.assertTrue("Congrats! Your application to organize Django Girls London has been accepted!" in email_subjects)
@vcr.use_cassette('organize/tests/vcr/latlng.yaml')
def test_latlng_is_fetched_when_creating_application(self):
event_application = EventApplication.objects.get(pk=1)
assert event_application.latlng == '0.0,0.0'
event_application.latlng = ''
event_application.save()
assert event_application.latlng == '39.4747112, -0.3798073'
def test_has_past_team_members(self):
user = User.objects.get(pk=1)
event_application = EventApplication.objects.get(pk=1)
event_application.main_organizer_email = user.email
event_application.save()
event = Event.objects.create(
city=event_application.city,
country=event_application.country,
)
# first event in city has nothing to compare so we return False
self.assertFalse(event_application.has_past_team_members(event))
next_event = Event.objects.create(
city=event.city,
country=event.country
)
# if there are no same organizers we return False
self.assertFalse(event_application.has_past_team_members(next_event))
event.team.add(user)
# if there is a common organizer, return True
self.assertTrue(event_application.has_past_team_members(next_event))
| bsd-3-clause | -4,528,928,447,635,633,700 | 36.865169 | 122 | 0.675074 | false |
hospace/ToughRADIUS | toughradius/console/customer/forms.py | 3 | 5988 | #coding:utf-8
from toughradius.console.libs import pyforms
from toughradius.console.libs.pyforms import dataform
from toughradius.console.libs.pyforms import rules
from toughradius.console.libs.pyforms.rules import button_style,input_style
boolean = {0:u"否", 1:u"是"}
sexopt = {1:u"男",0:u"女"}
member_login_form = pyforms.Form(
pyforms.Textbox("username", rules.len_of(1, 32), description=u"用户名", size=32,required="required",**input_style),
pyforms.Password("password", rules.len_of(1,32), description=u"登录密码", size=32, required="required",**input_style),
pyforms.Button("submit", type="submit", html=u"<b>登陆</b>", **button_style),
pyforms.Hidden("next",value="/"),
action="/auth/login",
title=u"用户登陆"
)
def member_join_form(nodes=[]):
return pyforms.Form(
pyforms.Dropdown("node_id", description=u"区域", args=nodes,required="required", **input_style),
pyforms.Textbox("realname", rules.len_of(2,32), description=u"用户姓名(必填)", required="required",**input_style),
pyforms.Dropdown("sex", description=u"性别", args=sexopt.items(),required="required", **input_style),
pyforms.Textbox("age", rules.is_number, description=u"年龄(必填)", size=3,required="required",**input_style),
pyforms.Textbox("username", rules.is_alphanum3(6, 32), description=u"用户名(必填)", size=32,required="required",**input_style),
pyforms.Password("password", rules.len_of(6,32), description=u"登录密码(必填)", size=32, required="required",**input_style),
pyforms.Textbox("email", rules.is_email, description=u"电子邮箱(必填)", size=64,required="required",**input_style),
pyforms.Textbox("idcard", rules.len_of(0,32), description=u"证件号码", **input_style),
pyforms.Textbox("mobile", rules.len_of(0,32),description=u"用户手机号码", **input_style),
pyforms.Textbox("address", description=u"用户地址",hr=True, **input_style),
pyforms.Button("submit", type="submit", html=u"<b>注册</b>", **button_style),
action="/join",
title=u"用户注册"
)
password_update_form = pyforms.Form(
pyforms.Textbox("account_number", description=u"用户账号", readonly="readonly", **input_style),
pyforms.Password("old_password",description=u"旧密码(必填)", required="required",**input_style),
pyforms.Password("new_password", rules.is_alphanum3(6, 32),description=u"新密码(必填)", required="required",**input_style),
pyforms.Password("new_password2",rules.is_alphanum3(6, 32), description=u"确认新密码(必填)", required="required",**input_style),
pyforms.Button("submit", type="submit", html=u"<b>提交</b>", **button_style),
title=u"修改密码",
action="/password/update"
)
password_mail_form = pyforms.Form(
pyforms.Textbox("member_name", rules.len_of(1, 64),description=u"请输入登录名", required="required",**input_style),
pyforms.Button("submit", type="submit", html=u"<b>提交</b>", **button_style),
title=u"重置密码请求",
action="/password/mail"
)
password_reset_form = pyforms.Form(
pyforms.Hidden("active_code", description=u"", **input_style),
pyforms.Password("new_password", rules.is_alphanum3(6, 32),description=u"新密码(必填)", required="required",**input_style),
pyforms.Password("new_password2",rules.is_alphanum3(6, 32), description=u"确认新密码(必填)", required="required",**input_style),
pyforms.Button("submit", type="submit", html=u"<b>重置密码</b>", **button_style),
title=u"重置密码",
action="/password/reset"
)
def account_open_form(products=[]):
return pyforms.Form(
pyforms.Textbox("recharge_card", description=u"充值卡号", required="required", **input_style),
pyforms.Password("recharge_pwd", description=u"充值卡密码", required="required", **input_style),
pyforms.Textbox("account_number", description=u"用户账号", required="required", **input_style),
pyforms.Password("password", description=u"认证密码", required="required", **input_style),
pyforms.Dropdown("product_id",args=products, description=u"资费", required="required", **input_style),
pyforms.Button("submit", type="submit", html=u"<b>提交</b>", **button_style),
title=u"用户自助开户",
action="/open"
)
recharge_form = pyforms.Form(
pyforms.Textbox("account_number",description=u"用户账号",readonly="readonly", **input_style),
pyforms.Textbox("recharge_card", description=u"充值卡号", required="required", **input_style),
pyforms.Password("recharge_pwd", description=u"充值卡密码", required="required", **input_style),
pyforms.Button("submit", type="submit", html=u"<b>提交</b>", **button_style),
title=u"用户自助充值",
action="/recharge"
)
def member_update_form():
return pyforms.Form(
pyforms.Textbox("realname", description=u"用户姓名",readonly="readonly",**input_style),
pyforms.Textbox("member_name", description=u"用户登陆名", readonly="readonly",**input_style),
pyforms.Password("new_password", rules.len_of(0,128),value="", description=u"用户登陆密码(留空不修改)", **input_style),
pyforms.Textbox("email", rules.len_of(0,128), description=u"电子邮箱", **input_style),
# pyforms.Textbox("idcard", rules.len_of(0,32), description=u"证件号码", **input_style),
# pyforms.Textbox("mobile", rules.len_of(0,32),description=u"用户手机号码", **input_style),
pyforms.Textbox("address", description=u"用户地址",hr=True, **input_style),
pyforms.Button("submit", type="submit", html=u"<b>提交</b>", **button_style),
title=u"用户基本信息修改",
action="/user/update"
)
| agpl-3.0 | -7,405,204,193,205,255,000 | 52.25 | 130 | 0.653666 | false |
aspc/mainsite | aspc/events/backends/facebook.py | 1 | 6686 | import dateutil.parser
from django.conf import settings
import requests
import urlparse
from aspc.events.exceptions import InvalidEventException, InvalidFacebookEventPageException
import re
import logging
import pytz
import json
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
class FacebookBackend(object):
event_required_fields = ('name', 'place', 'start_time', 'description', 'owner')
page_required_fields = ('name', 'link', 'id')
GRAPH_API_TEMPLATE = 'https://graph.facebook.com/v2.8/'
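    # Matches facebook.com/events/<numeric id> URLs (with or without scheme/www) and captures the event id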
EVENT_LINK_TEMPLATE = re.compile(r'(?:https?:\/\/(?:www\.)?)?facebook.com/events/(?P<event_id>\d+)')
def __init__(self, options=None):
self.facebook_token = self._get_access_token()
def _get_access_token(self):
response = requests.get(
self.GRAPH_API_TEMPLATE + 'oauth/access_token',
params = {
'client_id': settings.FACEBOOK_APP_ID,
'client_secret': settings.FACEBOOK_APP_SECRET,
'grant_type': 'client_credentials'
}
)
response_data = json.loads(response.text) # Parses the returned json
return response_data['access_token']
def _event_lookup(self, event_id):
response = requests.get(
self.GRAPH_API_TEMPLATE + event_id,
params = {
'access_token': self.facebook_token,
'fields': ','.join(self.event_required_fields)
}
)
if response.status_code != 200:
raise InvalidEventException('Unable to retrieve event details.')
return response.json()
def _page_lookup(self, page_id):
response = requests.get(
self.GRAPH_API_TEMPLATE + page_id,
params = {
'access_token': self.facebook_token,
'fields': ','.join(self.page_required_fields)
}
)
if response.status_code != 200:
raise InvalidFacebookEventPageException('Unable to retrieve page details.')
return response.json()
def _parse_event_data(self, event_data):
        # Reject events that have an empty start_time and no end_time (i.e. no specific time information)
if not event_data.get('start_time', True) and not event_data.get('end_time'):
raise InvalidEventException('Event does not have a specific start time.')
start_dt = dateutil.parser.parse(event_data['start_time'])
start = start_dt.astimezone(pytz.UTC)
yesterday = (datetime.now() - timedelta(days=1)).replace(tzinfo = pytz.utc)
# Checks if the event is in the past (compare to yesterday's date just in case)
if start < yesterday:
raise InvalidEventException('This event has already taken place!')
# Checks if the event has all the other necessary fields
if not all((key in event_data.keys()) for key in self.event_required_fields):
raise InvalidEventException('The event is missing location or description information.')
normalized = {
'name': event_data['name'],
'location': event_data['place']['name'],
'start': start,
'description': event_data.get('description', ''),
'host': event_data['owner']['name'],
'url': 'http://www.facebook.com/events/' + event_data['id']
}
if 'end_time' in event_data.keys():
end_dt = dateutil.parser.parse(event_data['end_time'])
normalized['end'] = end_dt.astimezone(pytz.UTC)
return normalized
def _parse_page_data(self, page_data):
# Checks if the page has all the necessary fields
if not all((key in page_data.keys()) for key in self.page_required_fields):
raise InvalidFacebookEventPageException('Page missing required fields.')
normalized = {
'name': page_data['name'],
'url': page_data['link'],
'page_id': page_data['id']
}
return normalized
# Public
# Intended to be invoked by EventController#new_event
def get_event_data(self, event_url):
try:
event_id = self.EVENT_LINK_TEMPLATE.match(event_url).groupdict()['event_id']
except:
# Validation also happens client-side so an error is unlikely to occur here
raise InvalidEventException('Invalid url: ' + event_url)
event_data = self._event_lookup(event_id)
return self._parse_event_data(event_data)
# Public
# Intended to be invoked by FacebookEventPageController#scrape_page_events
def get_page_event_ids(self, page_id):
page_event_ids = []
# First get the ids of the events that the page itself has created
response = requests.get(
self.GRAPH_API_TEMPLATE + page_id + '/events',
params = {
'access_token': self.facebook_token
}
)
if response.status_code != 200:
raise InvalidFacebookEventPageException('Unable to retrieve page event details.')
for event_data in response.json()['data']:
page_event_ids.append(event_data['id'])
# Then get the ids of the events that the page has merely advertised on its wall
response = requests.get(
self.GRAPH_API_TEMPLATE + page_id + '/feed',
params = {
'access_token': self.facebook_token
}
)
if response.status_code != 200:
raise InvalidFacebookEventPageException('Unable to retrieve page event details.')
for wall_post in response.json()['data']:
if 'link' in wall_post and self.EVENT_LINK_TEMPLATE.match(wall_post['link']):
page_event_ids.append(self.EVENT_LINK_TEMPLATE.match(wall_post['link']).groupdict()['event_id'])
return page_event_ids
# Public
# Intended to be invoked by FacebookEventPageController#new_facebook_event_page
def get_page_data(self, page_url):
try:
# Have to account for the two ways a Facebook page event URL can be formatted:
# https://www.facebook.com/PomonaCollegeMockTrial
# https://www.facebook.com/pages/Studio-47/146452969759
#
# This slice will grab either the page ID or the page username, either of which can be used to perform a Graph API lookup
page_id = page_url.split('/')[-2]
except:
# Validation also happens client-side so an error is unlikely to occur here
raise InvalidFacebookEventPageException('Invalid url: ' + page_url)
page_data = self._page_lookup(page_id)
return self._parse_page_data(page_data)
| mit | 6,455,790,034,031,716,000 | 37.425287 | 133 | 0.608884 | false |
CenterForOpenScience/SHARE | tests/share/metadata_formats/test_oai_dc_formatter.py | 2 | 3827 | from lxml import etree
from tests.share.metadata_formats.base import BaseMetadataFormatterTest
def xml_elements_equal(element_1, element_2):
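    """Recursively compare two lxml elements by tag, text, tail, attributes, and children."""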
return (
element_1.tag == element_2.tag
and element_1.text == element_2.text
and element_1.tail == element_2.tail
and element_1.attrib == element_2.attrib
and len(element_1) == len(element_2)
and all(
xml_elements_equal(child_1, child_2)
for child_1, child_2 in zip(element_1, element_2)
)
)
class TestOaiDcFormatter(BaseMetadataFormatterTest):
formatter_key = 'oai_dc'
def assert_formatter_outputs_equal(self, actual_output, expected_output):
if expected_output is None:
assert actual_output is None
else:
xml_parser = etree.XMLParser(remove_blank_text=True)
actual_xml = etree.fromstring(actual_output, parser=xml_parser)
expected_xml = etree.fromstring(expected_output, parser=xml_parser)
assert xml_elements_equal(actual_xml, expected_xml), f"actual: {etree.tostring(actual_xml, encoding='unicode', pretty_print=True)}\nexpected: {etree.tostring(expected_xml, encoding='unicode', pretty_print=True)}"
expected_outputs = {
'mycorrhizas': '''
<oai_dc:dc
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd"
>
<dc:title>The Role of Mycorrhizas in Forest Soil Stability with Climate Change</dc:title>
<dc:creator>Suzanne Simard</dc:creator>
<dc:creator>Mary Austi</dc:creator>
<dc:publisher>InTech</dc:publisher>
<dc:date>2017-03-31T05:39:48Z</dc:date>
<dc:type>creativework</dc:type>
<dc:identifier>http://dx.doi.org/10.5772/9813</dc:identifier>
</oai_dc:dc>
''',
'no-names-only-name-parts': '''
<oai_dc:dc
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd"
>
<dc:title>The Role of Mycorrhizas in Forest Soil Stability with Climate Change</dc:title>
<dc:creator>Suzanne Simard</dc:creator>
<dc:creator>Mary Austi</dc:creator>
<dc:date>2017-03-31T05:39:48Z</dc:date>
<dc:type>creativework</dc:type>
<dc:identifier>http://dx.doi.org/10.5772/9813</dc:identifier>
</oai_dc:dc>
''',
'with-is_deleted': None,
'with-subjects': '''
<oai_dc:dc
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd"
>
<dc:title>Assorted chair</dc:title>
<dc:creator>Some Rando</dc:creator>
<dc:subject>Architecture</dc:subject>
<dc:subject>Business</dc:subject>
<dc:subject>Custom biologyyyy</dc:subject>
<dc:subject>Education</dc:subject>
<dc:date>2019-01-23T20:34:21Z</dc:date>
<dc:type>registration</dc:type>
<dc:identifier>http://staging.osf.io/chair/</dc:identifier>
<dc:relation>http://staging.osf.io/vroom/</dc:relation>
</oai_dc:dc>
''',
}
| apache-2.0 | -5,439,439,050,747,447,000 | 44.559524 | 224 | 0.604651 | false |
msabramo/PyHamcrest | src/hamcrest/library/text/isequal_ignoring_whitespace.py | 6 | 1667 | __author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
from hamcrest.core.base_matcher import BaseMatcher
import six
def stripspace(string):
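    """Collapse each internal run of whitespace to a single space and strip leading/trailing whitespace."""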
result = ''
last_was_space = True
for character in string:
if character.isspace():
if not last_was_space:
result += ' '
last_was_space = True
else:
result += character
last_was_space = False
return result.strip()
class IsEqualIgnoringWhiteSpace(BaseMatcher):
def __init__(self, string):
if not isinstance(string, six.string_types):
raise TypeError('IsEqualIgnoringWhiteSpace requires string')
self.original_string = string
self.stripped_string = stripspace(string)
def _matches(self, item):
if not isinstance(item, six.string_types):
return False
return self.stripped_string == stripspace(item)
def describe_to(self, description):
description.append_description_of(self.original_string) \
.append_text(' ignoring whitespace')
def equal_to_ignoring_whitespace(string):
"""Matches if object is a string equal to a given string, ignoring
differences in whitespace.
:param string: The string to compare against as the expected value.
This matcher first checks whether the evaluated object is a string. If so,
it compares it with ``string``, ignoring differences in runs of whitespace.
Example::
equal_to_ignoring_whitespace("hello world")
will match ``"hello world"``.
"""
return IsEqualIgnoringWhiteSpace(string)
| bsd-3-clause | -8,010,375,042,774,392,000 | 28.245614 | 79 | 0.64787 | false |