repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|---|
petrutlucian94/nova | nova/tests/unit/virt/xenapi/test_volumeops.py | 65 | 24052 | # Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VolumeOpsTestBase, self).setUp()
self._setup_mock_volumeops()
def _setup_mock_volumeops(self):
self.session = stubs.FakeSessionForVolumeTests('fake_uri')
self.ops = volumeops.VolumeOps(self.session)
class VolumeDetachTestCase(VolumeOpsTestBase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.volume_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEqual(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = "vbd_ref"
self.ops.detach_volume({}, "name", "/dev/xvdd")
mock_vm.assert_called_once_with(self.session, "name")
mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = None
self.ops.detach_volume({}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_raises(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops.detach_volume, {}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = False
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
mock_shutdown.assert_called_once_with(self.session, "vm_ref")
mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
mock_destroy.assert_called_once_with(self.session, "vbd_ref")
mock_purge.assert_called_once_with(self.session, "sr_ref")
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = True
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
expected = [mock.call(self.session, "vbd_ref_1"),
mock.call(self.session, "vbd_ref_2")]
self.assertEqual(expected, mock_destroy.call_args_list)
mock_purge.assert_called_with(self.session, "sr_ref")
self.assertFalse(mock_unplug.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = []
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = ["1"]
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_detach.assert_called_once_with("vm_ref", ["1"])
def test_get_all_volume_vbd_refs_no_vbds(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = []
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
self.assertFalse(mock_conf.called)
def test_get_all_volume_vbd_refs_no_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1"]
mock_conf.return_value = {}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
mock_conf.assert_called_once_with("1")
def test_get_all_volume_vbd_refs_with_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1", "2"]
mock_conf.return_value = {"osvol": True}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual(["1", "2"], list(result))
mock_get.assert_called_once_with("vm_ref")
class AttachVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda")
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
True)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
False)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
self.ops.connect_volume({})
mock_attach.assert_called_once_with({})
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
self.assertFalse(mock_attach.called)
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.return_value = "vdi_ref"
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info, "vm_ref",
"name", 2, True)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, "name")
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
True)
@mock.patch.object(volume_utils, "forget_sr")
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver, mock_forget):
connection_info = {"data": {}}
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops._attach_volume, connection_info)
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_forget.assert_called_once_with(self.session, "sr_ref")
self.assertFalse(mock_attach.called)
def test_check_is_supported_driver_type_pass_iscsi(self):
conn_info = {"driver_volume_type": "iscsi"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_xensm(self):
conn_info = {"driver_volume_type": "xensm"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_bad(self):
conn_info = {"driver_volume_type": "bad"}
self.assertRaises(exception.VolumeDriverNotFound,
self.ops._check_is_supported_driver_type, conn_info)
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = None
mock_introduce_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
mock_introduce_sr.assert_called_once_with(self.session, "uuid",
"label", "params")
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
self.assertFalse(mock_introduce_sr.called)
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_regular(self, mock_intro):
mock_intro.return_value = "vdi"
result = self.ops._connect_hypervisor_to_volume("sr", {})
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"vdi_uuid": "id"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
vdi_uuid="id")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_lun(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"target_lun": "lun"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
target_lun="lun")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = False
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
mock_plug.assert_called_once_with("vbd", "vm")
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = True
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
self.assertFalse(mock_shutdown.called)
class FindBadVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_vbds(self, mock_get_all):
mock_get_all.return_value = []
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["1", "2"]
mock_find_sr.return_value = "sr_ref"
with mock.patch.object(self.session.SR, "scan") as mock_scan:
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
expected_find = [mock.call(self.session, "1"),
mock.call(self.session, "2")]
self.assertEqual(expected_find, mock_find_sr.call_args_list)
expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
self.assertEqual(expected_scan, mock_scan.call_args_list)
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_scan.assert_called_once_with("sr_ref")
mock_get.assert_called_once_with("vbd_ref")
self.assertEqual(["/dev/xvdb"], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['foo', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
self.assertRaises(FakeException,
self.ops.find_bad_volumes, "vm_ref")
mock_scan.assert_called_once_with("sr_ref")
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs):
find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
in vdi_refs]
find_sr_from_vdi.assert_has_calls(find_sr_calls)
purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
in sr_refs]
purge_sr.assert_has_calls(purge_sr_calls)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi',
side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref2']
find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
sr_refs[0]]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
        purge_sr.side_effect = [test.TestingException, None]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
| apache-2.0 | -4,296,341,046,375,235,600 | -9,041,747,295,495,686,000 | 42.810565 | 79 | 0.612922 | false |
amir-qayyum-khan/edx-platform | common/lib/xmodule/xmodule/assetstore/assetmgr.py | 148 | 2171 | """
Asset Manager
Interface allowing course asset saving/retrieving.
Handles:
- saving asset in the BlobStore -and- saving asset metadata in course modulestore.
- retrieving asset metadata from course modulestore -and- returning URL to asset -or- asset bytes.
Phase 1: Checks to see if an asset's metadata can be found in the course's modulestore.
If not found, fails over to access the asset from the contentstore.
At first, the asset metadata will never be found, since saving isn't implemented yet.
Note: Hotfix (PLAT-734) No asset calls find_asset_metadata, and directly accesses from contentstore.
"""
from contracts import contract, new_contract
from opaque_keys.edx.keys import AssetKey
from xmodule.contentstore.django import contentstore
new_contract('AssetKey', AssetKey)
class AssetException(Exception):
"""
Base exception class for all exceptions related to assets.
"""
pass
class AssetMetadataNotFound(AssetException):
"""
Thrown when no asset metadata is present in the course modulestore for the particular asset requested.
"""
pass
class AssetMetadataFoundTemporary(AssetException):
"""
TEMPORARY: Thrown if asset metadata is actually found in the course modulestore.
"""
pass
class AssetManager(object):
"""
Manager for saving/loading course assets.
"""
@staticmethod
@contract(asset_key='AssetKey', throw_on_not_found='bool', as_stream='bool')
def find(asset_key, throw_on_not_found=True, as_stream=False):
"""
Finds course asset in the deprecated contentstore.
This method was previously searching for the course asset in the assetstore first, then in the deprecated
contentstore. However, the asset was never found in the assetstore since an asset's metadata is
not yet stored there.(removed calls to modulestore().find_asset_metadata(asset_key))
The assetstore search was removed due to performance issues caused by each call unpickling the pickled and
compressed course structure from the structure cache.
"""
return contentstore().find(asset_key, throw_on_not_found, as_stream)
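# Illustrative usage sketch (not part of the original module). The asset key
# string below is a made-up placeholder; AssetKey.from_string() and the find()
# call are the APIs already referenced above.
#
#     from opaque_keys.edx.keys import AssetKey
#     from xmodule.assetstore.assetmgr import AssetManager
#
#     key = AssetKey.from_string(
#         'asset-v1:SomeOrg+SomeCourse+SomeRun+type@asset+block@logo.png')
#     content = AssetManager.find(key, throw_on_not_found=True, as_stream=False)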
| agpl-3.0 | -7,402,756,237,588,763,000 | 6,146,423,181,645,648,000 | 35.183333 | 114 | 0.732381 | false |
terhorst/psmcpp | test/unit/scrm_sim.py | 2 | 4001 | #!/usr/bin/env python2.7
from __future__ import print_function, division
from subprocess import check_output
from math import log
import random
import numpy as np
import itertools
import os
import json
from collections import Counter, namedtuple, defaultdict
from phylogenies import leaves, newick2tree, parent_of
from sum_product import NodeState
SCRM_PATH = os.environ['SCRM_PATH']
def build_command_line(root, L, lineages_per_taxon):
'''Given a tree, build a scrm command line which will simulate from it.'''
ejopts = []
Iopts = []
enopts = []
N0 = root.N
# tfac = 1.0 / 4.0 / N0 / 25.0
# Times are already scaled in Jack's implementation
tfac = 1.0
# rho = 1e-9 * 4 * N0 * (L - 1)
theta = 10.
lineages = []
lineage_map = {}
for i, leaf_node in list(enumerate(leaves(root), 1)):
nsamp = lineages_per_taxon
Iopts.append(nsamp)
lineage_map[leaf_node] = i
lineages += [leaf_node.leaf_name] * nsamp
age = leaf_node.edge_length * tfac
enopts.append((age, i, leaf_node.N / N0))
p = parent_of(root, leaf_node)
while True:
if p not in lineage_map:
lineage_map[p] = i
if p.edge_length == float("inf"):
break
age += p.edge_length * tfac
old_p = p
p = parent_of(root, p)
enopts.append((age, i, p.N / N0))
else:
# We have a join-on time
ejopts.append((age, i, lineage_map[p]))
break
cmdline = ["-I %d %s" % (len(Iopts), " ".join(map(str, Iopts)))]
for ej in ejopts:
cmdline.append("-ej %g %d %d" % ej)
for en in enopts:
cmdline.append("-en %g %d %g" % en)
cmdline = ["%s %d 1 -t %g" % (SCRM_PATH, sum(Iopts), theta)] + cmdline
print(cmdline)
return lineages, " ".join(cmdline)
def run_simulation(tree, L, lineages_per_taxon):
lineages, cmd = build_command_line(tree, L, lineages_per_taxon)
species = list(set(lineages))
n_lineages = Counter(lineages)
N0 = tree.N
print(cmd)
output = [l.strip() for l in check_output(cmd, shell=True).split("\n")]
def f(x):
if x == "//":
f.i += 1
return f.i
f.i = 0
for k, lines in itertools.groupby(output, f):
if k == 0:
continue
# Skip preamble
next(lines)
# segsites
segsites = int(next(lines).split(" ")[1])
# positions
next(lines)
# at haplotypes
lin_counts = defaultdict(lambda: np.zeros(segsites, dtype=int))
for hap, lin in zip(lines, lineages):
hap = list(map(int, hap))
lin_counts[lin] += hap
return [{lin: NodeState(n_derived=lin_counts[lin][i],
n_ancestral=n_lineages[lin] - lin_counts[lin][i])
for lin in lineages}
for i in range(segsites)]
def build_splits(lineages, seqs_path, outgroup):
splits = Counter()
with open(seqs_path, "rt") as f:
next(f)
seqdata = [(lineages[int(spec) - 1], seq) for spec, seq in
(line.strip().split() for line in f)]
specs = [s[0] for s in seqdata]
c = Counter()
nt = namedtuple("Spectrum", sorted({ell for ell in lineages if ell != outgroup}))
for col in zip(*[s[1] for s in seqdata]):
# This is not a true dict (potentially multiple of same key) but there should
# be only one outgroup lineage
dbase = dict(zip(specs, col))
abase = dbase[outgroup]
d = {}
for spec, base in zip(specs, col):
d.setdefault(spec, [0, 0])[int(base != abase)] += 1
d = {k: tuple(v) for k, v in d.items() if k != outgroup}
if not any(all(d[k][i] == 0 for k in specs if k != outgroup) for i in [0]):
c[nt(**d)] += 1
return c
if __name__ == "__main__":
    # NOTE: mktree() and test_scrm_sim() are not defined in this module; they
    # are expected to be provided by the surrounding test harness.
    test_scrm_sim(mktree(10.0), "outgroup")
| gpl-3.0 | -6,760,008,752,698,329,000 | 7,453,572,889,292,067,000 | 33.196581 | 85 | 0.547113 | false |
level12/blazeweb | tests/apps/blazewebtestapp/components/tests/views.py | 2 | 5636 | from blazeweb.globals import rg
from blazeweb.content import getcontent
from blazeweb.utils import redirect
from blazeweb.views import View, forward, jsonify
from werkzeug.exceptions import ServiceUnavailable
from formencode.validators import UnicodeString, Int
class Rvb(View):
def default(self):
# this view is used as a error doc handler, so we need to set the
# status code appropriately
if rg.respctx.error_doc_code:
self.status_code = rg.respctx.error_doc_code
self.retval = 'Hello World!'
class RvbWithSnippet(View):
def default(self):
self.retval = getcontent('tests:HwSnippet').primary
class Get(View):
def get(self):
self.retval = 'Hello World!'
class Post(View):
def post(self):
return 'Hello World!'
class Prep(View):
def init(self):
self.retval = 'Hello World!'
def default(self):
pass
class NoActionMethod(View):
def init(self):
self.retval = 'Hello World!'
class DoForward(View):
def default(self):
forward('tests:ForwardTo')
class ForwardTo(View):
def default(self):
return 'forward to me'
class RaiseExc(View):
def default(self):
raise ValueError('exception for testing')
class Text(View):
def default(self):
self.render_template(default_ext='txt')
class TextWithSnippet(View):
def default(self):
self.assign('output', getcontent('tests:text_snippet.txt'))
self.render_template(default_ext='txt')
class TextWithSnippet2(View):
def default(self):
self.render_template(default_ext='txt')
class Html(View):
def default(self):
self.render_template()
class Redirect(View):
def default(self):
redirect('/some/other/page')
class PermRedirect(View):
def default(self):
redirect('/some/other/page', permanent=True)
class CustRedirect(View):
def default(self):
redirect('/some/other/page', code=303)
class HttpExceptionRaise(View):
def default(self):
raise ServiceUnavailable()
class ForwardLoop(View):
def default(self):
forward('tests:ForwardLoop')
class UrlArguments(View):
def default(self, towho='World', anum=None):
if anum is None:
return 'Hello %s!' % towho
else:
return 'Give me a name!'
class GetArguments(View):
def init(self):
self.add_processor('towho', UnicodeString())
def default(self, greeting='Hello', towho='World', anum=None):
if anum is None:
return '%s %s!' % (greeting, towho)
else:
return 'Give me a name!'
class GetArguments2(View):
def init(self):
self.add_processor('towho', UnicodeString())
self.add_processor('num', Int())
def default(self, towho='World', num=None):
if num:
return 'Hello %s, %d!' % (towho, num)
else:
return 'Hello %s!' % towho
class GetArguments3(View):
def init(self):
self.add_processor('towho', UnicodeString())
self.add_processor('num', Int(), show_msg=True)
self.add_processor('num2', Int(), custom_msg='num: must be an integer')
self.strict_args = True
def default(self, towho='World', num=None, num2=None):
if num:
return 'Hello %s, %d!' % (towho, num)
else:
return 'Hello %s!' % towho
class RequiredGetArguments(View):
def init(self):
self.add_processor('towho', UnicodeString(), show_msg=True)
self.add_processor('num', Int, required=True, show_msg=True)
self.add_processor('num2', Int, strict=True, show_msg=True)
self.add_processor('num3', Int, show_msg=True)
def default(self, towho='World', num=None, num2=10, num3=10):
if num:
return 'Hello %s, %d %d %d!' % (towho, num, num2, num3)
class ListGetArguments(View):
def init(self):
self.add_processor('nums', Int(), show_msg=True, takes_list=True)
def default(self, nums=[]):
return str(nums)
class CustomValidator(View):
def init(self):
self.add_processor('num', self.validate_num)
def default(self, num=10):
return str(num)
def validate_num(self, value):
return int(value)
class BadValidator(View):
def init(self):
self.add_processor('num', 'notavalidator')
def default(self, num=10):
return num
class HtmlTemplateFileArg(View):
def default(self):
self.render_template('filearg.html')
class TemplateInheritance(View):
def default(self):
self.render_template()
class ParentTemplate(View):
def default(self):
self.render_template()
class ParentTemplateInheritance(View):
def default(self):
self.render_template()
class ModLevelPriority(View):
def default(self):
self.render_template()
class HtmlSnippetWithCss(View):
def default(self):
self.render_template()
class HtmlSnippetWithCssParent(View):
def default(self):
self.retval = getview('tests:HtmlSnippetWithCss') # noqa
self.render_template()
class UserMessages(View):
def default(self):
if rg.respctx.error_doc_code:
self.status_code = rg.respctx.error_doc_code
self.render_template()
class TemplateChooser(View):
def default(self, rtype):
if rtype == 'endpoint':
self.render_endpoint('app_level.html')
if rtype == 'content':
self.render_endpoint('tests:HwSnippet')
class JsonifyException(View):
@jsonify
def default(self):
foo # noqa
| bsd-3-clause | 4,906,590,279,384,527,000 | 6,838,694,245,824,068,000 | 21.634538 | 79 | 0.626331 | false |
daviddoria/PointGraphsPhase1 | Utilities/vtkTclTest2Py/mccases.py | 10 | 1065 | """This is python equivalent of Wrapping/Tcl/vtktesting/mccases.tcl.
Used for setting vertex values for clipping, cutting, and contouring tests.
This script is used while running python tests translated from Tcl."""
def case1 ( scalars, IN, OUT, caseLabel ):
scalars.InsertValue(0,IN )
scalars.InsertValue(1,OUT)
scalars.InsertValue(2,OUT)
scalars.InsertValue(3,OUT)
scalars.InsertValue(4,OUT)
scalars.InsertValue(5,OUT)
scalars.InsertValue(6,OUT)
scalars.InsertValue(7,OUT)
if IN == 1:
caseLabel.SetText("Case 1 - 00000001")
else :
caseLabel.SetText("Case 1c - 11111110")
pass
def case2 ( scalars, IN, OUT, caseLabel ):
scalars.InsertValue(0,IN)
scalars.InsertValue(1,IN)
scalars.InsertValue(2,OUT)
scalars.InsertValue(3,OUT)
scalars.InsertValue(4,OUT)
scalars.InsertValue(5,OUT)
scalars.InsertValue(6,OUT)
scalars.InsertValue(7,OUT)
if IN == 1:
caseLabel.SetText("Case 2 - 00000011")
else:
caseLabel.SetText("Case 2c - 11111100")
pass
| bsd-3-clause | -3,952,157,453,954,910,700 | 791,305,812,875,150,100 | 29.428571 | 75 | 0.679812 | false |
nicklhy/mxnet | tests/nightly/dist_lenet.py | 52 | 1839 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# distributed lenet
import os, sys
curr_path = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(curr_path, "../../example/image-classification"))
sys.path.append(os.path.join(curr_path, "../../python"))
import mxnet as mx
import argparse
import train_mnist
import logging
if __name__ == '__main__':
args = train_mnist.parse_args()
args.batch_size = 100
data_shape = (1, 28, 28)
loader = train_mnist.get_iterator(data_shape)
kv = mx.kvstore.create(args.kv_store)
(train, val) = loader(args, kv)
net = train_mnist.get_lenet()
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
mx.model.FeedForward.create(
ctx = mx.gpu(kv.rank),
kvstore = kv,
symbol = net,
X = train,
eval_data = val,
num_epoch = args.num_epochs,
learning_rate = args.lr,
momentum = 0.9,
wd = 0.00001)
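# Illustrative launch sketch (assumption: dmlc's tools/launch.py is available at
# its usual location in the mxnet source tree and a local launcher is enough):
#
#     python ../../tools/launch.py -n 2 --launcher local \
#         python dist_lenet.py --kv-store dist_sync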
| apache-2.0 | 8,550,023,758,570,082,000 | -4,857,084,321,675,509,000 | 35.058824 | 78 | 0.661773 | false |
jamiefolsom/edx-platform | lms/djangoapps/teams/management/commands/reindex_course_team.py | 34 | 2408 | """ Management command to update course_teams' search index. """
from django.core.management import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from optparse import make_option
from textwrap import dedent
from teams.models import CourseTeam
class Command(BaseCommand):
"""
Command to reindex course_teams (single, multiple or all available).
Examples:
./manage.py reindex_course_team team1 team2 - reindexes course teams with team_ids team1 and team2
./manage.py reindex_course_team --all - reindexes all available course teams
"""
help = dedent(__doc__)
can_import_settings = True
args = "<course_team_id course_team_id ...>"
option_list = BaseCommand.option_list + (
make_option(
'--all',
action='store_true',
dest='all',
default=False,
help='Reindex all course teams'
),
)
def _get_course_team(self, team_id):
""" Returns course_team object from team_id. """
try:
result = CourseTeam.objects.get(team_id=team_id)
except ObjectDoesNotExist:
raise CommandError(u"Argument {0} is not a course_team team_id".format(team_id))
return result
def handle(self, *args, **options):
"""
By convention set by django developers, this method actually executes command's actions.
So, there could be no better docstring than emphasize this once again.
"""
        # This is ugly, but there is a really strange circular dependency that doesn't
        # happen anywhere else, and I can't figure out how to avoid it :(
from teams.search_indexes import CourseTeamIndexer
if len(args) == 0 and not options.get('all', False):
raise CommandError(u"reindex_course_team requires one or more arguments: <course_team_id>")
elif not settings.FEATURES.get('ENABLE_TEAMS', False):
raise CommandError(u"ENABLE_TEAMS must be enabled to use course team indexing")
if options.get('all', False):
course_teams = CourseTeam.objects.all()
else:
course_teams = map(self._get_course_team, args)
for course_team in course_teams:
print "Indexing {id}".format(id=course_team.team_id)
CourseTeamIndexer.index(course_team)
| agpl-3.0 | -4,274,137,457,489,798,700 | -6,412,029,148,840,707,000 | 35.484848 | 106 | 0.646595 | false |
Ali-aqrabawi/ezclinic | lib/django/contrib/auth/management/commands/changepassword.py | 65 | 2685 | from __future__ import unicode_literals
import getpass
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.utils.encoding import force_str
class Command(BaseCommand):
help = "Change a user's password for django.contrib.auth."
requires_migrations_checks = True
requires_system_checks = False
def _get_pass(self, prompt="Password: "):
p = getpass.getpass(prompt=force_str(prompt))
if not p:
raise CommandError("aborted")
return p
def add_arguments(self, parser):
parser.add_argument(
'username', nargs='?',
help='Username to change password for; by default, it\'s the current username.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Specifies the database to use. Default is "default".',
)
def handle(self, *args, **options):
if options['username']:
username = options['username']
else:
username = getpass.getuser()
UserModel = get_user_model()
try:
u = UserModel._default_manager.using(options['database']).get(**{
UserModel.USERNAME_FIELD: username
})
except UserModel.DoesNotExist:
raise CommandError("user '%s' does not exist" % username)
self.stdout.write("Changing password for user '%s'\n" % u)
MAX_TRIES = 3
count = 0
p1, p2 = 1, 2 # To make them initially mismatch.
password_validated = False
while (p1 != p2 or not password_validated) and count < MAX_TRIES:
p1 = self._get_pass()
p2 = self._get_pass("Password (again): ")
if p1 != p2:
self.stdout.write("Passwords do not match. Please try again.\n")
count += 1
# Don't validate passwords that don't match.
continue
try:
validate_password(p2, u)
except ValidationError as err:
self.stderr.write('\n'.join(err.messages))
count += 1
else:
password_validated = True
if count == MAX_TRIES:
raise CommandError("Aborting password change for user '%s' after %s attempts" % (u, count))
u.set_password(p1)
u.save()
return "Password changed successfully for user '%s'" % u
| mit | -4,854,670,432,041,029,000 | -5,971,260,845,497,317,000 | 33.423077 | 103 | 0.587709 | false |
DaniilLeksin/gc | wx/tools/Editra/src/syntax/_inno.py | 3 | 7749 | ###############################################################################
# Name: inno.py #
# Purpose: Syntax configuration module for Inno Setup Scripts #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2008 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
FILE: inno.py
AUTHOR: Cody Preord
@summary: Lexer configuration module for Inno Setup Scripts
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: _inno.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc as stc
import re
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
SECTION_KW = (0, "code components custommessages dirs files icons ini "
"installdelete langoptions languages messages registry run "
"setup types tasks uninstalldelete uninstallrun _istool")
KEYWORDS = (1, "allowcancelduringinstall allownoicons allowrootdirectory "
"allowuncpath alwaysrestart alwaysshowcomponentslist "
"alwaysshowdironreadypage alwaysshowgrouponreadypage "
"alwaysusepersonalgroup appcomments appcontact appcopyright "
"appenddefaultdirname appenddefaultgroupname appid appmodifypath "
"appmutex appname apppublisher apppublisherurl appreadmefile "
"appsupporturl appupdatesurl appvername appversion "
"architecturesallowed architecturesinstallin64bitmode backcolor "
"backcolor2 backcolordirection backsolid changesassociations "
"changesenvironment compression copyrightfontname "
"copyrightfontsize createappdir createuninstallregkey "
"defaultdirname defaultgroupname defaultuserinfoname "
"defaultuserinfoorg defaultuserinfoserial dialogfontname "
"dialogfontsize direxistswarning disabledirpage "
"disablefinishedpage disableprogramgrouppage disablereadymemo "
"disablereadypage disablestartupprompt diskclustersize "
"diskslicesize diskspanning enablesdirdoesntexistwarning "
"encryption extradiskspacerequired flatcomponentslist "
"infoafterfile infobeforefile internalcompresslevel "
"languagedetectionmethod languagecodepage languageid languagename "
"licensefile mergeduplicatefiles minversion onlybelowversion "
"outputbasefilename outputdir outputmanifestfile password "
"privilegesrequired reservebytes restartifneededbyrun "
"setupiconfile showcomponentsizes showlanguagedialog "
"showtaskstreelines slicesperdisk solidcompression sourcedir "
"timestamprounding timestampsinutc titlefontname titlefontsize "
"touchdate touchtime uninstallable uninstalldisplayicon "
"uninstalldisplayname uninstallfilesdir uninstalllogmode "
"uninstallrestartcomputer updateuninstalllogappname "
"usepreviousappdir usepreviousgroup useprevioussetuptype "
"useprevioustasks useprevioususerinfo userinfopage usesetupldr "
"versioninfocompany versioninfocopyright versioninfodescription "
"versioninfotextversion versioninfoversion welcomefontname "
"welcomefontsize windowshowcaption windowstartmaximized "
"windowresizable windowvisible wizardimagebackcolor "
"wizardimagefile wizardimagestretch wizardsmallimagefile")
PARAM_KW = (2, "afterinstall attribs beforeinstall check comment components "
"copymode description destdir destname excludes "
"extradiskspacerequired filename flags fontinstall "
"groupdescription hotkey infoafterfile infobeforefile "
"iconfilename iconindex key languages licensefile messagesfile "
"minversion name onlybelowversion parameters permissions root "
"runonceid section source statusmsg string subkey tasks type "
"types valuedata valuename valuetype workingdir")
PREPROC_KW = (3, "append define dim else emit endif endsub error expr file for "
"if ifdef ifexist ifndef ifnexist include insert pragma sub "
"undef")
PASCAL_KW = (4, "begin break case const continue do downto else end except "
"finally for function if of procedure repeat then to try until "
"uses var while with")
USER_DEF = (5, "")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [(stc.STC_INNO_COMMENT, 'comment_style'),
(stc.STC_INNO_COMMENT_PASCAL, 'comment_style'),
(stc.STC_INNO_DEFAULT, 'default_style'),
(stc.STC_INNO_IDENTIFIER, 'default_style'),
(stc.STC_INNO_KEYWORD, 'keyword_style'),
(stc.STC_INNO_KEYWORD_PASCAL, 'keyword4_style'),
(stc.STC_INNO_KEYWORD_USER, 'default_style'),
(stc.STC_INNO_PARAMETER, 'keyword2_style'),
(stc.STC_INNO_PREPROC, 'pre_style'),
(stc.STC_INNO_SECTION, 'scalar_style'),
(stc.STC_INNO_STRING_DOUBLE, 'string_style'),
(stc.STC_INNO_STRING_SINGLE, 'char_style')]
if wx.VERSION >= (2, 9, 0, 0, ''):
SYNTAX_ITEMS.append((stc.STC_INNO_INLINE_EXPANSION, 'default_style')) #TODO
else:
SYNTAX_ITEMS.append((stc.STC_INNO_PREPROC_INLINE, 'pre_style'))
#---- Extra Properties ----#
FOLD = ("fold", "1")
FOLD_COMP = ("fold.compact", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Inno Setup Scripts"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_INNOSETUP)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [SECTION_KW, KEYWORDS, PARAM_KW, PREPROC_KW, PASCAL_KW]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
# Note: Inno can also use pascal comments (i.e {})
return [u';']
#-----------------------------------------------------------------------------#
def AutoIndenter(estc, pos, ichar):
"""Auto indent Inno Setup Scripts.
@param estc: EditraStyledTextCtrl
@param pos: current carat position
@param ichar: Indentation character
"""
rtxt = u''
line = estc.GetCurrentLine()
text = estc.GetTextRange(estc.PositionFromLine(line), pos)
eolch = estc.GetEOLChar()
indent = estc.GetLineIndentation(line)
if ichar == u"\t":
tabw = estc.GetTabWidth()
else:
tabw = estc.GetIndent()
i_space = indent / tabw
ndent = eolch + ichar * i_space
rtxt = ndent + ((indent - (tabw * i_space)) * u' ')
    if_pat = re.compile(r'if\s+.*\sthen')
text = text.strip()
if text == u'begin' or if_pat.match(text):
rtxt += ichar
# Put text in the buffer
estc.AddText(rtxt)
| apache-2.0 | -6,759,042,251,201,501,000 | 1,449,141,637,399,527,400 | 43.028409 | 80 | 0.61195 | false |
mylukin/Creeper | requests/packages/urllib3/packages/ordered_dict.py | 2040 | 8935 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
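# Illustrative usage sketch (exercises only behaviour defined above; guarded so
# it does not run on import):
if __name__ == '__main__':  # pragma: no cover - example only
    od = OrderedDict([('a', 1), ('b', 2)])
    od['c'] = 3
    assert list(od.keys()) == ['a', 'b', 'c']   # insertion order is preserved
    od.popitem(last=False)                      # removes ('a', 1), FIFO style
    assert list(od.items()) == [('b', 2), ('c', 3)]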
| apache-2.0 | -7,391,024,644,235,401,000 | -1,522,193,598,193,363,000 | 33.498069 | 87 | 0.548629 | false |
thefinn93/CouchPotatoServer | libs/git/ref_container.py | 122 | 2242 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import exceptions
class RefContainer(object):
def getBranches(self):
raise NotImplementedError()
def getTags(self):
raise NotImplementedError()
########################### Looking for specific refs ##########################
def _getByName(self, func, name):
for ref in func():
if ref.name == name:
return ref
raise exceptions.NonexistentRefException(name)
def getBranchByName(self, name):
return self._getByName(self.getBranches, name)
def hasBranch(self, name):
try:
self.getBranchByName(name)
return True
except exceptions.NonexistentRefException:
return False
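# Illustrative sketch (hypothetical in-memory subclass, shown only to clarify
# the contract of the abstract methods above):
#
#     class StaticRefContainer(RefContainer):
#         def __init__(self, branches, tags):
#             self._branches, self._tags = branches, tags
#         def getBranches(self):
#             return self._branches
#         def getTags(self):
#             return self._tags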
| gpl-3.0 | 8,148,161,317,757,431,000 | -6,526,004,666,222,074,000 | 48.822222 | 84 | 0.707404 | false |
zstackio/zstack-woodpecker | integrationtest/vm/virtualrouter/test_update_vm_cpu_memory2.py | 2 | 1813 | '''
Test change cpu and memory configuration when VM is running
@author: quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
#import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
test_util.test_dsc('Test update instance offering')
vm = test_stub.create_basic_vm()
vm.stop()
instance_offering = test_lib.lib_get_instance_offering_by_uuid(vm.get_vm().instanceOfferingUuid)
test_obj_dict.add_vm(vm)
vm_ops.update_vm(vm.get_vm().uuid, instance_offering.cpuNum * 2, None)
vm_ops.update_vm(vm.get_vm().uuid, None, instance_offering.memorySize * 2)
vm.update()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to change")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to change")
vm.start()
if (vm.get_vm().cpuNum != instance_offering.cpuNum * 2):
test_util.test_fail("cpuNum change is expected to take effect after Vm restart")
if (vm.get_vm().memorySize != instance_offering.memorySize * 2):
test_util.test_fail("memorySize change is expected to take effect after Vm restart")
vm.check()
test_lib.lib_robot_cleanup(test_obj_dict)
test_util.test_pass('Test update instance cpu memory Pass')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 | -402,633,426,235,757,600 | -421,428,069,166,366,460 | 39.204545 | 100 | 0.704357 | false |
mdshuai/UATFramework | steps/common.py | 3 | 3982 | '''Common test methods'''
from behave import *
@given(u'"{host}" hosts from dynamic inventory')
def step_impl(context, host):
context.inventory = "dynamic"
context.target_host = host
@given(u'"{host}" hosts from static inventory')
def step_impl(context, host):
context.inventory = "static"
context.target_host = host
@given(u'"{rpm}" is already installed on "{host}"')
def step_impl(context, rpm, host):
'''Install RPM on host but fail if not already installed'''
r = context.remote_cmd("yum",
host,
remote_user="root",
module_args='name=%s state=present' % rpm)
if r:
for i in r:
assert i['msg'] == '' and i['results'] != []
else:
assert False
@given(u'"{rpm}" is already installed')
def step_impl(context, rpm):
'''Install RPM on host but fail if not already installed'''
context.execute_steps(u"""
given "{package_name}" is already installed on "{host}"
""".format(package_name=rpm,host=context.target_host))
@given(u'"{rpms}" are already installed on "{host}"')
def step_impl(context, rpms, host):
'''Install RPM on host but fail if not already installed'''
r = context.remote_cmd("yum",
host,
remote_user="root",
module_args='name=%s' % rpms)
if r:
for i in r:
assert i['msg'] == '' and i['results'] != []
else:
assert False
@given(u'"{rpms}" are already installed')
def step_impl(context, rpms):
'''Install RPM on host but fail if not already installed'''
context.execute_steps(u"""
"given {package_names}" are already installed on "{host}"
""".format(package_names=rpms,host=context.target_host))
@given(u'"{unit}" is already running on "{host}"')
def step_impl(context, unit, host):
'''Ensure service is running but fail if not'''
r = context.remote_cmd("service",
host,
module_args='name=%s state=running enabled=yes' % unit)
if r:
for i in r:
assert i['changed'] is False
else:
assert False
@then(u'"{unit}" is started and enabled on "{host}"')
def step_impl(context, unit, host):
    '''Start and enable the service'''
r = context.remote_cmd('service',
host,
module_args='name=%s state=running enabled=yes' % unit)
if r:
for i in r:
assert i['state'] == 'started' and i['enabled'] is True
else:
assert False
@then(u'"{unit}" is restarted on "{host}"')
def step_impl(context, unit, host):
'''Restart service'''
r = context.remote_cmd('service',
host,
module_args='name=%s state=restarted' % unit)
if r:
for i in r:
assert i['state'] == 'started' and i['changed'] is True
else:
assert False
@given(u'"{host}" hosts can be pinged')
@given('"{host}" host')
def step(context, host):
'''Verify we can ping the host
host: a host from the ansible inventory file'''
assert context.remote_cmd('ping', host)
@given('run command "{cmd}" on "{host}"')
@when('run command "{cmd}" on "{host}"')
@then('run command "{cmd}" on "{host}"')
def step(context, cmd, host):
'''Run an Ansible module on a host directly from scenario
cmd: a module name plus arguments
<module> key=value [key=value ...]
or...
<module> <param>
host: a host from the inventory file'''
module, args = None, None
if ' ' in cmd:
# we only split on the first space to get the module name
# since module_args are also space-delimited
module, args = cmd.split(' ', 1)
else:
module = cmd
assert context.remote_cmd(module,
host,
module_args=args)
| gpl-2.0 | 1,409,418,527,988,437,800 | -2,490,810,040,346,521,000 | 32.462185 | 82 | 0.558764 | false |
vabs22/zulip | docs/conf.py | 5 | 9917 | # -*- coding: utf-8 -*-
#
# zulip-contributor-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 17 16:24:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
if False:
from typing import Any, Dict, List, Optional
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [] # type: List[str]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zulip'
copyright = u'2015-2017, The Zulip Team'
author = u'The Zulip Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.6'
# The full version, including alpha/beta/rc tags.
release = '1.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None # type: Optional[str]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Read The Docs can't import sphinx_rtd_theme, so don't import it there.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zulip-contributor-docsdoc'
def setup(app):
# type: (Any) -> None
# overrides for wide tables in RTD theme
app.add_stylesheet('theme_overrides.css') # path relative to _static
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
} # type: Dict[str, str]
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zulip-contributor-docs.tex', u'Zulip Documentation',
u'The Zulip Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zulip-contributor-docs', u'Zulip Documentation',
author, 'zulip-contributor-docs', 'Documentation for contributing to Zulip.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
| apache-2.0 | 8,098,813,184,354,424,000 | -5,029,392,091,020,025,000 | 31.621711 | 82 | 0.704044 | false |
PlayUAV/MissionPlanner | Lib/site-packages/numpy/ma/bench.py | 51 | 6658 | #!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
# encoding: utf-8
import timeit
#import IPython.ipapi
#ip = IPython.ipapi.get()
#from IPython import ipmagic
import numpy
#from numpy import ma
#from numpy.ma import filled
#from numpy.ma.testutils import assert_equal
#####---------------------------------------------------------------------------
#---- --- Global variables ---
#####---------------------------------------------------------------------------
# Small arrays ..................................
xs = numpy.random.uniform(-1,1,6).reshape(2,3)
ys = numpy.random.uniform(-1,1,6).reshape(2,3)
zs = xs + 1j * ys
m1 = [[True, False, False], [False, False, True]]
m2 = [[True, False, True], [False, False, True]]
nmxs = numpy.ma.array(xs, mask=m1)
nmys = numpy.ma.array(ys, mask=m2)
nmzs = numpy.ma.array(zs, mask=m1)
# Big arrays ....................................
xl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
yl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
zl = xl + 1j * yl
maskx = xl > 0.8
masky = yl < -0.8
nmxl = numpy.ma.array(xl, mask=maskx)
nmyl = numpy.ma.array(yl, mask=masky)
nmzl = numpy.ma.array(zl, mask=maskx)
#####---------------------------------------------------------------------------
#---- --- Functions ---
#####---------------------------------------------------------------------------
def timer(s, v='', nloop=500, nrep=3):
units = ["s", "ms", "µs", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
print "%s : %-50s : " % (v,s),
varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
Timer = timeit.Timer(stmt=s, setup=setup)
best = min(Timer.repeat(nrep, nloop)) / nloop
if best > 0.0:
order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
else:
order = 3
print "%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
3,
best * scaling[order],
units[order])
# ip.magic('timeit -n%i %s' % (nloop,s))
def compare_functions_1v(func, nloop=500,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
funcname = func.__name__
print "-"*50
print "%s on small arrays" % funcname
module, data = "numpy.ma","nmxs"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
print "%s on large arrays" % funcname
module, data = "numpy.ma","nmxl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
def compare_methods(methodname, args, vars='x', nloop=500, test=True,
xs=xs, nmxs=nmxs, xl=xl, nmxl=nmxl):
print "-"*50
print "%s on small arrays" % methodname
data, ver = "nm%ss" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
#
print "%s on large arrays" % methodname
data, ver = "nm%sl" % vars, 'numpy.ma'
timer("%(data)s.%(methodname)s(%(args)s)" % locals(), v=ver, nloop=nloop)
return
def compare_functions_2v(func, nloop=500, test=True,
xs=xs, nmxs=nmxs,
ys=ys, nmys=nmys,
xl=xl, nmxl=nmxl,
yl=yl, nmyl=nmyl):
funcname = func.__name__
print "-"*50
print "%s on small arrays" % funcname
module, data = "numpy.ma","nmxs,nmys"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
#
print "%s on large arrays" % funcname
module, data = "numpy.ma","nmxl,nmyl"
timer("%(module)s.%(funcname)s(%(data)s)" % locals(), v="%11s" % module, nloop=nloop)
return
###############################################################################
################################################################################
if __name__ == '__main__':
# # Small arrays ..................................
# xs = numpy.random.uniform(-1,1,6).reshape(2,3)
# ys = numpy.random.uniform(-1,1,6).reshape(2,3)
# zs = xs + 1j * ys
# m1 = [[True, False, False], [False, False, True]]
# m2 = [[True, False, True], [False, False, True]]
# nmxs = numpy.ma.array(xs, mask=m1)
# nmys = numpy.ma.array(ys, mask=m2)
# nmzs = numpy.ma.array(zs, mask=m1)
# mmxs = maskedarray.array(xs, mask=m1)
# mmys = maskedarray.array(ys, mask=m2)
# mmzs = maskedarray.array(zs, mask=m1)
# # Big arrays ....................................
# xl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
# yl = numpy.random.uniform(-1,1,100*100).reshape(100,100)
# zl = xl + 1j * yl
# maskx = xl > 0.8
# masky = yl < -0.8
# nmxl = numpy.ma.array(xl, mask=maskx)
# nmyl = numpy.ma.array(yl, mask=masky)
# nmzl = numpy.ma.array(zl, mask=maskx)
# mmxl = maskedarray.array(xl, mask=maskx, shrink=True)
# mmyl = maskedarray.array(yl, mask=masky, shrink=True)
# mmzl = maskedarray.array(zl, mask=maskx, shrink=True)
#
compare_functions_1v(numpy.sin)
compare_functions_1v(numpy.log)
compare_functions_1v(numpy.sqrt)
#....................................................................
compare_functions_2v(numpy.multiply)
compare_functions_2v(numpy.divide)
compare_functions_2v(numpy.power)
#....................................................................
compare_methods('ravel','', nloop=1000)
compare_methods('conjugate','','z', nloop=1000)
compare_methods('transpose','', nloop=1000)
compare_methods('compressed','', nloop=1000)
compare_methods('__getitem__','0', nloop=1000)
compare_methods('__getitem__','(0,0)', nloop=1000)
compare_methods('__getitem__','[0,-1]', nloop=1000)
compare_methods('__setitem__','0, 17', nloop=1000, test=False)
compare_methods('__setitem__','(0,0), 17', nloop=1000, test=False)
#....................................................................
print "-"*50
print "__setitem__ on small arrays"
timer('nmxs.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
print "-"*50
print "__setitem__ on large arrays"
timer('nmxl.__setitem__((-1,0),numpy.ma.masked)', 'numpy.ma ',nloop=10000)
#....................................................................
print "-"*50
print "where on small arrays"
timer('numpy.ma.where(nmxs>2,nmxs,nmys)', 'numpy.ma ',nloop=1000)
print "-"*50
print "where on large arrays"
timer('numpy.ma.where(nmxl>2,nmxl,nmyl)', 'numpy.ma ',nloop=100)
| gpl-3.0 | 1,308,317,134,031,267,600 | 2,247,370,146,813,698,300 | 39.345455 | 100 | 0.501427 | false |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/tensorflow/contrib/metrics/python/metrics/classification.py | 23 | 2583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification metrics library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
# TODO(nsilberman): move into metrics/python/ops/
def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
predictions: the predicted values, a `Tensor` whose dtype and shape
matches 'labels'.
labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
if dtype is not bool, integer, or string.
"""
if not (labels.dtype.is_integer or
labels.dtype in (dtypes.bool, dtypes.string)):
raise ValueError(
'Labels should have bool, integer, or string dtype, not %r' %
labels.dtype)
if not labels.dtype.is_compatible_with(predictions.dtype):
raise ValueError('Dtypes of predictions and labels should match. '
'Given: predictions (%r) and labels (%r)' %
(predictions.dtype, labels.dtype))
with ops.name_scope('accuracy', values=[predictions, labels]):
is_correct = math_ops.cast(
math_ops.equal(predictions, labels), dtypes.float32)
if weights is not None:
is_correct = math_ops.multiply(is_correct, weights)
num_values = math_ops.multiply(weights, array_ops.ones_like(is_correct))
return math_ops.div(math_ops.reduce_sum(is_correct),
math_ops.reduce_sum(num_values))
return math_ops.reduce_mean(is_correct)
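# Illustrative usage sketch, not part of the original module. It assumes a
# TF1-style public API where tf.constant and tf.Session are available; the
# values below are arbitrary examples and yield an accuracy of 0.75.
def _accuracy_usage_sketch():
  import tensorflow as tf
  predictions = tf.constant([1, 2, 3, 4], dtype=tf.int64)
  labels = tf.constant([1, 2, 0, 4], dtype=tf.int64)
  acc = accuracy(predictions, labels)
  with tf.Session() as sess:
    return sess.run(acc)  # expected: 0.75 (three of four match)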
| apache-2.0 | 3,583,460,977,840,329,700 | 2,676,002,976,097,245,700 | 40 | 80 | 0.678281 | false |
asajeffrey/servo | components/script/dom/bindings/codegen/parser/tests/test_empty_sequence_default_value.py | 15 | 1132 | import WebIDL
def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface X {
const sequence<long> foo = [];
};
""")
results = parser.finish()
except Exception as x:
threw = True
harness.ok(threw, "Constant cannot have [] as a default value")
parser = parser.reset()
parser.parse("""
interface X {
void foo(optional sequence<long> arg = []);
};
""")
results = parser.finish();
harness.ok(isinstance(
results[0].members[0].signatures()[0][1][0].defaultValue,
WebIDL.IDLEmptySequenceValue),
"Should have IDLEmptySequenceValue as default value of argument")
parser = parser.reset()
parser.parse("""
dictionary X {
sequence<long> foo = [];
};
""")
results = parser.finish();
harness.ok(isinstance(results[0].members[0].defaultValue,
WebIDL.IDLEmptySequenceValue),
"Should have IDLEmptySequenceValue as default value of "
"dictionary member")
| mpl-2.0 | 2,271,545,213,252,521,500 | -651,457,321,840,438,500 | 24.155556 | 80 | 0.551237 | false |
Tiotao/morpherpy | env/Lib/site-packages/sqlalchemy/util/_collections.py | 7 | 24509 | # util/_collections.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
import itertools
import weakref
import operator
from .compat import threading
EMPTY_SET = frozenset()
class KeyedTuple(tuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
standard library, however is architected very differently.
    Unlike ``collections.namedtuple()``, :class:`.KeyedTuple`
does not rely on creation of custom subtypes in order to represent
a new series of keys, instead each :class:`.KeyedTuple` instance
receives its list of keys in place. The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`.Query` object's use case.
.. versionchanged:: 0.8
Compatibility methods with ``collections.namedtuple()`` have been
added including :attr:`.KeyedTuple._fields` and
:meth:`.KeyedTuple._asdict`.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
t._labels = []
if labels:
t.__dict__.update(zip(labels, vals))
t._labels = labels
return t
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return [l for l in self._labels if l is not None]
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. versionadded:: 0.8
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple(self.keys())
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
.. versionadded:: 0.8
"""
return dict((key, self.__dict__[key]) for key in self.keys())
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not self:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
def __init__(self, data):
self.__dict__['_data'] = data
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.itervalues()
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, object):
self._data[key] = object
def __getstate__(self):
return {'_data': self.__dict__['_data']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
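# Illustrative usage sketch, not part of the original module: attribute access
# on Properties is proxied straight onto the backing dict. The keys used below
# ('name', 'schema') are arbitrary examples.
def _properties_usage_sketch():
    props = Properties({'name': 'users'})
    assert props.name == 'users'          # __getattr__ reads the dict
    props.schema = 'public'               # __setattr__ writes the dict
    assert props['schema'] == 'public'
    return props.as_immutable()           # read-only proxy over the same data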
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def values(self):
return [self[key] for key in self._list]
def itervalues(self):
return iter([self[key] for key in self._list])
def keys(self):
return list(self._list)
def iterkeys(self):
return iter(self.keys())
def items(self):
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self.update(d)
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
    This strategy has edge cases for builtin types: it's possible to have
    two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools.ifilterfalse(other._members.__contains__,
self._members.iterkeys()):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools.ifilterfalse(self._members.__contains__,
other._members.iterkeys()):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).union(other))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).difference(other))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).intersection(other))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(
self._working_set(members).symmetric_difference(other))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.itervalues())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
def copy(self):
return type(self)(self._members.itervalues())
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return self._members.itervalues()
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._members.values())
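# Illustrative usage sketch, not part of the original module: IdentitySet keys
# on id(), so two equal but distinct objects are both retained, and unhashable
# objects such as lists are accepted.
def _identity_set_usage_sketch():
    a, b = [1, 2], [1, 2]                 # equal lists, different identities
    ids = IdentitySet([a, b])
    assert len(ids) == 2                  # equality does not collapse members
    assert a in ids and b in ids
    ids.discard(a)
    assert a not in ids and b in ids
    return ids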
class WeakSequence(object):
def __init__(self, elements):
self._storage = weakref.WeakValueDictionary(
(idx, element) for idx, element in enumerate(elements)
)
def __iter__(self):
return self._storage.itervalues()
def __getitem__(self, index):
try:
return self._storage[index]
except KeyError:
raise IndexError("Index %s out of range" % index)
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
def unique_list(seq, hashfunc=None):
seen = {}
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen.__setitem__(x, True)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen.__setitem__(hashfunc(x), True)]
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
determined by identity (``is a``) not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
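# Illustrative usage sketch, not part of the original module: appends are
# de-duplicated by identity, so an equal but distinct object is still added.
def _unique_appender_usage_sketch():
    out = []
    appender = UniqueAppender(out)
    item = {'a': 1}
    appender.append(item)
    appender.append(item)                 # ignored: same object
    appender.append({'a': 1})             # added: equal but a different object
    assert len(out) == 2
    return out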
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, (list, tuple)):
return [x]
else:
return x
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, basestring) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
"""
def __init__(self, capacity=100, threshold=.5):
self.capacity = capacity
self.threshold = threshold
self._counter = 0
def _inc_counter(self):
self._counter += 1
return self._counter
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
def _manage_size(self):
while len(self) > self.capacity + self.capacity * self.threshold:
by_counter = sorted(dict.values(self),
key=operator.itemgetter(2),
reverse=True)
for item in by_counter[self.capacity:]:
try:
del self[item[0]]
except KeyError:
                    # if we couldn't find a key, most
# likely some other thread broke in
# on us. loop around and try again
break
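# Illustrative usage sketch, not part of the original module: once the cache
# grows past capacity * (1 + threshold), the least recently used entries are
# pruned back down to `capacity` by the next __setitem__.
def _lru_cache_usage_sketch():
    cache = LRUCache(capacity=2, threshold=.5)
    for key in 'abcd':
        cache[key] = key.upper()          # pruning happens inside __setitem__
    assert len(cache) == 2                # only the most recently used survive
    assert sorted(cache.keys()) == ['c', 'd']
    return cache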
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value forthe current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
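# Illustrative usage sketch, not part of the original module: with a constant
# scope function every call resolves to the same object; a real scopefunc
# would typically return a thread or request identifier.
def _scoped_registry_usage_sketch():
    registry = ScopedRegistry(createfunc=dict, scopefunc=lambda: 'default')
    obj = registry()                      # created lazily for this scope
    assert registry() is obj              # same scope key -> same instance
    registry.clear()
    assert not registry.has()
    return registry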
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| mit | -85,816,194,247,573,900 | -6,489,519,927,301,510,000 | 26.051876 | 84 | 0.572647 | false |
amazinger2013/OpenSesame | libqtopensesame/sketchpad_elements/_textline.py | 2 | 2960 | #-*- coding:utf-8 -*-
"""
This file is part of openexp.
openexp is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
openexp is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with openexp. If not, see <http://www.gnu.org/licenses/>.
"""
import os
from libopensesame.exceptions import osexception
from libqtopensesame.misc import _
from libqtopensesame.sketchpad_elements._base_element import base_element
from libopensesame.sketchpad_elements import textline as textline_runtime
from PyQt4 import QtCore, QtGui
class textline(base_element, textline_runtime):
"""
desc:
A textline element.
See base_element for docstrings and function descriptions.
"""
def show_edit_dialog(self):
"""
desc:
The show-edit dialog for the textline only edits the text, not the
full element script.
"""
text = self.experiment.text_input(_(u'Edit text'),
message=_(u'Please enter a text for the textline'),
content=self.properties[u'text'].replace(u'<br />', u'\n'),
parent=self.sketchpad._edit_widget)
if text == None:
return
self.properties[u'text'] = self.clean_text(text)
self.sketchpad.draw()
@classmethod
def mouse_press(cls, sketchpad, pos):
text = sketchpad.experiment.text_input(title=_(u'New textline'),
message=_(u'Please enter a text for the textline'),
parent=sketchpad._edit_widget)
if text == None:
return None
properties = {
u'x': pos[0],
u'y': pos[1],
u'text': cls.clean_text(text),
u'color': sketchpad.current_color(),
u'center': sketchpad.current_center(),
u'font_family': sketchpad.current_font_family(),
u'font_size': sketchpad.current_font_size(),
u'font_bold': sketchpad.current_font_bold(),
u'font_italic': sketchpad.current_font_italic(),
u'html': sketchpad.current_html(),
u'show_if' : sketchpad.current_show_if()
}
return textline(sketchpad, properties=properties)
@staticmethod
def clean_text(text):
"""
desc:
Cleans text by removing quotes and converting newlines to <br />
tags.
arguments:
text: The text to clean.
type: [str, unicode, QString]
returns:
desc: Clean text.
type: unicode
"""
text = unicode(text)
text = text.replace(os.linesep, u'<br />')
text = text.replace(u'\n', u'<br />')
text = text.replace(u'"', u'')
return text
@staticmethod
def requires_text():
return True
@staticmethod
def requires_color():
return True
@staticmethod
def requires_center():
return True
@staticmethod
def cursor():
return u'cursor-text', -1, -1
| gpl-3.0 | 2,373,024,479,923,650,600 | 8,661,417,102,016,407,000 | 25.19469 | 73 | 0.700338 | false |
pubnub/Zopkio | zopkio/reporters/html_reporter.py | 3 | 10389 | # Copyright 2014 LinkedIn Corp.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Class used to generate the report.
"""
import os
from jinja2 import Environment, FileSystemLoader
import zopkio.constants as constants
import zopkio.runtime as runtime
import zopkio.utils as utils
class _ReportInfo(object):
"""
Holds data shared among all report pages
"""
def __init__(self, output_dir, logs_dir, naarad_dir):
self.output_dir = os.path.abspath(output_dir)
self.resource_dir = os.path.join(output_dir, "resources/")
self.logs_dir = os.path.abspath(logs_dir)
self.naarad_dir = os.path.abspath(naarad_dir)
self.config_to_test_names_map = {}
self.report_file_sfx = "_report.html"
self.home_page = os.path.join(output_dir, "report.html")
self.diff_page = os.path.join(output_dir, "diff.html")
self.log_page = os.path.join(output_dir, "log.html")
self.project_url = "https://github.com/linkedin/Zopkio"
self.results_map = {
"passed": constants.PASSED,
"failed": constants.FAILED,
"skipped": constants.SKIPPED
}
class Reporter(object):
"""
Class that converts the aggregated output into a user-friendly web page.
"""
def __init__(self, report_name, output_dir, logs_dir, naarad_dir):
"""
:param report_name: used in the title of the front-end
:param output_dir: directory where the report will be generated
:param logs_dir: directory of where the logs will be collected
:param naarad_dir: directory containing the naarad reports
"""
self.name = report_name
self.env = Environment(loader=FileSystemLoader(constants.WEB_RESOURCE_DIR)) # used to load html pages for Jinja2
self.data_source = runtime.get_collector()
self.report_info = _ReportInfo(output_dir, logs_dir, naarad_dir)
def get_config_to_test_names_map(self):
config_to_test_names_map = {}
for config_name in self.data_source.get_config_names():
config_to_test_names_map[config_name] = self.data_source.get_test_names(config_name)
return config_to_test_names_map
def get_report_location(self):
"""
Returns the filename of the landing page
"""
return self.report_info.home_page
def generate(self):
"""
Generates the report
"""
self._setup()
header_html = self._generate_header()
footer_html = self._generate_footer()
results_topbar_html = self._generate_topbar("results")
summary_topbar_html = self._generate_topbar("summary")
logs_topbar_html = self._generate_topbar("logs")
diff_topbar_html = self._generate_topbar("diff")
summary_body_html = self._generate_summary_body()
diff_body_html = self._generate_diff_body()
summary_html = header_html + summary_topbar_html + summary_body_html + footer_html
diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html
Reporter._make_file(summary_html, self.report_info.home_page)
Reporter._make_file(diff_html,self.report_info.diff_page)
log_body_html = self._generate_log_body()
log_html = header_html + logs_topbar_html + log_body_html+footer_html
Reporter._make_file(log_html, self.report_info.log_page)
for config_name in self.report_info.config_to_test_names_map.keys():
config_dir = os.path.join(self.report_info.resource_dir, config_name)
utils.makedirs(config_dir)
config_body_html = self._generate_config_body(config_name)
config_html = header_html + results_topbar_html + config_body_html + footer_html
config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx)
Reporter._make_file(config_html, config_file)
for test_name in self.data_source.get_test_names(config_name):
test_body_html = self._generate_test_body(config_name, test_name)
test_html = header_html + results_topbar_html + test_body_html + footer_html
test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx)
Reporter._make_file(test_html, test_file)
def _generate_config_body(self, config_name):
summary_stats = [
self.data_source.count_tests(config_name),
self.data_source.count_tests_with_result(config_name, constants.PASSED),
self.data_source.count_tests_with_result(config_name, constants.FAILED),
self.data_source.count_tests_with_result(config_name, constants.SKIPPED),
self.data_source.get_config_exec_time(config_name),
self.data_source.get_config_start_time(config_name),
self.data_source.get_config_end_time(config_name)
]
config_template = self.env.get_template("config_page.html")
config_body_html = config_template.render(
config_data=self.data_source.get_config_result(config_name),
tests=self.data_source.get_test_results(config_name),
report_info=self.report_info,
summary=summary_stats
)
return config_body_html
def _generate_log_body(self):
log_template = self.env.get_template("logs_page.html")
log_body_html = log_template.render(logs_dir=self.report_info.logs_dir)
return log_body_html
def _generate_footer(self):
footer_template = self.env.get_template("footer.html")
footer_html = footer_template.render()
return footer_html
def _generate_header(self):
CSS_INCLUDES = [
"web_resources/style.css"
]
CSS_INCLUDES[:] = [os.path.join(constants.PROJECT_ROOT_DIR, css_include) for css_include in CSS_INCLUDES]
JS_INCLUDES = [
"web_resources/script.js"
]
JS_INCLUDES[:] = [os.path.join(constants.PROJECT_ROOT_DIR, js_include) for js_include in JS_INCLUDES]
header_template = self.env.get_template("header.html")
header_html = header_template.render(
page_title=self.name,
css_includes=CSS_INCLUDES,
js_includes=JS_INCLUDES
)
return header_html
def _generate_diff_body(self):
diff_body_html = ""
config_tests_dict = {}
config_data_dict = {}
#generate diff page only if multiple configs exist
if (len(self.report_info.config_to_test_names_map.keys()) > 1):
# get list of test names in sorted order
test_names = self.data_source.get_test_results(self.report_info.config_to_test_names_map.keys()[0])
test_names.sort(key=lambda x: x.name)
for config_name in self.report_info.config_to_test_names_map.keys():
config_tests = self.data_source.get_test_results(config_name)
config_tests.sort(key=lambda x: x.name)
config_tests_dict[config_name] = config_tests
config_data_dict[config_name] = self.data_source.get_config_result(config_name)
diff_template = self.env.get_template("diff.html")
diff_body_html = diff_template.render(
test_names = test_names,
report_info = self.report_info,
config_names = self.report_info.config_to_test_names_map.keys(),
config_tests_dict = config_tests_dict,
config_data_dict = config_data_dict
)
return diff_body_html
def _generate_summary_body(self):
summary_stats = [
self.data_source.count_all_tests(),
self.data_source.count_all_tests_with_result(constants.PASSED),
self.data_source.count_all_tests_with_result(constants.FAILED),
self.data_source.count_all_tests_with_result(constants.SKIPPED),
self.data_source.get_total_config_exec_time(),
self.data_source.get_summary_start_time(),
self.data_source.get_summary_end_time()
]
config_failure_map = {}
config_total_tests_map = {}
config_test_failure_map = {}
config_test_skipped_map = {}
config_test_passed_map = {}
for config_name in self.report_info.config_to_test_names_map.keys():
config_total_tests_map[config_name] = self.data_source.count_tests(config_name)
config_failure_map[config_name] = self.data_source.get_config_result(config_name).result
config_test_failure_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.FAILED)
config_test_skipped_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.SKIPPED)
config_test_passed_map[config_name] = self.data_source.count_tests_with_result(config_name, constants.PASSED)
summary_template = self.env.get_template("landing_page.html")
summary_body = summary_template.render(
report_info=self.report_info,
summary=summary_stats,
config_fail=config_failure_map,
config_fail_map=config_test_failure_map,
config_skip_map=config_test_skipped_map,
config_tests_map = config_total_tests_map,
config_pass_map = config_test_passed_map
)
return summary_body
def _generate_topbar(self, active_page):
topbar_template = self.env.get_template("topbar.html")
topbar_html = topbar_template.render(
report_info=self.report_info,
active=active_page,
)
return topbar_html
def _generate_test_body(self, config_name, test_name):
test_template = self.env.get_template("test_page.html")
test_body = test_template.render(
config_name=config_name,
test_data=self.data_source.get_test_result(config_name, test_name),
report_info=self.report_info,
config_data=self.data_source.get_config_result(config_name)
)
return test_body
@staticmethod
def _make_file(html, location):
with open(location, "w") as f:
f.write(html)
def _setup(self):
utils.makedirs(self.report_info.output_dir)
utils.makedirs(self.report_info.resource_dir)
self.report_info.config_to_test_names_map = self.get_config_to_test_names_map()
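# Illustrative usage sketch, not part of the original module. It assumes a test
# run has already populated the shared collector, since Reporter.__init__ reads
# it via runtime.get_collector(); the directory arguments are placeholders.
def _reporter_usage_sketch(output_dir, logs_dir, naarad_dir):
  reporter = Reporter("nightly run", output_dir, logs_dir, naarad_dir)
  reporter.generate()
  return reporter.get_report_location()  # path of the generated landing page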
| apache-2.0 | -7,770,641,492,663,692,000 | 5,291,429,111,988,163,000 | 38.501901 | 117 | 0.688613 | false |
jindongh/kombu | kombu/transport/virtual/exchange.py | 33 | 4580 | """
kombu.transport.virtual.exchange
================================
Implementations of the standard exchanges defined
by the AMQ protocol (excluding the `headers` exchange).
"""
from __future__ import absolute_import
from kombu.utils import escape_regex
import re
class ExchangeType(object):
"""Implements the specifics for an exchange type.
:param channel: AMQ Channel
"""
type = None
def __init__(self, channel):
self.channel = channel
def lookup(self, table, exchange, routing_key, default):
"""Lookup all queues matching `routing_key` in `exchange`.
:returns: `default` if no queues matched.
"""
raise NotImplementedError('subclass responsibility')
def prepare_bind(self, queue, exchange, routing_key, arguments):
"""Return tuple of `(routing_key, regex, queue)` to be stored
for bindings to this exchange."""
return routing_key, None, queue
def equivalent(self, prev, exchange, type,
durable, auto_delete, arguments):
"""Return true if `prev` and `exchange` is equivalent."""
return (type == prev['type'] and
durable == prev['durable'] and
auto_delete == prev['auto_delete'] and
(arguments or {}) == (prev['arguments'] or {}))
class DirectExchange(ExchangeType):
"""The `direct` exchange routes based on exact routing keys."""
type = 'direct'
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, _, queue in table
if rkey == routing_key]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
for queue in _lookup(exchange, routing_key):
_put(queue, message, **kwargs)
class TopicExchange(ExchangeType):
"""The `topic` exchange routes messages based on words separated by
dots, using wildcard characters ``*`` (any single word), and ``#``
(one or more words)."""
type = 'topic'
#: map of wildcard to regex conversions
wildcards = {'*': r'.*?[^\.]',
'#': r'.*?'}
#: compiled regex cache
_compiled = {}
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, pattern, queue in table
if self._match(pattern, routing_key)]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
deadletter = self.channel.deadletter_queue
for queue in [q for q in _lookup(exchange, routing_key)
if q and q != deadletter]:
_put(queue, message, **kwargs)
def prepare_bind(self, queue, exchange, routing_key, arguments):
return routing_key, self.key_to_pattern(routing_key), queue
def key_to_pattern(self, rkey):
"""Get the corresponding regex for any routing key."""
return '^%s$' % ('\.'.join(
self.wildcards.get(word, word)
for word in escape_regex(rkey, '.#*').split('.')
))
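    # Illustrative note (not part of the original module): given the wildcard
    # table above, a routing key such as 'stock.*.nyse' is translated into
    # roughly the pattern r'^stock\..*?[^\.]\.nyse$', i.e. '*' stands for a
    # single dot-separated word while '#' covers one or more words.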
def _match(self, pattern, string):
"""Same as :func:`re.match`, except the regex is compiled and cached,
then reused on subsequent matches with the same pattern."""
try:
compiled = self._compiled[pattern]
except KeyError:
compiled = self._compiled[pattern] = re.compile(pattern, re.U)
return compiled.match(string)
class FanoutExchange(ExchangeType):
"""The `fanout` exchange implements broadcast messaging by delivering
copies of all messages to all queues bound to the exchange.
To support fanout the virtual channel needs to store the table
as shared state. This requires that the `Channel.supports_fanout`
attribute is set to true, and the `Channel._queue_bind` and
`Channel.get_table` methods are implemented. See the redis backend
for an example implementation of these methods.
"""
type = 'fanout'
def lookup(self, table, exchange, routing_key, default):
return [queue for _, _, queue in table]
def deliver(self, message, exchange, routing_key, **kwargs):
if self.channel.supports_fanout:
self.channel._put_fanout(
exchange, message, routing_key, **kwargs)
#: Map of standard exchange types and corresponding classes.
STANDARD_EXCHANGE_TYPES = {'direct': DirectExchange,
'topic': TopicExchange,
'fanout': FanoutExchange}
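# Minimal usage sketch (illustrative only, not part of kombu itself; 'channel',
# 'message' and the exchange/routing-key names are assumed to exist):
#
#   exchange_cls = STANDARD_EXCHANGE_TYPES['direct']
#   exchange = exchange_cls(channel)
#   exchange.deliver(message, 'my_exchange', 'my_routing_key')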
| bsd-3-clause | -8,822,219,246,445,609,000 | -4,287,535,187,890,088,400 | 33.179104 | 77 | 0.616594 | false |
alexeyum/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that changing n_components between (partial) fits raises an error.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause | -6,528,840,162,178,690,000 | 461,163,128,253,542,200 | 35.897321 | 78 | 0.643557 | false |
gcd0318/django | django/core/files/images.py | 429 | 2428 | """
Utility functions for handling images.
Requires Pillow as you might imagine.
"""
import struct
import zlib
from django.core.files import File
class ImageFile(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
def _get_width(self):
return self._get_image_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_image_dimensions()[1]
height = property(_get_height)
def _get_image_dimensions(self):
if not hasattr(self, '_dimensions_cache'):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Returns the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, 'read'):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
file = open(file_or_path, 'rb')
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
# less bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos)
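# Usage sketch (illustrative only; the image path below is hypothetical):
#
#   from django.core.files.images import get_image_dimensions
#   width, height = get_image_dimensions('/tmp/example.png')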
| bsd-3-clause | -7,396,722,628,189,489,000 | -6,533,235,807,060,159,000 | 29.35 | 79 | 0.563839 | false |
yize/grunt-tps | tasks/lib/python/Lib/python2.7/test/test_whichdb.py | 91 | 1799 | #! /usr/bin/env python
"""Test script for the whichdb module
based on test_anydbm.py
"""
import os
import test.test_support
import unittest
import whichdb
import glob
_fname = test.test_support.TESTFN
# Silence Py3k warning
anydbm = test.test_support.import_module('anydbm', deprecated=True)
def _delete_files():
# we don't know the precise name the underlying database uses
# so we use glob to locate all names
for f in glob.glob(_fname + "*"):
try:
os.unlink(f)
except OSError:
pass
class WhichDBTestCase(unittest.TestCase):
# Actual test methods are added to namespace
# after class definition.
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def tearDown(self):
_delete_files()
def setUp(self):
_delete_files()
for name in anydbm._names:
# we define a new test method for each
# candidate database module.
try:
# Silence Py3k warning
mod = test.test_support.import_module(name, deprecated=True)
except unittest.SkipTest:
continue
def test_whichdb_name(self, name=name, mod=mod):
# Check whether whichdb correctly guesses module name
# for databases opened with module mod.
# Try with empty files first
f = mod.open(_fname, 'c')
f.close()
self.assertEqual(name, whichdb.whichdb(_fname))
# Now add a key
f = mod.open(_fname, 'w')
f["1"] = "1"
f.close()
self.assertEqual(name, whichdb.whichdb(_fname))
setattr(WhichDBTestCase,"test_whichdb_%s" % name, test_whichdb_name)
def test_main():
try:
test.test_support.run_unittest(WhichDBTestCase)
finally:
_delete_files()
if __name__ == "__main__":
test_main()
| mit | -3,250,343,780,543,463,400 | 2,838,198,986,744,914,000 | 25.455882 | 72 | 0.625903 | false |
heihei1252/lightblue-0.4 | src/mac/_macutil.py | 68 | 9048 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
# Mac-specific utility functions and constants.
from Foundation import NSObject, NSDate, NSPoint, NSDefaultRunLoopMode, NSTimer
from AppKit import NSApplication, NSEvent, NSApplicationDefined, NSAnyEventMask
import objc
import time
import _IOBluetooth
import _lightbluecommon
# for mac os 10.5
try:
from Foundation import NSUIntegerMax
NSAnyEventMask = NSUIntegerMax
except:
pass
# values of constants used in _IOBluetooth.framework
kIOReturnSuccess = 0 # defined in <IOKit/IOReturn.h>
kIOBluetoothUserNotificationChannelDirectionIncoming = 1
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kBluetoothHCIErrorPageTimeout = 0x04 # <IOBluetooth/Bluetooth.h>
# defined in <IOBluetooth/IOBluetoothUserLib.h>
kIOBluetoothServiceBrowserControllerOptionsNone = 0L
LIGHTBLUE_NOTIFY_ID = 5444 # any old number
WAIT_MAX_TIMEOUT = 3
# IOBluetoothSDPUUID objects for RFCOMM and OBEX protocol UUIDs
PROTO_UUIDS = {
_lightbluecommon.RFCOMM: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0003),
_lightbluecommon.OBEX: _IOBluetooth.IOBluetoothSDPUUID.uuid16_(0x0008)
}
def formatdevaddr(addr):
"""
Returns address of a device in usual form e.g. "00:00:00:00:00:00"
- addr: address as returned by device.getAddressString() on an
IOBluetoothDevice
"""
# make uppercase cos PyS60 & Linux seem to always return uppercase
# addresses
# can safely encode to ascii cos BT addresses are only in hex (pyobjc
# returns all strings in unicode)
return addr.replace("-", ":").encode('ascii').upper()
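# Illustrative example (not part of the original module):
#
#   >>> formatdevaddr(u'00-0e-0a-00-a2-00')
#   '00:0E:0A:00:A2:00'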
def createbtdevaddr(addr):
# in mac 10.5, can use BluetoothDeviceAddress directly
chars = btaddrtochars(addr)
try:
btdevaddr = _IOBluetooth.BluetoothDeviceAddress(chars)
return btdevaddr
except:
return chars
def btaddrtochars(addr):
"""
Takes a bluetooth address and returns a tuple with the corresponding
char values. This can then be used to construct a
IOBluetoothDevice object, providing the signature of the withAddress:
selector has been set (as in _setpyobjcsignatures() in this module).
For example:
>>> chars = btaddrtochars("00:0e:0a:00:a2:00")
>>> chars
(0, 14, 10, 0, 162, 0)
>>> device = _IOBluetooth.IOBluetoothDevice.withAddress_(chars)
>>> type(device)
<objective-c class IOBluetoothDevice at 0xa4024988>
>>> device.getAddressString()
u'00-0e-0a-00-a2-00'
"""
if not _lightbluecommon._isbtaddr(addr):
raise TypeError("address %s not valid bluetooth address" % str(addr))
if addr.find(":") == -1:
addr = addr.replace("-", ":") # consider alternative addr separator
# unhexlify gives binary value like '\x0e', then ord to get the char value.
# unhexlify throws TypeError if value is not a hex pair.
import binascii
chars = [ord(binascii.unhexlify(part)) for part in addr.split(":")]
return tuple(chars)
def looponce():
app = NSApplication.sharedApplication()
# to push the run loops I seem to have to do this twice
# use NSEventTrackingRunLoopMode or NSDefaultRunLoopMode?
for i in range(2):
event = app.nextEventMatchingMask_untilDate_inMode_dequeue_(
NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(0.02),
NSDefaultRunLoopMode, False)
def waituntil(conditionfunc, timeout=None):
"""
Waits until conditionfunc() returns true, or <timeout> seconds have passed.
(If timeout=None, this waits indefinitely until conditionfunc() returns
true.) Returns false if the process timed out, otherwise returns true.
Note!! You must call interruptwait() when you know that conditionfunc()
should be checked (e.g. if you are waiting for data and you know some data
has arrived) so that this can check conditionfunc(); otherwise it will just
continue to wait. (This allows the function to wait for an event that is
sent by interruptwait() instead of polling conditionfunc().)
This allows the caller to wait while the main event loop processes its
events. This must be done for certain situations, e.g. to receive socket
data or to accept client connections on a server socket, since IOBluetooth
requires the presence of an event loop to run these operations.
This function doesn't need to be called if there is something else that is
already processing the main event loop, e.g. if called from within a Cocoa
application.
"""
app = NSApplication.sharedApplication()
starttime = time.time()
if timeout is None:
timeout = NSDate.distantFuture().timeIntervalSinceNow()
if not isinstance(timeout, (int, float)):
raise TypeError("timeout must be int or float, was %s" % \
type(timeout))
endtime = starttime + timeout
while True:
currtime = time.time()
if currtime >= endtime:
return False
# use WAIT_MAX_TIMEOUT, don't wait forever in case of KeyboardInterrupt
e = app.nextEventMatchingMask_untilDate_inMode_dequeue_(NSAnyEventMask, NSDate.dateWithTimeIntervalSinceNow_(min(endtime - currtime, WAIT_MAX_TIMEOUT)), NSDefaultRunLoopMode, True)
if e is not None:
if (e.type() == NSApplicationDefined and e.subtype() == LIGHTBLUE_NOTIFY_ID):
if conditionfunc():
return True
else:
app.postEvent_atStart_(e, True)
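# Typical usage sketch (illustrative only; the callback below is hypothetical):
# poll a flag that some event handler sets, and nudge the run loop from that
# handler with interruptwait() so waituntil() re-checks the condition.
#
#   state = {'done': False}
#   def _on_data_arrived():
#       state['done'] = True
#       interruptwait()
#   finished_in_time = waituntil(lambda: state['done'], timeout=10)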
def interruptwait():
"""
If waituntil() has been called, this will interrupt the waiting process so
it can check whether it should stop waiting.
"""
evt = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(NSApplicationDefined, NSPoint(), NSApplicationDefined, 0, 1, None, LIGHTBLUE_NOTIFY_ID, 0, 0)
NSApplication.sharedApplication().postEvent_atStart_(evt, True)
class BBCocoaSleeper(NSObject):
def init(self):
self = super(BBCocoaSleeper, self).init()
self.timedout = False
return self
def sleep(self, timeout):
NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
timeout, self, "timedOut:", None, False)
self.timedout = False
waituntil(lambda: self.timedout)
def timedOut_(self, timer):
self.timedout = True
interruptwait()
timedOut_ = objc.selector(timedOut_, signature="v@:@")
def waitfor(timeout):
sleeper = BBCocoaSleeper.alloc().init()
sleeper.sleep(timeout)
class BBFileLikeObjectReader(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingInputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be read through a NSInputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectReader, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def readDataWithMaxLength_(self, maxlength):
try:
data = self.__fileobj.read(maxlength)
except Exception:
return None
return buffer(data)
readDataWithMaxLength_ = objc.selector(readDataWithMaxLength_,
signature="@@:I") #"@12@0:4I8" #"@:I"
class BBFileLikeObjectWriter(NSObject):
"""
Provides a suitable delegate class for the BBDelegatingOutputStream class in
LightAquaBlue.framework.
This basically provides a wrapper for a python file-like object so that it
can be written to through a NSOutputStream.
"""
def initWithFileLikeObject_(self, fileobj):
self = super(BBFileLikeObjectWriter, self).init()
self.__fileobj = fileobj
return self
initWithFileLikeObject_ = objc.selector(initWithFileLikeObject_,
signature="@@:@")
def write_(self, data):
try:
self.__fileobj.write(data)
except Exception:
return -1
return data.length()
write_ = objc.selector(write_, signature="i12@0:4@8") #i12@0:4@8 #i@:@
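# Illustrative sketch (not part of the original module): these delegate classes
# are normally handed to the LightAquaBlue stream wrappers, e.g.
#
#   import StringIO
#   reader = BBFileLikeObjectReader.alloc().initWithFileLikeObject_(
#       StringIO.StringIO("payload"))
#   # 'reader' can then back a BBDelegatingInputStream instance.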
| gpl-3.0 | -1,620,164,222,322,151,400 | 3,932,410,278,517,016,600 | 36.7 | 205 | 0.681256 | false |
jtrebosc/JTutils | TSpy/zeroim3D.py | 1 | 1091 | # -*- coding: utf-8 -*-
import sys
import os
PYTHONPATH=os.getenv("PYTHONPATH","not_defined")
if "not_defined" in PYTHONPATH:
MSG("cannot acces to PYTHONPATH environment. It's required for accessing to brukerPARIO lib" )
EXIT()
#add the Library path for importing brukerPARIO
sys.path.append(PYTHONPATH)
import brukerPARIO
from os.path import getsize
# from os import system as execute
import subprocess
def get_os_version():
ver = sys.platform.lower()
if ver.startswith('java'):
import java.lang
ver = java.lang.System.getProperty("os.name").lower()
return ver
OS=get_os_version()
dt=CURDATA()
dat=brukerPARIO.dataset(dt)
fn3iii=dat.returnprocpath()+"/3iii"
try :
sz3iii=getsize(fn3iii)
except :
MSG("No file 3iii")
EXIT()
if OS.startswith('mac') or OS.startswith('linux'):
# for Linux with wine
CMD="dd if=/dev/zero of=%s bs=%d count=1" % (fn3iii,sz3iii)
else:
# for windows using topspin cygwin setup
CMD="to be defined"
MSG("not implemented for Windows yet")
EXIT()
# execute(CMD)
subprocess.call(CMD.split())
| bsd-3-clause | -4,817,174,392,288,432,000 | -7,973,216,291,890,173,000 | 22.212766 | 95 | 0.692026 | false |
dstockwell/catapult | third_party/Paste/paste/util/threadedprint.py | 50 | 8210 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
threadedprint.py
================
:author: Ian Bicking
:date: 12 Jul 2004
Multi-threaded printing; allows the output produced via print to be
separated according to the thread.
To use this, you must install the catcher, like::
threadedprint.install()
The installation optionally takes one of three parameters:
default
The default destination for print statements (e.g., ``sys.stdout``).
factory
A function that will produce the stream for a thread, given the
thread's name.
paramwriter
Instead of writing to a file-like stream, this function will be
called like ``paramwriter(thread_name, text)`` for every write.
The thread name is the value returned by
``threading.currentThread().getName()``, a string (typically something
like Thread-N).
You can also submit file-like objects for specific threads, which will
override any of these parameters. To do this, call ``register(stream,
[threadName])``. ``threadName`` is optional, and if not provided the
stream will be registered for the current thread.
If no specific stream is registered for a thread, and no default has
been provided, then an error will occur when anything is written to
``sys.stdout`` (or printed).
Note: the stream's ``write`` method will be called in the thread the
text came from, so you should consider thread safety, especially if
multiple threads share the same writer.
Note: if you want access to the original standard out, use
``sys.__stdout__``.
You may also uninstall this, via::
threadedprint.uninstall()
TODO
----
* Something with ``sys.stderr``.
* Some default handlers. Maybe something that hooks into `logging`.
* Possibly cache the results of ``factory`` calls. This would be a
semantic change.
"""
import threading
import sys
from paste.util import filemixin
class PrintCatcher(filemixin.FileMixin):
def __init__(self, default=None, factory=None, paramwriter=None,
leave_stdout=False):
assert len(filter(lambda x: x is not None,
[default, factory, paramwriter])) <= 1, (
"You can only provide one of default, factory, or paramwriter")
if leave_stdout:
assert not default, (
"You cannot pass in both default (%r) and "
"leave_stdout=True" % default)
default = sys.stdout
if default:
self._defaultfunc = self._writedefault
elif factory:
self._defaultfunc = self._writefactory
elif paramwriter:
self._defaultfunc = self._writeparam
else:
self._defaultfunc = self._writeerror
self._default = default
self._factory = factory
self._paramwriter = paramwriter
self._catchers = {}
def write(self, v, currentThread=threading.currentThread):
name = currentThread().getName()
catchers = self._catchers
if not catchers.has_key(name):
self._defaultfunc(name, v)
else:
catcher = catchers[name]
catcher.write(v)
def seek(self, *args):
# Weird, but Google App Engine is seeking on stdout
name = threading.currentThread().getName()
catchers = self._catchers
if not name in catchers:
self._default.seek(*args)
else:
catchers[name].seek(*args)
def read(self, *args):
name = threading.currentThread().getName()
catchers = self._catchers
        if not name in catchers:
            return self._default.read(*args)
        else:
            return catchers[name].read(*args)
def _writedefault(self, name, v):
self._default.write(v)
def _writefactory(self, name, v):
self._factory(name).write(v)
def _writeparam(self, name, v):
self._paramwriter(name, v)
def _writeerror(self, name, v):
assert False, (
"There is no PrintCatcher output stream for the thread %r"
% name)
def register(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
self._catchers[name] = catcher
def deregister(self, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
assert self._catchers.has_key(name), (
"There is no PrintCatcher catcher for the thread %r" % name)
del self._catchers[name]
_printcatcher = None
_oldstdout = None
def install(**kw):
global _printcatcher, _oldstdout, register, deregister
if (not _printcatcher or sys.stdout is not _printcatcher):
_oldstdout = sys.stdout
_printcatcher = sys.stdout = PrintCatcher(**kw)
register = _printcatcher.register
deregister = _printcatcher.deregister
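# Usage sketch (illustrative only; the log path is hypothetical): route each
# thread's print output to its own file via the factory hook.
#
#   from paste.util import threadedprint
#   threadedprint.install(factory=lambda name: open('/tmp/%s.log' % name, 'a'))
#   print "hello"           # ends up in /tmp/<current-thread-name>.log
#   threadedprint.uninstall()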
def uninstall():
global _printcatcher, _oldstdout, register, deregister
if _printcatcher:
sys.stdout = _oldstdout
_printcatcher = _oldstdout = None
register = not_installed_error
deregister = not_installed_error
def not_installed_error(*args, **kw):
assert False, (
"threadedprint has not yet been installed (call "
"threadedprint.install())")
register = deregister = not_installed_error
class StdinCatcher(filemixin.FileMixin):
def __init__(self, default=None, factory=None, paramwriter=None):
assert len(filter(lambda x: x is not None,
[default, factory, paramwriter])) <= 1, (
"You can only provide one of default, factory, or paramwriter")
if default:
self._defaultfunc = self._readdefault
elif factory:
self._defaultfunc = self._readfactory
elif paramwriter:
self._defaultfunc = self._readparam
else:
self._defaultfunc = self._readerror
self._default = default
self._factory = factory
self._paramwriter = paramwriter
self._catchers = {}
def read(self, size=None, currentThread=threading.currentThread):
name = currentThread().getName()
catchers = self._catchers
if not catchers.has_key(name):
return self._defaultfunc(name, size)
else:
catcher = catchers[name]
return catcher.read(size)
def _readdefault(self, name, size):
        return self._default.read(size)
    def _readfactory(self, name, size):
        return self._factory(name).read(size)
    def _readparam(self, name, size):
        return self._paramwriter(name, size)
def _readerror(self, name, size):
assert False, (
"There is no StdinCatcher output stream for the thread %r"
% name)
def register(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
self._catchers[name] = catcher
def deregister(self, catcher, name=None,
currentThread=threading.currentThread):
if name is None:
name = currentThread().getName()
assert self._catchers.has_key(name), (
"There is no StdinCatcher catcher for the thread %r" % name)
del self._catchers[name]
_stdincatcher = None
_oldstdin = None
def install_stdin(**kw):
global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
if not _stdincatcher:
_oldstdin = sys.stdin
_stdincatcher = sys.stdin = StdinCatcher(**kw)
register_stdin = _stdincatcher.register
deregister_stdin = _stdincatcher.deregister
def uninstall_stdin():
global _stdincatcher, _oldstdin, register_stdin, deregister_stdin
if _stdincatcher:
sys.stdin = _oldstdin
_stdincatcher = _oldstdin = None
register_stdin = deregister_stdin = not_installed_error_stdin
def not_installed_error_stdin(*args, **kw):
assert False, (
"threadedprint has not yet been installed for stdin (call "
"threadedprint.install_stdin())")
| bsd-3-clause | -3,969,967,141,014,721,000 | 6,140,901,484,391,711,000 | 31.84 | 84 | 0.63581 | false |
RoanokeHobby/Robots | CamJamRobot/9-avoidance.py | 1 | 4715 | # CamJam EduKit 3 - Robotics
# Worksheet 9 – Obstacle Avoidance
# Copyright (c) 2016 CamJam-EduKit
# The MIT License (MIT)
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
# Reversed the pins for a test
pinMotorAForwards = 9
pinMotorABackwards = 10
pinMotorBForwards = 7
pinMotorBBackwards = 8
# Define GPIO pins to use on the Pi
pinTrigger = 17
pinEcho = 18
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent
DutyCycleA = 30
DutyCycleB = 30
# Setting the duty cycle to 0 means the motors will not turn
Stop = 0
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set pins as output and input
GPIO.setup(pinTrigger, GPIO.OUT) # Trigger
GPIO.setup(pinEcho, GPIO.IN) # Echo
# Distance Variables
HowNear = 15.0
ReverseTime = 0.5
TurnTime = 0.75
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
# Turn all motors off
def StopMotors():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors forwards
def Forwards():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn both motors backwards
def Backwards():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Turn left
def Left():
pwmMotorAForwards.ChangeDutyCycle(Stop)
pwmMotorABackwards.ChangeDutyCycle(DutyCycleA)
pwmMotorBForwards.ChangeDutyCycle(DutyCycleB)
pwmMotorBBackwards.ChangeDutyCycle(Stop)
# Turn Right
def Right():
pwmMotorAForwards.ChangeDutyCycle(DutyCycleA)
pwmMotorABackwards.ChangeDutyCycle(Stop)
pwmMotorBForwards.ChangeDutyCycle(Stop)
pwmMotorBBackwards.ChangeDutyCycle(DutyCycleB)
# Take a distance measurement
def Measure():
GPIO.output(pinTrigger, True)
time.sleep(0.00001)
GPIO.output(pinTrigger, False)
StartTime = time.time()
StopTime = StartTime
while GPIO.input(pinEcho)==0:
StartTime = time.time()
StopTime = StartTime
while GPIO.input(pinEcho)==1:
StopTime = time.time()
# If the sensor is too close to an object, the Pi cannot
# see the echo quickly enough, so we have to detect that
# problem and say what has happened.
if StopTime-StartTime >= 0.04:
print("Hold on there! You're too close for me to see.")
StopTime = StartTime
break
ElapsedTime = StopTime - StartTime
Distance = (ElapsedTime * 34300)/2
return Distance
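# Worked example (comment only): sound travels at roughly 34300 cm/s, so an
# echo that arrives 0.001 s after the trigger corresponds to
# (0.001 * 34300) / 2 = 17.15 cm between the sensor and the obstacle.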
# Return True if the ultrasonic sensor sees an obstacle
def IsNearObstacle(localHowNear):
Distance = Measure()
print("Is Near Obstacle: "+str(Distance))
if Distance < localHowNear:
return True
else:
return False
# Move back a little, then turn right
def AvoidObstacle():
# Back off a little
print("Go Backwards")
Backwards()
time.sleep(ReverseTime)
StopMotors()
# Turn right
print("Turn Right")
Right()
time.sleep(TurnTime)
StopMotors()
# Your code to control the robot goes below this line
try:
# Set trigger to False (Low)
GPIO.output(pinTrigger, False)
# Allow module to settle
# time.sleep(0.25)
print("Loading ...")
print("Waiting for sensor to stablize")
time.sleep(1)
print("Three ...")
time.sleep(1)
print("Two ...")
time.sleep(1)
print("One ...")
print("Ultrasonic Measurement")
#repeat the next indented block forever
while True:
Forwards()
time.sleep(0.05)
if IsNearObstacle(HowNear):
StopMotors()
AvoidObstacle()
# If you press CTRL+C, cleanup and stop
except KeyboardInterrupt:
GPIO.cleanup()
| gpl-3.0 | -2,275,103,379,913,343,700 | -698,922,381,751,485,200 | 26.723529 | 68 | 0.719711 | false |
noironetworks/neutron | neutron/tests/unit/common/moved_globals_code1.py | 9 | 1026 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Used by test cases in test__deprecate.py
"""
from neutron.common import _deprecate
from neutron.tests.unit.common import moved_globals_target
# a has been moved to moved_globals_target.a
b = 'barasingha'
# c has been renamed to d
d = 'capybara'
# e has been moved to moved_globals_target.f
g = 'gelada'
_deprecate._moved_global('c', new_name='d')
_deprecate._moved_global('e', new_name='f', new_module=moved_globals_target)
_deprecate._MovedGlobals(moved_globals_target)
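# Illustrative note (not part of the original fixture): with the hooks above in
# place, reading the renamed attribute, e.g.
#
#   from neutron.tests.unit.common import moved_globals_code1
#   moved_globals_code1.c
#
# is expected to emit a deprecation warning and resolve to the new name 'd'
# (the value 'capybara').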
| apache-2.0 | -3,386,381,484,980,060,000 | 4,437,899,002,453,790,000 | 31.0625 | 76 | 0.747563 | false |
aburan28/blaze | blaze/tests/test_get_set.py | 7 | 1665 | from __future__ import absolute_import, division, print_function
import blaze
from blaze.datadescriptor import dd_as_py
import numpy as np
import unittest
from blaze.py2help import skip
from blaze.tests.common import MayBeUriTest
class getitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
self.assertEqual(dd_as_py(a[0]._data), 0)
    @skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
print("a:", a, self.caps)
self.assertEqual(dd_as_py(a[0:2]._data), [0,1])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
self.assertEqual(dd_as_py(a[1]._data), [3,4,5])
class getitem_blz(getitem):
caps={'compress': True}
class setitem(unittest.TestCase):
caps={'compress': False} # the default is non-compressed arrays
def test_scalar(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0] = 1
self.assertEqual(dd_as_py(a[0]._data), 1)
@skip('slices should be implemented')
def test_1d(self):
a = blaze.array(np.arange(3), caps=self.caps)
a[0:2] = 2
self.assertEqual(dd_as_py(a[0:2]._data), [2,2])
def test_2d(self):
a = blaze.array(np.arange(3*3).reshape(3,3), caps=self.caps)
a[1] = 2
self.assertEqual(dd_as_py(a[1]._data), [2,2,2])
# BLZ is going to be read and append only for the time being
# class setitem_blz(setitem):
# caps={'compress': True}
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -1,013,721,859,602,476,000 | 6,812,447,992,036,512,000 | 29.272727 | 68 | 0.625225 | false |
chokribr/invenio | invenio/legacy/bibclassify/engine.py | 4 | 27111 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibClassify engine.
This module is the main module of BibClassify. its two main methods are
output_keywords_for_sources and get_keywords_from_text. The first one output
keywords for a list of sources (local files or URLs, PDF or text) while the
second one outputs the keywords for text lines (which are obtained using the
module bibclassify_text_normalizer).
This module also takes care of the different outputs (text, MARCXML or HTML).
But unfortunately there is a confusion between running in a standalone mode
and producing output suitable for printing, and running in a web-based
mode where the webtemplate is used. For the moment the pieces of the representation
code are left in this module.
"""
from __future__ import print_function
import os
import re
from six import iteritems
import config as bconfig
from invenio.legacy.bibclassify import ontology_reader as reader
import text_extractor as extractor
import text_normalizer as normalizer
import keyword_analyzer as keyworder
import acronym_analyzer as acronymer
from invenio.utils.text import encode_for_xml
from invenio.utils.filedownload import download_url
log = bconfig.get_logger("bibclassify.engine")
# ---------------------------------------------------------------------
# API
# ---------------------------------------------------------------------
def output_keywords_for_sources(input_sources, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER, spires=False,
match_mode="full", no_cache=False, with_author_keywords=False,
rebuild_cache=False, only_core_tags=False, extract_acronyms=False,
api=False, **kwargs):
"""Output the keywords for each source in sources."""
from invenio.legacy.refextract.engine import get_plaintext_document_body
# Inner function which does the job and it would be too much work to
# refactor the call (and it must be outside the loop, before it did
# not process multiple files)
def process_lines():
if output_mode == "text":
print("Input file: %s" % source)
line_nb = len(text_lines)
word_nb = 0
for line in text_lines:
word_nb += len(re.findall("\S+", line))
log.info("Remote file has %d lines and %d words." % (line_nb, word_nb))
output = get_keywords_from_text(
text_lines,
taxonomy_name,
output_mode=output_mode,
output_limit=output_limit,
spires=spires,
match_mode=match_mode,
no_cache=no_cache,
with_author_keywords=with_author_keywords,
rebuild_cache=rebuild_cache,
only_core_tags=only_core_tags,
extract_acronyms=extract_acronyms
)
if api:
return output
else:
if isinstance(output, dict):
for i in output:
print(output[i])
# Get the fulltext for each source.
for entry in input_sources:
log.info("Trying to read input file %s." % entry)
text_lines = None
source = ""
if os.path.isdir(entry):
for filename in os.listdir(entry):
if filename.startswith('.'):
continue
filename = os.path.join(entry, filename)
if os.path.isfile(filename):
text_lines, dummy = get_plaintext_document_body(filename)
if text_lines:
source = filename
process_lines()
elif os.path.isfile(entry):
text_lines, dummy = get_plaintext_document_body(entry)
if text_lines:
source = os.path.basename(entry)
process_lines()
else:
# Treat as a URL.
local_file = download_url(entry)
text_lines, dummy = get_plaintext_document_body(local_file)
if text_lines:
source = entry.split("/")[-1]
process_lines()
def get_keywords_from_local_file(local_file, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER, spires=False,
match_mode="full", no_cache=False, with_author_keywords=False,
rebuild_cache=False, only_core_tags=False, extract_acronyms=False, api=False,
**kwargs):
"""Output keywords reading a local file.
Arguments and output are the same as for :see: get_keywords_from_text().
"""
log.info("Analyzing keywords for local file %s." % local_file)
text_lines = extractor.text_lines_from_local_file(local_file)
return get_keywords_from_text(text_lines,
taxonomy_name,
output_mode=output_mode,
output_limit=output_limit,
spires=spires,
match_mode=match_mode,
no_cache=no_cache,
with_author_keywords=with_author_keywords,
rebuild_cache=rebuild_cache,
only_core_tags=only_core_tags,
extract_acronyms=extract_acronyms)
def get_keywords_from_text(text_lines, taxonomy_name, output_mode="text",
output_limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER,
spires=False, match_mode="full", no_cache=False,
with_author_keywords=False, rebuild_cache=False,
only_core_tags=False, extract_acronyms=False,
**kwargs):
"""Extract keywords from the list of strings.
:param text_lines: list of strings (will be normalized before being
joined into one string)
    :param taxonomy_name: string, name of the taxonomy
:param output_mode: string - text|html|marcxml|raw
:param output_limit: int
:param spires: boolean, if True marcxml output reflect spires codes.
:param match_mode: str - partial|full; in partial mode only
beginning of the fulltext is searched.
:param no_cache: boolean, means loaded definitions will not be saved.
:param with_author_keywords: boolean, extract keywords from the pdfs.
:param rebuild_cache: boolean
:param only_core_tags: boolean
:return: if output_mode=raw, it will return
(single_keywords, composite_keywords, author_keywords, acronyms)
for other output modes it returns formatted string
"""
cache = reader.get_cache(taxonomy_name)
if not cache:
reader.set_cache(taxonomy_name,
reader.get_regular_expressions(taxonomy_name,
rebuild=rebuild_cache,
no_cache=no_cache))
cache = reader.get_cache(taxonomy_name)
_skw = cache[0]
_ckw = cache[1]
text_lines = normalizer.cut_references(text_lines)
fulltext = normalizer.normalize_fulltext("\n".join(text_lines))
if match_mode == "partial":
fulltext = _get_partial_text(fulltext)
author_keywords = None
if with_author_keywords:
author_keywords = extract_author_keywords(_skw, _ckw, fulltext)
acronyms = {}
if extract_acronyms:
acronyms = extract_abbreviations(fulltext)
single_keywords = extract_single_keywords(_skw, fulltext)
composite_keywords = extract_composite_keywords(_ckw, fulltext, single_keywords)
if only_core_tags:
single_keywords = clean_before_output(_filter_core_keywors(single_keywords))
composite_keywords = _filter_core_keywors(composite_keywords)
else:
# Filter out the "nonstandalone" keywords
single_keywords = clean_before_output(single_keywords)
return get_keywords_output(single_keywords, composite_keywords, taxonomy_name,
author_keywords, acronyms, output_mode, output_limit,
spires, only_core_tags)
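# Rough usage sketch (illustrative only; assumes a taxonomy named "HEP" is
# installed and that the requested output mode is passed as a list, since
# get_keywords_output() iterates over it):
#
#   lines = ["We study quark-gluon plasma formation in heavy-ion collisions."]
#   styles = get_keywords_from_text(lines, "HEP", output_mode=["text"])
#   print(styles["text"])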
def extract_single_keywords(skw_db, fulltext):
"""Find single keywords in the fulltext.
:var skw_db: list of KeywordToken objects
:var fulltext: string, which will be searched
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], ],
..
}
or empty {}
"""
return keyworder.get_single_keywords(skw_db, fulltext) or {}
def extract_composite_keywords(ckw_db, fulltext, skw_spans):
"""Returns a list of composite keywords bound with the number of
occurrences found in the text string.
:var ckw_db: list of KewordToken objects (they are supposed to be composite ones)
:var fulltext: string to search in
:skw_spans: dictionary of already identified single keywords
:return : dictionary of matches in a format {
<keyword object>, [[position, position...], [info_about_matches] ],
..
}
or empty {}
"""
return keyworder.get_composite_keywords(ckw_db, fulltext, skw_spans) or {}
def extract_abbreviations(fulltext):
"""Extract acronyms from the fulltext
:var fulltext: utf-8 string
    :return: dictionary of matches in a format {
<keyword object>, [matched skw or ckw object, ....]
}
or empty {}
"""
acronyms = {}
K = reader.KeywordToken
for k, v in acronymer.get_acronyms(fulltext).items():
acronyms[K(k, type='acronym')] = v
return acronyms
def extract_author_keywords(skw_db, ckw_db, fulltext):
"""Finds out human defined keyowrds in a text string. Searches for
the string "Keywords:" and its declinations and matches the
following words.
:var skw_db: list single kw object
:var ckw_db: list of composite kw objects
:var fulltext: utf-8 string
:return: dictionary of matches in a formt {
<keyword object>, [matched skw or ckw object, ....]
}
or empty {}
"""
akw = {}
K = reader.KeywordToken
for k, v in keyworder.get_author_keywords(skw_db, ckw_db, fulltext).items():
akw[K(k, type='author-kw')] = v
return akw
# ---------------------------------------------------------------------
# presentation functions
# ---------------------------------------------------------------------
def get_keywords_output(single_keywords, composite_keywords, taxonomy_name,
author_keywords=None, acronyms=None, style="text", output_limit=0,
spires=False, only_core_tags=False):
"""Returns a formatted string representing the keywords according
to the chosen style. This is the main routing call, this function will
also strip unwanted keywords before output and limits the number
of returned keywords
:var single_keywords: list of single keywords
:var composite_keywords: list of composite keywords
:var taxonomy_name: string, taxonomy name
:keyword author_keywords: dictionary of author keywords extracted from fulltext
:keyword acronyms: dictionary of extracted acronyms
:keyword style: text|html|marc
:keyword output_limit: int, number of maximum keywords printed (it applies
to single and composite keywords separately)
    :keyword spires: boolean meaning spires output style
:keyword only_core_tags: boolean
"""
categories = {}
# sort the keywords, but don't limit them (that will be done later)
single_keywords_p = _sort_kw_matches(single_keywords)
composite_keywords_p = _sort_kw_matches(composite_keywords)
for w in single_keywords_p:
categories[w[0].concept] = w[0].type
for w in single_keywords_p:
categories[w[0].concept] = w[0].type
complete_output = _output_complete(single_keywords_p, composite_keywords_p,
author_keywords, acronyms, spires,
only_core_tags, limit=output_limit)
functions = {"text": _output_text, "marcxml": _output_marc, "html":
_output_html, "dict": _output_dict}
my_styles = {}
for s in style:
if s != "raw":
my_styles[s] = functions[s](complete_output, categories)
else:
if output_limit > 0:
my_styles["raw"] = (_kw(_sort_kw_matches(single_keywords, output_limit)),
_kw(_sort_kw_matches(composite_keywords, output_limit)),
author_keywords, # this we don't limit (?)
_kw(_sort_kw_matches(acronyms, output_limit)))
else:
my_styles["raw"] = (single_keywords_p, composite_keywords_p, author_keywords, acronyms)
return my_styles
def build_marc(recid, single_keywords, composite_keywords,
spires=False, author_keywords=None, acronyms=None):
"""Create xml record.
    :var recid: integer
:var single_keywords: dictionary of kws
:var composite_keywords: dictionary of kws
:keyword spires: please don't use, left for historical
reasons
:keyword author_keywords: dictionary of extracted keywords
:keyword acronyms: dictionary of extracted acronyms
:return: str, marxml
"""
output = ['<collection><record>\n'
'<controlfield tag="001">%s</controlfield>' % recid]
# no need to sort
single_keywords = single_keywords.items()
composite_keywords = composite_keywords.items()
output.append(_output_marc(single_keywords, composite_keywords, author_keywords, acronyms))
output.append('</record></collection>')
return '\n'.join(output)
def _output_marc(output_complete, categories, kw_field=bconfig.CFG_MAIN_FIELD,
auth_field=bconfig.CFG_AUTH_FIELD, acro_field=bconfig.CFG_ACRON_FIELD,
provenience='BibClassify'):
"""Output the keywords in the MARCXML format.
:var skw_matches: list of single keywords
:var ckw_matches: list of composite keywords
:var author_keywords: dictionary of extracted author keywords
:var acronyms: dictionary of acronyms
:var spires: boolean, True=generate spires output - BUT NOTE: it is
here only not to break compatibility, in fact spires output
should never be used for xml because if we read marc back
into the KeywordToken objects, we would not find them
:keyword provenience: string that identifies source (authority) that
assigned the contents of the field
:return: string, formatted MARC"""
kw_template = ('<datafield tag="%s" ind1="%s" ind2="%s">\n'
' <subfield code="2">%s</subfield>\n'
' <subfield code="a">%s</subfield>\n'
' <subfield code="n">%s</subfield>\n'
' <subfield code="9">%s</subfield>\n'
'</datafield>\n')
output = []
tag, ind1, ind2 = _parse_marc_code(kw_field)
for keywords in (output_complete["Single keywords"], output_complete["Core keywords"]):
for kw in keywords:
output.append(kw_template % (tag, ind1, ind2, encode_for_xml(provenience),
encode_for_xml(kw), keywords[kw],
encode_for_xml(categories[kw])))
for field, keywords in ((auth_field, output_complete["Author keywords"]),
(acro_field, output_complete["Acronyms"])):
if keywords and len(keywords) and field: # field='' we shall not save the keywords
tag, ind1, ind2 = _parse_marc_code(field)
for kw, info in keywords.items():
output.append(kw_template % (tag, ind1, ind2, encode_for_xml(provenience),
encode_for_xml(kw), '', encode_for_xml(categories[kw])))
return "".join(output)
def _output_complete(skw_matches=None, ckw_matches=None, author_keywords=None,
acronyms=None, spires=False, only_core_tags=False,
limit=bconfig.CFG_BIBCLASSIFY_DEFAULT_OUTPUT_NUMBER):
if limit:
resized_skw = skw_matches[0:limit]
resized_ckw = ckw_matches[0:limit]
else:
resized_skw = skw_matches
resized_ckw = ckw_matches
results = {"Core keywords": _get_core_keywords(skw_matches, ckw_matches, spires=spires)}
if not only_core_tags:
results["Author keywords"] = _get_author_keywords(author_keywords, spires=spires)
results["Composite keywords"] = _get_compositekws(resized_ckw, spires=spires)
results["Single keywords"] = _get_singlekws(resized_skw, spires=spires)
results["Field codes"] = _get_fieldcodes(resized_skw, resized_ckw, spires=spires)
results["Acronyms"] = _get_acronyms(acronyms)
return results
def _output_dict(complete_output, categories):
return {
"complete_output": complete_output,
"categories": categories
}
def _output_text(complete_output, categories):
"""Output the results obtained in text format.
    :return: str, plain-text formatted output
"""
output = ""
for result in complete_output:
list_result = complete_output[result]
if list_result:
list_result_sorted = sorted(list_result, key=lambda x: list_result[x],
reverse=True)
output += "\n\n{0}:\n".format(result)
for element in list_result_sorted:
output += "\n{0} {1}".format(list_result[element], element)
output += "\n--\n{0}".format(_signature())
return output
def _output_html(complete_output, categories):
"""Output the same as txt output does, but HTML formatted.
:var skw_matches: sorted list of single keywords
:var ckw_matches: sorted list of composite keywords
:var author_keywords: dictionary of extracted author keywords
:var acronyms: dictionary of acronyms
:var spires: boolean
:var only_core_tags: boolean
:keyword limit: int, number of printed keywords
:return: str, html formatted output
"""
return """<html>
<head>
<title>Automatically generated keywords by bibclassify</title>
</head>
<body>
{0}
</body>
</html>""".format(
        _output_text(complete_output, categories).replace('\n', '<br>')
).replace('\n', '')
def _get_singlekws(skw_matches, spires=False):
"""
:var skw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for single_keyword, info in skw_matches:
output[single_keyword.output(spires)] = len(info[0])
return output
def _get_compositekws(ckw_matches, spires=False):
"""
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: list of formatted keywords
"""
output = {}
for composite_keyword, info in ckw_matches:
output[composite_keyword.output(spires)] = {"numbers": len(info[0]),
"details": info[1]}
return output
def _get_acronyms(acronyms):
"""Return a formatted list of acronyms."""
acronyms_str = {}
if acronyms:
for acronym, expansions in iteritems(acronyms):
expansions_str = ", ".join(["%s (%d)" % expansion
for expansion in expansions])
acronyms_str[acronym] = expansions_str
return acronyms
def _get_author_keywords(author_keywords, spires=False):
"""Format the output for the author keywords.
:return: list of formatted author keywors
"""
out = {}
if author_keywords:
for keyword, matches in author_keywords.items():
skw_matches = matches[0] # dictionary of single keywords
ckw_matches = matches[1] # dict of composite keywords
matches_str = []
for ckw, spans in ckw_matches.items():
matches_str.append(ckw.output(spires))
for skw, spans in skw_matches.items():
matches_str.append(skw.output(spires))
if matches_str:
out[keyword] = matches_str
else:
out[keyword] = 0
return out
def _get_fieldcodes(skw_matches, ckw_matches, spires=False):
"""Return the output for the field codes.
:var skw_matches: dict of {keyword: [info,...]}
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: string"""
fieldcodes = {}
output = {}
for skw, _ in skw_matches:
for fieldcode in skw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add(skw.output(spires))
for ckw, _ in ckw_matches:
if len(ckw.fieldcodes):
for fieldcode in ckw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add(ckw.output(spires))
else: # inherit field-codes from the composites
for kw in ckw.getComponents():
for fieldcode in kw.fieldcodes:
fieldcodes.setdefault(fieldcode, set()).add('%s*' % ckw.output(spires))
fieldcodes.setdefault('*', set()).add(kw.output(spires))
for fieldcode, keywords in fieldcodes.items():
output[fieldcode] = ', '.join(keywords)
return output
def _get_core_keywords(skw_matches, ckw_matches, spires=False):
"""Return the output for the field codes.
:var skw_matches: dict of {keyword: [info,...]}
:var ckw_matches: dict of {keyword: [info,...]}
:keyword spires: bool, to get the spires output
:return: set of formatted core keywords
"""
output = {}
category = {}
def _get_value_kw(kw):
"""Help to sort the Core keywords."""
i = 0
while kw[i].isdigit():
i += 1
if i > 0:
return int(kw[:i])
else:
return 0
for skw, info in skw_matches:
if skw.core:
output[skw.output(spires)] = len(info[0])
category[skw.output(spires)] = skw.type
for ckw, info in ckw_matches:
if ckw.core:
output[ckw.output(spires)] = len(info[0])
else:
            # Test whether one of the components is core
i = 0
for c in ckw.getComponents():
if c.core:
output[c.output(spires)] = info[1][i]
i += 1
return output
def _filter_core_keywors(keywords):
matches = {}
for kw, info in keywords.items():
if kw.core:
matches[kw] = info
return matches
def _signature():
"""Print out the bibclassify signature.
#todo: add information about taxonomy, rdflib"""
return 'bibclassify v%s' % (bconfig.VERSION,)
def clean_before_output(kw_matches):
"""Return a clean copy of the keywords data structure.
Stripped off the standalone and other unwanted elements"""
filtered_kw_matches = {}
for kw_match, info in iteritems(kw_matches):
if not kw_match.nostandalone:
filtered_kw_matches[kw_match] = info
return filtered_kw_matches
# ---------------------------------------------------------------------
# helper functions
# ---------------------------------------------------------------------
def _skw_matches_comparator(kw0, kw1):
"""
Compare 2 single keywords objects.
First by the number of their spans (ie. how many times they were found),
if it is equal it compares them by lenghts of their labels.
"""
list_comparison = cmp(len(kw1[1][0]), len(kw0[1][0]))
if list_comparison:
return list_comparison
if kw0[0].isComposite() and kw1[0].isComposite():
component_avg0 = sum(kw0[1][1]) / len(kw0[1][1])
component_avg1 = sum(kw1[1][1]) / len(kw1[1][1])
component_comparison = cmp(component_avg1, component_avg0)
if component_comparison:
return component_comparison
return cmp(len(str(kw1[0])), len(str(kw0[0])))
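# Illustrative sketch (added for clarity, not part of the original module):
# the stub below only mimics the two methods the comparator relies on, and the
# keyword found in more spans sorts first.
def _example_comparator_ordering():
    """Return True when the entry with more spans sorts before the other."""
    class _StubKw(object):
        def __init__(self, label):
            self._label = label
        def isComposite(self):
            return False
        def __str__(self):
            return self._label
    frequent = (_StubKw('jets'), [['span1', 'span2'], []])
    rare = (_StubKw('electroweak interaction'), [['span1'], []])
    return _skw_matches_comparator(frequent, rare) < 0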
def _kw(keywords):
"""Turn list of keywords into dictionary."""
r = {}
for k, v in keywords:
r[k] = v
return r
def _sort_kw_matches(skw_matches, limit=0):
"""Return a resized version of keywords to the given length."""
sorted_keywords = list(skw_matches.items())
sorted_keywords.sort(_skw_matches_comparator)
return limit and sorted_keywords[:limit] or sorted_keywords
def _get_partial_text(fulltext):
"""
Return a short version of the fulltext used with the partial matching mode.
The version is composed of 20% in the beginning and 20% in the middle of the
text."""
length = len(fulltext)
get_index = lambda x: int(float(x) / 100 * length)
partial_text = [fulltext[get_index(start):get_index(end)]
for start, end in bconfig.CFG_BIBCLASSIFY_PARTIAL_TEXT]
return "\n".join(partial_text)
def save_keywords(filename, xml):
    """Write the keyword XML to filename, creating its directory if needed."""
tmp_dir = os.path.dirname(filename)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
file_desc = open(filename, "w")
file_desc.write(xml)
file_desc.close()
def get_tmp_file(recid):
    """Return the path of the temporary keywords XML file for the given record id."""
tmp_directory = "%s/bibclassify" % bconfig.CFG_TMPDIR
if not os.path.isdir(tmp_directory):
os.mkdir(tmp_directory)
filename = "bibclassify_%s.xml" % recid
abs_path = os.path.join(tmp_directory, filename)
return abs_path
def _parse_marc_code(field):
"""Parse marc field and return default indicators if not filled in."""
field = str(field)
if len(field) < 4:
raise Exception('Wrong field code: %s' % field)
else:
field += '__'
tag = field[0:3]
ind1 = field[3].replace('_', '')
ind2 = field[4].replace('_', '')
return tag, ind1, ind2
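# Worked example (added for illustration, not part of the original module).
def _example_parse_marc_code():
    """Show the tag/indicator split for padded MARC field codes."""
    assert _parse_marc_code('6531_') == ('653', '1', '')
    assert _parse_marc_code('100__') == ('100', '', '')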
if __name__ == "__main__":
log.error("Please use bibclassify_cli from now on.")
| gpl-2.0 | 6,844,708,359,844,150,000 | 8,940,540,351,881,108,000 | 36.394483 | 110 | 0.599314 | false |
sharpbitmessage/PyBitmessage | regenerateaddresses.py | 1 | 8348 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'regenerateaddresses.ui'
#
# Created: Thu Jan 24 15:52:24 2013
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_regenerateAddressesDialog(object):
def setupUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setObjectName(_fromUtf8("regenerateAddressesDialog"))
regenerateAddressesDialog.resize(532, 332)
self.gridLayout_2 = QtGui.QGridLayout(regenerateAddressesDialog)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.buttonBox = QtGui.QDialogButtonBox(regenerateAddressesDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.groupBox = QtGui.QGroupBox(regenerateAddressesDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout = QtGui.QGridLayout(self.groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_6 = QtGui.QLabel(self.groupBox)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 1, 0, 1, 1)
self.lineEditPassphrase = QtGui.QLineEdit(self.groupBox)
self.lineEditPassphrase.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)
self.lineEditPassphrase.setEchoMode(QtGui.QLineEdit.Password)
self.lineEditPassphrase.setObjectName(_fromUtf8("lineEditPassphrase"))
self.gridLayout.addWidget(self.lineEditPassphrase, 2, 0, 1, 5)
self.label_11 = QtGui.QLabel(self.groupBox)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 3, 0, 1, 3)
self.spinBoxNumberOfAddressesToMake = QtGui.QSpinBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinBoxNumberOfAddressesToMake.sizePolicy().hasHeightForWidth())
self.spinBoxNumberOfAddressesToMake.setSizePolicy(sizePolicy)
self.spinBoxNumberOfAddressesToMake.setMinimum(1)
self.spinBoxNumberOfAddressesToMake.setProperty("value", 8)
self.spinBoxNumberOfAddressesToMake.setObjectName(_fromUtf8("spinBoxNumberOfAddressesToMake"))
self.gridLayout.addWidget(self.spinBoxNumberOfAddressesToMake, 3, 3, 1, 1)
spacerItem = QtGui.QSpacerItem(132, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 3, 4, 1, 1)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.lineEditAddressVersionNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditAddressVersionNumber.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditAddressVersionNumber.sizePolicy().hasHeightForWidth())
self.lineEditAddressVersionNumber.setSizePolicy(sizePolicy)
self.lineEditAddressVersionNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditAddressVersionNumber.setObjectName(_fromUtf8("lineEditAddressVersionNumber"))
self.gridLayout.addWidget(self.lineEditAddressVersionNumber, 4, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 4, 2, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 5, 0, 1, 1)
self.lineEditStreamNumber = QtGui.QLineEdit(self.groupBox)
self.lineEditStreamNumber.setEnabled(False)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.lineEditStreamNumber.sizePolicy().hasHeightForWidth())
self.lineEditStreamNumber.setSizePolicy(sizePolicy)
self.lineEditStreamNumber.setMaximumSize(QtCore.QSize(31, 16777215))
self.lineEditStreamNumber.setObjectName(_fromUtf8("lineEditStreamNumber"))
self.gridLayout.addWidget(self.lineEditStreamNumber, 5, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(325, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 5, 2, 1, 3)
self.checkBoxEighteenByteRipe = QtGui.QCheckBox(self.groupBox)
self.checkBoxEighteenByteRipe.setObjectName(_fromUtf8("checkBoxEighteenByteRipe"))
self.gridLayout.addWidget(self.checkBoxEighteenByteRipe, 6, 0, 1, 5)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setWordWrap(True)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 7, 0, 1, 5)
self.label = QtGui.QLabel(self.groupBox)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 5)
self.gridLayout_2.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(regenerateAddressesDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), regenerateAddressesDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), regenerateAddressesDialog.reject)
QtCore.QMetaObject.connectSlotsByName(regenerateAddressesDialog)
def retranslateUi(self, regenerateAddressesDialog):
regenerateAddressesDialog.setWindowTitle(QtGui.QApplication.translate("regenerateAddressesDialog", "Regenerate Existing Addresses", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("regenerateAddressesDialog", "Regenerate existing addresses", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Passphrase", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Number of addresses to make based on your passphrase:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Address version Number:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditAddressVersionNumber.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "2", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Stream number:", None, QtGui.QApplication.UnicodeUTF8))
self.lineEditStreamNumber.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "1", None, QtGui.QApplication.UnicodeUTF8))
self.checkBoxEighteenByteRipe.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "Spend several minutes of extra computing time to make the address(es) 1 or 2 characters shorter", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "You must check (or not check) this box just like you did (or didn\'t) when you made your addresses the first time.", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("regenerateAddressesDialog", "If you have previously made deterministic addresses but lost them due to an accident (like hard drive failure), you can regenerate them here. If you used the random number generator to make your addresses then this form will be of no use to you.", None, QtGui.QApplication.UnicodeUTF8))
| mit | -3,088,838,789,875,903,500 | -2,804,148,281,120,571,400 | 71.591304 | 372 | 0.750958 | false |
emchristiansen/gtest-sbt-cpp | test/gtest_test_utils.py | 408 | 10444 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
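# Usage sketch (added for illustration, not part of the original file): the
# command line overrides the environment, which overrides the default, e.g.
#
#   BUILD_DIR=/tmp/env python my_test.py --build_dir=/tmp/cli
#
# leaves GetFlag('build_dir') == '/tmp/cli' and strips the flag from sys.argv.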
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
      signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a tuple holding the child's combined output
      # as its first element.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
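# Usage sketch (added for illustration, not part of the original file);
# 'my_test' is a hypothetical binary name:
#
#   p = Subprocess([GetTestExecutablePath('my_test'), '--gtest_list_tests'])
#   if p.exited and p.exit_code == 0:
#     print p.output
#
# stderr is folded into p.output unless capture_stderr=False is passed.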
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| bsd-3-clause | 5,658,226,118,029,817,000 | 7,765,239,057,926,728,000 | 33.242623 | 79 | 0.672731 | false |
Akshay0724/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | 3 | 12567 | """Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from sklearn.externals.funcsigs import signature
import numpy as np
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_equal, assert_almost_equal,
assert_not_equal, assert_array_equal,
assert_array_almost_equal)
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))
kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
ConstantKernel(constant_value=10.0),
2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * RBF(length_scale=0.5), kernel_white,
2.0 * RBF(length_scale=[0.5, 2.0]),
2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
2.0 * Matern(length_scale=0.5, nu=0.5),
2.0 * Matern(length_scale=1.5, nu=1.5),
2.0 * Matern(length_scale=2.5, nu=2.5),
2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
RationalQuadratic(length_scale=0.5, alpha=1.5),
ExpSineSquared(length_scale=0.5, periodicity=1.5),
DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
for metric in PAIRWISE_KERNEL_FUNCTIONS:
if metric in ["additive_chi2", "chi2"]:
continue
kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
def test_kernel_gradient():
# Compare analytic and numeric gradient of kernels.
for kernel in kernels:
K, K_gradient = kernel(X, eval_gradient=True)
assert_equal(K_gradient.shape[0], X.shape[0])
assert_equal(K_gradient.shape[1], X.shape[0])
assert_equal(K_gradient.shape[2], kernel.theta.shape[0])
def eval_kernel_for_theta(theta):
kernel_clone = kernel.clone_with_theta(theta)
K = kernel_clone(X, eval_gradient=False)
return K
K_gradient_approx = \
_approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)
assert_almost_equal(K_gradient, K_gradient_approx, 4)
def test_kernel_theta():
# Check that parameter vector theta of kernel is set correctly.
for kernel in kernels:
if isinstance(kernel, KernelOperator) \
or isinstance(kernel, Exponentiation): # skip non-basic kernels
continue
theta = kernel.theta
_, K_gradient = kernel(X, eval_gradient=True)
# Determine kernel parameters that contribute to theta
init_sign = signature(kernel.__class__.__init__).parameters.values()
args = [p.name for p in init_sign if p.name != 'self']
theta_vars = map(lambda s: s[0:-len("_bounds")],
filter(lambda s: s.endswith("_bounds"), args))
assert_equal(
set(hyperparameter.name
for hyperparameter in kernel.hyperparameters),
set(theta_vars))
# Check that values returned in theta are consistent with
# hyperparameter values (being their logarithms)
for i, hyperparameter in enumerate(kernel.hyperparameters):
assert_equal(theta[i],
np.log(getattr(kernel, hyperparameter.name)))
# Fixed kernel parameters must be excluded from theta and gradient.
for i, hyperparameter in enumerate(kernel.hyperparameters):
# create copy with certain hyperparameter fixed
params = kernel.get_params()
params[hyperparameter.name + "_bounds"] = "fixed"
kernel_class = kernel.__class__
new_kernel = kernel_class(**params)
# Check that theta and K_gradient are identical with the fixed
# dimension left out
_, K_gradient_new = new_kernel(X, eval_gradient=True)
assert_equal(theta.shape[0], new_kernel.theta.shape[0] + 1)
assert_equal(K_gradient.shape[2], K_gradient_new.shape[2] + 1)
if i > 0:
assert_equal(theta[:i], new_kernel.theta[:i])
assert_array_equal(K_gradient[..., :i],
K_gradient_new[..., :i])
if i + 1 < len(kernel.hyperparameters):
assert_equal(theta[i + 1:], new_kernel.theta[i:])
assert_array_equal(K_gradient[..., i + 1:],
K_gradient_new[..., i:])
# Check that values of theta are modified correctly
for i, hyperparameter in enumerate(kernel.hyperparameters):
theta[i] = np.log(42)
kernel.theta = theta
assert_almost_equal(getattr(kernel, hyperparameter.name), 42)
setattr(kernel, hyperparameter.name, 43)
assert_almost_equal(kernel.theta[i], np.log(43))
def test_auto_vs_cross():
# Auto-correlation and cross-correlation should be consistent.
for kernel in kernels:
if kernel == kernel_white:
continue # Identity is not satisfied on diagonal
K_auto = kernel(X)
K_cross = kernel(X, X)
assert_almost_equal(K_auto, K_cross, 5)
def test_kernel_diag():
# Test that diag method of kernel returns consistent results.
for kernel in kernels:
K_call_diag = np.diag(kernel(X))
K_diag = kernel.diag(X)
assert_almost_equal(K_call_diag, K_diag, 5)
def test_kernel_operator_commutative():
# Adding kernels and multiplying kernels should be commutative.
# Check addition
assert_almost_equal((RBF(2.0) + 1.0)(X),
(1.0 + RBF(2.0))(X))
# Check multiplication
assert_almost_equal((3.0 * RBF(2.0))(X),
(RBF(2.0) * 3.0)(X))
def test_kernel_anisotropic():
# Anisotropic kernel should be consistent with isotropic kernels.
kernel = 3.0 * RBF([0.5, 2.0])
K = kernel(X)
X1 = np.array(X)
X1[:, 0] *= 4
K1 = 3.0 * RBF(2.0)(X1)
assert_almost_equal(K, K1)
X2 = np.array(X)
X2[:, 1] /= 4
K2 = 3.0 * RBF(0.5)(X2)
assert_almost_equal(K, K2)
# Check getting and setting via theta
kernel.theta = kernel.theta + np.log(2)
assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
def test_kernel_stationary():
# Test stationarity of kernels.
for kernel in kernels:
if not kernel.is_stationary():
continue
K = kernel(X, X + 1)
assert_almost_equal(K[0, 0], np.diag(K))
def check_hyperparameters_equal(kernel1, kernel2):
# Check that hyperparameters of two kernels are equal
for attr in set(dir(kernel1) + dir(kernel2)):
if attr.startswith("hyperparameter_"):
attr_value1 = getattr(kernel1, attr)
attr_value2 = getattr(kernel2, attr)
assert_equal(attr_value1, attr_value2)
def test_kernel_clone():
# Test that sklearn's clone works correctly on kernels.
bounds = (1e-5, 1e5)
for kernel in kernels:
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert_equal(kernel, kernel_cloned)
assert_not_equal(id(kernel), id(kernel_cloned))
# Check that all constructor parameters are equal.
assert_equal(kernel.get_params(), kernel_cloned.get_params())
# Check that all hyperparameters are equal.
yield check_hyperparameters_equal, kernel, kernel_cloned
# This test is to verify that using set_params does not
# break clone on kernels.
# This used to break because in kernels such as the RBF, non-trivial
# logic that modified the length scale used to be in the constructor
# See https://github.com/scikit-learn/scikit-learn/issues/6961
# for more details.
params = kernel.get_params()
        # ExpSineSquared and RationalQuadratic kernels are isotropic.
isotropic_kernels = (ExpSineSquared, RationalQuadratic)
if 'length_scale' in params and not isinstance(kernel,
isotropic_kernels):
length_scale = params['length_scale']
if np.iterable(length_scale):
params['length_scale'] = length_scale[0]
params['length_scale_bounds'] = bounds
else:
params['length_scale'] = [length_scale] * 2
params['length_scale_bounds'] = bounds * 2
kernel_cloned.set_params(**params)
kernel_cloned_clone = clone(kernel_cloned)
assert_equal(kernel_cloned_clone.get_params(),
kernel_cloned.get_params())
assert_not_equal(id(kernel_cloned_clone), id(kernel_cloned))
yield (check_hyperparameters_equal, kernel_cloned,
kernel_cloned_clone)
def test_matern_kernel():
# Test consistency of Matern kernel for special values of nu.
K = Matern(nu=1.5, length_scale=1.0)(X)
# the diagonal elements of a matern kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))
# matern kernel for coef0==0.5 is equal to absolute exponential kernel
K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
K = Matern(nu=0.5, length_scale=1.0)(X)
assert_array_almost_equal(K, K_absexp)
# test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5])
# result in nearly identical results as the general case for coef0 in
# [0.5 + tiny, 1.5 + tiny, 2.5 + tiny]
tiny = 1e-10
for nu in [0.5, 1.5, 2.5]:
K1 = Matern(nu=nu, length_scale=1.0)(X)
K2 = Matern(nu=nu + tiny, length_scale=1.0)(X)
assert_array_almost_equal(K1, K2)
def test_kernel_versus_pairwise():
# Check that GP kernels can also be used as pairwise kernels.
for kernel in kernels:
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
def test_set_get_params():
# Check that set_params()/get_params() is consistent with kernel.theta.
for kernel in kernels:
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
def test_repr_kernels():
# Smoke-test for repr in kernels.
for kernel in kernels:
repr(kernel)
| bsd-3-clause | 396,291,731,981,572,000 | -1,137,064,104,306,428,400 | 38.895238 | 77 | 0.594255 | false |
supersven/intellij-community | python/lib/Lib/site-packages/django/utils/unittest/compatibility.py | 575 | 2096 | import os
import sys
try:
from functools import wraps
except ImportError:
# only needed for Python 2.4
def wraps(_):
def _wraps(func):
return func
return _wraps
__unittest = True
def _relpath_nt(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else:
i += 1
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
# default to posixpath definition
def _relpath_posix(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
if os.path is sys.modules.get('ntpath'):
relpath = _relpath_nt
else:
relpath = _relpath_posix
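# Illustrative examples (added for clarity, not part of the original module),
# assuming a POSIX system:
#   relpath('/srv/project/tests/data', start='/srv/project') -> 'tests/data'
#   relpath('/srv/project', start='/srv/project/tests') -> '..'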
| apache-2.0 | -3,430,007,724,588,478,000 | -1,701,455,985,205,257,000 | 31.75 | 80 | 0.589695 | false |
jay-tyler/ansible | lib/ansible/playbook/taggable.py | 128 | 3278 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import itertools
from six import string_types
from ansible.errors import AnsibleError
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Taggable:
untagged = frozenset(['untagged'])
_tags = FieldAttribute(isa='list', default=[], listof=(string_types,int))
def __init__(self):
super(Taggable, self).__init__()
def _load_tags(self, attr, ds):
if isinstance(ds, list):
return ds
elif isinstance(ds, basestring):
return [ ds ]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
def _get_attr_tags(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
tags = self._attributes['tags']
if tags is None:
tags = []
if hasattr(self, '_get_parent_attribute'):
tags = self._get_parent_attribute('tags', extend=True)
return tags
def evaluate_tags(self, only_tags, skip_tags, all_vars):
''' this checks if the current item should be executed depending on tag options '''
should_run = True
if self.tags:
templar = Templar(loader=self._loader, variables=all_vars)
tags = templar.template(self.tags)
if not isinstance(tags, list):
if tags.find(',') != -1:
tags = set(tags.split(','))
else:
tags = set([tags])
else:
tags = set([i for i,_ in itertools.groupby(tags)])
else:
# this makes isdisjoint work for untagged
tags = self.untagged
if only_tags:
should_run = False
if 'always' in tags or 'all' in only_tags:
should_run = True
elif not tags.isdisjoint(only_tags):
should_run = True
elif 'tagged' in only_tags and tags != self.untagged:
should_run = True
if should_run and skip_tags:
# Check for tags that we need to skip
if 'all' in skip_tags:
if 'always' not in tags or 'always' in skip_tags:
should_run = False
elif not tags.isdisjoint(skip_tags):
should_run = False
elif 'tagged' in skip_tags and tags != self.untagged:
should_run = False
return should_run
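# Behaviour sketch (added for illustration, not part of the original module),
# ignoring templating of the tag values:
#   tags=['setup', 'always'], only_tags={'deploy'} -> runs ('always' wins)
#   tags=['setup'], only_tags={'deploy'} -> skipped
#   untagged task, skip_tags={'tagged'} -> runs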
| gpl-3.0 | 8,146,897,134,874,292,000 | -4,672,202,765,581,645,000 | 32.44898 | 91 | 0.598231 | false |
pselle/calibre | src/calibre/ebooks/lrf/input.py | 14 | 14450 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import textwrap, operator
from copy import deepcopy, copy
from lxml import etree
from calibre import guess_type
class Canvas(etree.XSLTExtension):
def __init__(self, doc, styles, text_block, log):
self.doc = doc
self.styles = styles
self.text_block = text_block
self.log = log
self.processed = set([])
def execute(self, context, self_node, input_node, output_parent):
cid = input_node.get('objid', None)
if cid is None or cid in self.processed:
return
self.processed.add(cid)
input_node = self.doc.xpath('//Canvas[@objid="%s"]'%cid)[0]
objects = list(self.get_objects(input_node))
if len(objects) == 1 and objects[0][0].tag == 'ImageBlock':
self.image_page(input_node, objects[0][0], output_parent)
else:
canvases = [input_node]
for x in input_node.itersiblings():
if x.tag == 'Canvas':
oid = x.get('objid', None)
if oid is not None:
canvases.append(x)
self.processed.add(oid)
else:
break
table = etree.Element('table')
table.text = '\n\t'
for canvas in canvases:
oid = canvas.get('objid')
tr = table.makeelement('tr')
tr.set('id', oid)
tr.tail = '\n\t'
table.append(tr)
for obj, x, y in self.get_objects(canvas):
if obj.tag != 'TextBlock':
self.log.warn(obj.tag, 'elements in Canvas not supported')
continue
td = table.makeelement('td')
self.text_block.render_block(obj, td)
tr.append(td)
output_parent.append(table)
def image_page(self, input_node, block, output_parent):
div = etree.Element('div')
div.set('id', input_node.get('objid', 'scuzzy'))
div.set('class', 'image_page')
width = self.styles.to_num(block.get("xsize", None))
height = self.styles.to_num(block.get("ysize", None))
img = div.makeelement('img')
if width is not None:
img.set('width', str(int(width)))
if height is not None:
img.set('height', str(int(height)))
ref = block.get('refstream', None)
if ref is not None:
imstr = self.doc.xpath('//ImageStream[@objid="%s"]'%ref)
if imstr:
src = imstr[0].get('file', None)
if src:
img.set('src', src)
div.append(img)
output_parent.append(div)
def get_objects(self, node):
for x in node.xpath('descendant::PutObj[@refobj and @x1 and @y1]'):
objs = node.xpath('//*[@objid="%s"]'%x.get('refobj'))
x, y = map(self.styles.to_num, (x.get('x1'), x.get('y1')))
if objs and x is not None and y is not None:
yield objs[0], int(x), int(y)
class MediaType(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
name = input_node.get('file', None)
typ = guess_type(name)[0]
if not typ:
typ = 'application/octet-stream'
output_parent.text = typ
class ImageBlock(etree.XSLTExtension):
def __init__(self, canvas):
etree.XSLTExtension.__init__(self)
self.canvas = canvas
def execute(self, context, self_node, input_node, output_parent):
self.canvas.image_page(input_node, input_node, output_parent)
class RuledLine(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
hr = etree.Element('hr')
output_parent.append(hr)
class TextBlock(etree.XSLTExtension):
def __init__(self, styles, char_button_map, plot_map, log):
etree.XSLTExtension.__init__(self)
self.styles = styles
self.log = log
self.char_button_map = char_button_map
self.plot_map = plot_map
def execute(self, context, self_node, input_node, output_parent):
input_node = deepcopy(input_node)
div = etree.Element('div')
self.render_block(input_node, div)
output_parent.append(div)
def render_block(self, node, root):
ts = node.get('textstyle', None)
classes = []
bs = node.get('blockstyle')
if bs in self.styles.block_style_map:
classes.append('bs%d'%self.styles.block_style_map[bs])
if ts in self.styles.text_style_map:
classes.append('ts%d'%self.styles.text_style_map[ts])
if classes:
root.set('class', ' '.join(classes))
objid = node.get('objid', None)
if objid:
root.set('id', objid)
root.text = node.text
self.root = root
self.parent = root
self.add_text_to = (self.parent, 'text')
self.fix_deep_nesting(node)
for child in node:
self.process_child(child)
def fix_deep_nesting(self, node):
deepest = 1
def depth(node):
parent = node.getparent()
ans = 1
while parent is not None:
ans += 1
parent = parent.getparent()
return ans
for span in node.xpath('descendant::Span'):
d = depth(span)
if d > deepest:
deepest = d
if d > 500:
break
if deepest < 500:
return
self.log.warn('Found deeply nested spans. Flattening.')
#with open('/t/before.xml', 'wb') as f:
# f.write(etree.tostring(node, method='xml'))
spans = [(depth(span), span) for span in node.xpath('descendant::Span')]
spans.sort(key=operator.itemgetter(0), reverse=True)
for depth, span in spans:
if depth < 3:
continue
p = span.getparent()
gp = p.getparent()
idx = p.index(span)
pidx = gp.index(p)
children = list(p)[idx:]
t = children[-1].tail
t = t if t else ''
children[-1].tail = t + (p.tail if p.tail else '')
p.tail = ''
pattrib = dict(**p.attrib) if p.tag == 'Span' else {}
for child in children:
p.remove(child)
if pattrib and child.tag == "Span":
attrib = copy(pattrib)
attrib.update(child.attrib)
child.attrib.update(attrib)
for child in reversed(children):
gp.insert(pidx+1, child)
#with open('/t/after.xml', 'wb') as f:
# f.write(etree.tostring(node, method='xml'))
def add_text(self, text):
if text:
if getattr(self.add_text_to[0], self.add_text_to[1]) is None:
setattr(self.add_text_to[0], self.add_text_to[1], '')
setattr(self.add_text_to[0], self.add_text_to[1],
getattr(self.add_text_to[0], self.add_text_to[1])+ text)
def process_container(self, child, tgt):
idx = self.styles.get_text_styles(child)
if idx is not None:
tgt.set('class', 'ts%d'%idx)
self.parent.append(tgt)
orig_parent = self.parent
self.parent = tgt
self.add_text_to = (self.parent, 'text')
self.add_text(child.text)
for gchild in child:
self.process_child(gchild)
self.parent = orig_parent
self.add_text_to = (tgt, 'tail')
self.add_text(child.tail)
def process_child(self, child):
if child.tag == 'CR':
if self.parent == self.root or self.parent.tag == 'p':
self.parent = self.root.makeelement('p')
self.root.append(self.parent)
self.add_text_to = (self.parent, 'text')
else:
br = self.parent.makeelement('br')
self.parent.append(br)
self.add_text_to = (br, 'tail')
self.add_text(child.tail)
elif child.tag in ('P', 'Span', 'EmpLine', 'NoBR'):
span = self.root.makeelement('span')
if child.tag == 'EmpLine':
td = 'underline' if child.get('emplineposition', 'before') == 'before' else 'overline'
span.set('style', 'text-decoration: '+td)
self.process_container(child, span)
elif child.tag == 'Sup':
sup = self.root.makeelement('sup')
self.process_container(child, sup)
elif child.tag == 'Sub':
sub = self.root.makeelement('sub')
self.process_container(child, sub)
elif child.tag == 'Italic':
sup = self.root.makeelement('i')
self.process_container(child, sup)
elif child.tag == 'CharButton':
a = self.root.makeelement('a')
oid = child.get('refobj', None)
if oid in self.char_button_map:
a.set('href', self.char_button_map[oid])
self.process_container(child, a)
elif child.tag == 'Plot':
xsize = self.styles.to_num(child.get('xsize', None), 166./720)
ysize = self.styles.to_num(child.get('ysize', None), 166./720)
img = self.root.makeelement('img')
if xsize is not None:
img.set('width', str(int(xsize)))
if ysize is not None:
img.set('height', str(int(ysize)))
ro = child.get('refobj', None)
if ro in self.plot_map:
img.set('src', self.plot_map[ro])
self.parent.append(img)
self.add_text_to = (img, 'tail')
self.add_text(child.tail)
else:
self.log.warn('Unhandled Text element:', child.tag)
class Styles(etree.XSLTExtension):
def __init__(self):
etree.XSLTExtension.__init__(self)
self.text_styles, self.block_styles = [], []
self.text_style_map, self.block_style_map = {}, {}
self.CSS = textwrap.dedent('''
.image_page { text-align:center }
''')
def write(self, name='styles.css'):
def join(style):
ans = ['%s : %s;'%(k, v) for k, v in style.items()]
if ans:
ans[-1] = ans[-1][:-1]
return '\n\t'.join(ans)
with open(name, 'wb') as f:
f.write(self.CSS)
for (w, sel) in [(self.text_styles, 'ts'), (self.block_styles,
'bs')]:
for i, s in enumerate(w):
if not s:
continue
rsel = '.%s%d'%(sel, i)
s = join(s)
f.write(rsel + ' {\n\t' + s + '\n}\n\n')
def execute(self, context, self_node, input_node, output_parent):
if input_node.tag == 'TextStyle':
idx = self.get_text_styles(input_node)
if idx is not None:
self.text_style_map[input_node.get('objid')] = idx
else:
idx = self.get_block_styles(input_node)
self.block_style_map[input_node.get('objid')] = idx
def px_to_pt(self, px):
try:
px = float(px)
return px * 72./166.
except:
return None
def color(self, val):
try:
val = int(val, 16)
r, g, b, a = val & 0xFF, (val>>8)&0xFF, (val>>16)&0xFF, (val>>24)&0xFF
if a == 255:
return None
if a == 0:
return 'rgb(%d,%d,%d)'%(r,g,b)
return 'rgba(%d,%d,%d,%f)'%(r,g,b,1.-a/255.)
except:
return None
def get_block_styles(self, node):
ans = {}
sm = self.px_to_pt(node.get('sidemargin', None))
if sm is not None:
ans['margin-left'] = ans['margin-right'] = '%fpt'%sm
ts = self.px_to_pt(node.get('topskip', None))
if ts is not None:
ans['margin-top'] = '%fpt'%ts
fs = self.px_to_pt(node.get('footskip', None))
if fs is not None:
ans['margin-bottom'] = '%fpt'%fs
fw = self.px_to_pt(node.get('framewidth', None))
if fw is not None:
ans['border-width'] = '%fpt'%fw
ans['border-style'] = 'solid'
fc = self.color(node.get('framecolor', None))
if fc is not None:
ans['border-color'] = fc
bc = self.color(node.get('bgcolor', None))
if bc is not None:
ans['background-color'] = bc
if ans not in self.block_styles:
self.block_styles.append(ans)
return self.block_styles.index(ans)
def to_num(self, val, factor=1.):
try:
return float(val)*factor
except:
return None
def get_text_styles(self, node):
ans = {}
fs = self.to_num(node.get('fontsize', None), 0.1)
if fs is not None:
ans['font-size'] = '%fpt'%fs
fw = self.to_num(node.get('fontweight', None))
if fw is not None:
ans['font-weight'] = ('bold' if fw >= 700 else 'normal')
#fn = getattr(obj, 'fontfacename', None)
#if fn is not None:
# fn = cls.FONT_MAP[fn]
# item('font-family: %s;'%fn)
fg = self.color(node.get('textcolor', None))
if fg is not None:
ans['color'] = fg
bg = self.color(node.get('textbgcolor', None))
if bg is not None:
ans['background-color'] = bg
al = node.get('align', None)
if al is not None:
all = dict(head='left', center='center', foot='right')
ans['text-align'] = all.get(al, 'left')
#lh = self.to_num(node.get('linespace', None), 0.1)
#if lh is not None:
# ans['line-height'] = '%fpt'%lh
pi = self.to_num(node.get('parindent', None), 0.1)
if pi is not None:
ans['text-indent'] = '%fpt'%pi
if not ans:
return None
if ans not in self.text_styles:
self.text_styles.append(ans)
return self.text_styles.index(ans)
| gpl-3.0 | 4,807,998,647,459,853,000 | 6,438,458,649,151,763,000 | 34.416667 | 102 | 0.512872 | false |
EliasTouil/simpleBlog | simpleBlog/Lib/sre_compile.py | 123 | 19817 | # -*- coding: utf-8 -*-
#
# Secret Labs' Regular Expression Engine
#
# convert template to internal format
#
# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
import _sre, sys
import sre_parse
from sre_constants import *
assert _sre.MAGIC == MAGIC, "SRE module mismatch"
if _sre.CODESIZE == 2:
MAXCODE = 65535
else:
MAXCODE = 0xFFFFFFFFL
_LITERAL_CODES = set([LITERAL, NOT_LITERAL])
_REPEATING_CODES = set([REPEAT, MIN_REPEAT, MAX_REPEAT])
_SUCCESS_CODES = set([SUCCESS, FAILURE])
_ASSERT_CODES = set([ASSERT, ASSERT_NOT])
# Sets of lowercase characters which have the same uppercase.
_equivalences = (
# LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
(0x69, 0x131), # iı
# LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
(0x73, 0x17f), # sſ
# MICRO SIGN, GREEK SMALL LETTER MU
(0xb5, 0x3bc), # µμ
# COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
(0x345, 0x3b9, 0x1fbe), # \u0345ιι
# GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
(0x3b2, 0x3d0), # βϐ
# GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
(0x3b5, 0x3f5), # εϵ
# GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
(0x3b8, 0x3d1), # θϑ
# GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
(0x3ba, 0x3f0), # κϰ
# GREEK SMALL LETTER PI, GREEK PI SYMBOL
(0x3c0, 0x3d6), # πϖ
# GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
(0x3c1, 0x3f1), # ρϱ
# GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
(0x3c2, 0x3c3), # ςσ
# GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
(0x3c6, 0x3d5), # φϕ
# LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
(0x1e61, 0x1e9b), # ṡẛ
)
# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
for t in _equivalences for i in t}
def _compile(code, pattern, flags):
# internal: compile a (sub)pattern
emit = code.append
_len = len
LITERAL_CODES = _LITERAL_CODES
REPEATING_CODES = _REPEATING_CODES
SUCCESS_CODES = _SUCCESS_CODES
ASSERT_CODES = _ASSERT_CODES
if (flags & SRE_FLAG_IGNORECASE and
not (flags & SRE_FLAG_LOCALE) and
flags & SRE_FLAG_UNICODE):
fixes = _ignorecase_fixes
else:
fixes = None
for op, av in pattern:
if op in LITERAL_CODES:
if flags & SRE_FLAG_IGNORECASE:
lo = _sre.getlower(av, flags)
if fixes and lo in fixes:
emit(OPCODES[IN_IGNORE])
skip = _len(code); emit(0)
if op is NOT_LITERAL:
emit(OPCODES[NEGATE])
for k in (lo,) + fixes[lo]:
emit(OPCODES[LITERAL])
emit(k)
emit(OPCODES[FAILURE])
code[skip] = _len(code) - skip
else:
emit(OPCODES[OP_IGNORE[op]])
emit(lo)
else:
emit(OPCODES[op])
emit(av)
elif op is IN:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
def fixup(literal, flags=flags):
return _sre.getlower(literal, flags)
else:
emit(OPCODES[op])
fixup = None
skip = _len(code); emit(0)
_compile_charset(av, flags, code, fixup, fixes)
code[skip] = _len(code) - skip
elif op is ANY:
if flags & SRE_FLAG_DOTALL:
emit(OPCODES[ANY_ALL])
else:
emit(OPCODES[ANY])
elif op in REPEATING_CODES:
if flags & SRE_FLAG_TEMPLATE:
raise error, "internal: unsupported template operator"
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif _simple(av) and op is not REPEAT:
if op is MAX_REPEAT:
emit(OPCODES[REPEAT_ONE])
else:
emit(OPCODES[MIN_REPEAT_ONE])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
else:
emit(OPCODES[REPEAT])
skip = _len(code); emit(0)
emit(av[0])
emit(av[1])
_compile(code, av[2], flags)
code[skip] = _len(code) - skip
if op is MAX_REPEAT:
emit(OPCODES[MAX_UNTIL])
else:
emit(OPCODES[MIN_UNTIL])
elif op is SUBPATTERN:
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2)
# _compile_info(code, av[1], flags)
_compile(code, av[1], flags)
if av[0]:
emit(OPCODES[MARK])
emit((av[0]-1)*2+1)
elif op in SUCCESS_CODES:
emit(OPCODES[op])
elif op in ASSERT_CODES:
emit(OPCODES[op])
skip = _len(code); emit(0)
if av[0] >= 0:
emit(0) # look ahead
else:
lo, hi = av[1].getwidth()
if lo != hi:
raise error, "look-behind requires fixed-width pattern"
emit(lo) # look behind
_compile(code, av[1], flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is CALL:
emit(OPCODES[op])
skip = _len(code); emit(0)
_compile(code, av, flags)
emit(OPCODES[SUCCESS])
code[skip] = _len(code) - skip
elif op is AT:
emit(OPCODES[op])
if flags & SRE_FLAG_MULTILINE:
av = AT_MULTILINE.get(av, av)
if flags & SRE_FLAG_LOCALE:
av = AT_LOCALE.get(av, av)
elif flags & SRE_FLAG_UNICODE:
av = AT_UNICODE.get(av, av)
emit(ATCODES[av])
elif op is BRANCH:
emit(OPCODES[op])
tail = []
tailappend = tail.append
for av in av[1]:
skip = _len(code); emit(0)
# _compile_info(code, av, flags)
_compile(code, av, flags)
emit(OPCODES[JUMP])
tailappend(_len(code)); emit(0)
code[skip] = _len(code) - skip
emit(0) # end of branch
for tail in tail:
code[tail] = _len(code) - tail
elif op is CATEGORY:
emit(OPCODES[op])
if flags & SRE_FLAG_LOCALE:
av = CH_LOCALE[av]
elif flags & SRE_FLAG_UNICODE:
av = CH_UNICODE[av]
emit(CHCODES[av])
elif op is GROUPREF:
if flags & SRE_FLAG_IGNORECASE:
emit(OPCODES[OP_IGNORE[op]])
else:
emit(OPCODES[op])
emit(av-1)
elif op is GROUPREF_EXISTS:
emit(OPCODES[op])
emit(av[0]-1)
skipyes = _len(code); emit(0)
_compile(code, av[1], flags)
if av[2]:
emit(OPCODES[JUMP])
skipno = _len(code); emit(0)
code[skipyes] = _len(code) - skipyes + 1
_compile(code, av[2], flags)
code[skipno] = _len(code) - skipno
else:
code[skipyes] = _len(code) - skipyes + 1
else:
raise ValueError, ("unsupported operand type", op)
def _compile_charset(charset, flags, code, fixup=None, fixes=None):
# compile charset subprogram
emit = code.append
for op, av in _optimize_charset(charset, fixup, fixes,
flags & SRE_FLAG_UNICODE):
emit(OPCODES[op])
if op is NEGATE:
pass
elif op is LITERAL:
emit(av)
elif op is RANGE:
emit(av[0])
emit(av[1])
elif op is CHARSET:
code.extend(av)
elif op is BIGCHARSET:
code.extend(av)
elif op is CATEGORY:
if flags & SRE_FLAG_LOCALE:
emit(CHCODES[CH_LOCALE[av]])
elif flags & SRE_FLAG_UNICODE:
emit(CHCODES[CH_UNICODE[av]])
else:
emit(CHCODES[av])
else:
raise error, "internal: unsupported set operator"
emit(OPCODES[FAILURE])
def _optimize_charset(charset, fixup, fixes, isunicode):
# internal: optimize character set
out = []
tail = []
charmap = bytearray(256)
for op, av in charset:
while True:
try:
if op is LITERAL:
if fixup:
i = fixup(av)
charmap[i] = 1
if fixes and i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
charmap[av] = 1
elif op is RANGE:
r = range(av[0], av[1]+1)
if fixup:
r = map(fixup, r)
if fixup and fixes:
for i in r:
charmap[i] = 1
if i in fixes:
for k in fixes[i]:
charmap[k] = 1
else:
for i in r:
charmap[i] = 1
elif op is NEGATE:
out.append((op, av))
else:
tail.append((op, av))
except IndexError:
if len(charmap) == 256:
# character set contains non-UCS1 character codes
charmap += b'\0' * 0xff00
continue
# character set contains non-BMP character codes
if fixup and isunicode and op is RANGE:
lo, hi = av
ranges = [av]
# There are only two ranges of cased astral characters:
# 10400-1044F (Deseret) and 118A0-118DF (Warang Citi).
_fixup_range(max(0x10000, lo), min(0x11fff, hi),
ranges, fixup)
for lo, hi in ranges:
if lo == hi:
tail.append((LITERAL, hi))
else:
tail.append((RANGE, (lo, hi)))
else:
tail.append((op, av))
break
# compress character map
runs = []
q = 0
while True:
p = charmap.find(b'\1', q)
if p < 0:
break
if len(runs) >= 2:
runs = None
break
q = charmap.find(b'\0', p)
if q < 0:
runs.append((p, len(charmap)))
break
runs.append((p, q))
if runs is not None:
# use literal/range
for p, q in runs:
if q - p == 1:
out.append((LITERAL, p))
else:
out.append((RANGE, (p, q - 1)))
out += tail
# if the case was changed or new representation is more compact
if fixup or len(out) < len(charset):
return out
# else original character set is good enough
return charset
# use bitmap
if len(charmap) == 256:
data = _mk_bitmap(charmap)
out.append((CHARSET, data))
out += tail
return out
# To represent a big charset, first a bitmap of all characters in the
# set is constructed. Then, this bitmap is sliced into chunks of 256
# characters, duplicate chunks are eliminated, and each chunk is
# given a number. In the compiled expression, the charset is
# represented by a 32-bit word sequence, consisting of one word for
# the number of different chunks, a sequence of 256 bytes (64 words)
# of chunk numbers indexed by their original chunk position, and a
# sequence of 256-bit chunks (8 words each).
# Compression is normally good: in a typical charset, large ranges of
# Unicode will be either completely excluded (e.g. if only cyrillic
# letters are to be matched), or completely included (e.g. if large
# subranges of Kanji match). These ranges will be represented by
# chunks of all one-bits or all zero-bits.
# Matching can be also done efficiently: the more significant byte of
# the Unicode character is an index into the chunk number, and the
# less significant byte is a bit index in the chunk (just like the
# CHARSET matching).
# In UCS-4 mode, the BIGCHARSET opcode still supports only subsets
# of the basic multilingual plane; an efficient representation
# for all of Unicode has not yet been developed.
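    # Worked example (added for illustration, not in the original source): for
    # a class such as [a-z\u0391-\u03c9\u0410-\u044f] (ASCII, Greek and
    # Cyrillic letters) only three of the 256-character chunks contain set
    # bits, so four distinct chunks are kept (the shared all-zero chunk plus
    # those three) and the emitted data is [4, <the 256 chunk indices packed
    # into words>, <4 chunks of 256 bits each>] instead of a 65536-bit map.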
charmap = bytes(charmap) # should be hashable
comps = {}
mapping = bytearray(256)
block = 0
data = bytearray()
for i in range(0, 65536, 256):
chunk = charmap[i: i + 256]
if chunk in comps:
mapping[i // 256] = comps[chunk]
else:
mapping[i // 256] = comps[chunk] = block
block += 1
data += chunk
data = _mk_bitmap(data)
data[0:0] = [block] + _bytes_to_codes(mapping)
out.append((BIGCHARSET, data))
out += tail
return out
def _fixup_range(lo, hi, ranges, fixup):
for i in map(fixup, range(lo, hi+1)):
for k, (lo, hi) in enumerate(ranges):
if i < lo:
                if i == lo - 1:
ranges[k] = (i, hi)
else:
ranges.insert(k, (i, i))
break
elif i > hi:
if i == hi + 1:
ranges[k] = (lo, i)
break
else:
break
else:
ranges.append((i, i))
_CODEBITS = _sre.CODESIZE * 8
_BITS_TRANS = b'0' + b'1' * 255
def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
s = bytes(bits).translate(_BITS_TRANS)[::-1]
return [_int(s[i - _CODEBITS: i], 2)
for i in range(len(s), 0, -_CODEBITS)]
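# Worked example (added for illustration, not part of the original module):
# _mk_bitmap(bytearray([1, 0, 0, 0, 1, 0, 0, 0] + [0] * 248)) packs the 256
# flags least-significant-bit first, so the first code word is 0x11 (bits 0
# and 4 set) and every remaining word is 0.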
def _bytes_to_codes(b):
# Convert block indices to word array
import array
if _sre.CODESIZE == 2:
code = 'H'
else:
code = 'I'
a = array.array(code, bytes(b))
assert a.itemsize == _sre.CODESIZE
assert len(a) * a.itemsize == len(b)
return a.tolist()
def _simple(av):
# check if av is a "simple" operator
lo, hi = av[2].getwidth()
return lo == hi == 1 and av[2][0][0] != SUBPATTERN
def _compile_info(code, pattern, flags):
# internal: compile an info block. in the current version,
# this contains min/max pattern width, and an optional literal
# prefix or a character map
lo, hi = pattern.getwidth()
if lo == 0:
return # not worth it
# look for a literal prefix
prefix = []
prefixappend = prefix.append
prefix_skip = 0
charset = [] # not used
charsetappend = charset.append
if not (flags & SRE_FLAG_IGNORECASE):
# look for literal prefix
for op, av in pattern.data:
if op is LITERAL:
if len(prefix) == prefix_skip:
prefix_skip = prefix_skip + 1
prefixappend(av)
elif op is SUBPATTERN and len(av[1]) == 1:
op, av = av[1][0]
if op is LITERAL:
prefixappend(av)
else:
break
else:
break
# if no prefix, look for charset prefix
if not prefix and pattern.data:
op, av = pattern.data[0]
if op is SUBPATTERN and av[1]:
op, av = av[1][0]
if op is LITERAL:
charsetappend((op, av))
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is BRANCH:
c = []
cappend = c.append
for p in av[1]:
if not p:
break
op, av = p[0]
if op is LITERAL:
cappend((op, av))
else:
break
else:
charset = c
elif op is IN:
charset = av
## if prefix:
## print "*** PREFIX", prefix, prefix_skip
## if charset:
## print "*** CHARSET", charset
# add an info block
emit = code.append
emit(OPCODES[INFO])
skip = len(code); emit(0)
# literal flag
mask = 0
if prefix:
mask = SRE_INFO_PREFIX
if len(prefix) == prefix_skip == len(pattern.data):
mask = mask + SRE_INFO_LITERAL
elif charset:
mask = mask + SRE_INFO_CHARSET
emit(mask)
# pattern length
if lo < MAXCODE:
emit(lo)
else:
emit(MAXCODE)
prefix = prefix[:MAXCODE]
if hi < MAXCODE:
emit(hi)
else:
emit(0)
# add literal prefix
if prefix:
emit(len(prefix)) # length
emit(prefix_skip) # skip
code.extend(prefix)
# generate overlap table
table = [-1] + ([0]*len(prefix))
for i in xrange(len(prefix)):
table[i+1] = table[i]+1
while table[i+1] > 0 and prefix[i] != prefix[table[i+1]-1]:
table[i+1] = table[table[i+1]-1]+1
code.extend(table[1:]) # don't store first entry
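        # e.g. for a literal prefix spelling "ababc" the loop above yields
        # table == [-1, 0, 0, 1, 2, 0], so [0, 0, 1, 2, 0] is emitted; this is
        # the usual KMP-style overlap (failure) table over the prefix codes.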
elif charset:
_compile_charset(charset, flags, code)
code[skip] = len(code) - skip
try:
unicode
except NameError:
STRING_TYPES = (type(""),)
else:
STRING_TYPES = (type(""), type(unicode("")))
def isstring(obj):
for tp in STRING_TYPES:
if isinstance(obj, tp):
return 1
return 0
def _code(p, flags):
flags = p.pattern.flags | flags
code = []
# compile info block
_compile_info(code, p, flags)
# compile the pattern
_compile(code, p.data, flags)
code.append(OPCODES[SUCCESS])
return code
def compile(p, flags=0):
# internal: convert pattern list to internal format
if isstring(p):
pattern = p
p = sre_parse.parse(p, flags)
else:
pattern = None
code = _code(p, flags)
# print code
# XXX: <fl> get rid of this limitation!
if p.pattern.groups > 100:
raise AssertionError(
"sorry, but this version only supports 100 named groups"
)
# map in either direction
groupindex = p.pattern.groupdict
indexgroup = [None] * p.pattern.groups
for k, i in groupindex.items():
indexgroup[i] = k
return _sre.compile(
pattern, flags | p.pattern.flags, code,
p.pattern.groups-1,
groupindex, indexgroup
)
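# Example usage (a sketch): compile() accepts either a pattern string or an
# already parsed sre_parse pattern object, e.g.
#   compiled = compile("ab+", 0)                      # parse, then compile
#   code_words = _code(sre_parse.parse("ab+"), 0)     # raw code word list only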
| gpl-3.0 | -6,565,722,479,541,834,000 | -8,703,340,671,579,126,000 | 32.204698 | 83 | 0.4905 | false |
EmadMokhtar/halaqat | students/forms.py | 1 | 6022 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Layout
from django import forms
from django.utils.translation import ugettext_lazy as _
from students.models import Student
class StudentForm(forms.ModelForm):
dob = forms.DateField(widget=forms.DateInput(
attrs={'class': 'datepicker'}),
label=_('DOB'))
address = forms.CharField(widget=forms.Textarea(),
label=_('Address'))
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div(
HTML(_('<h3 class="panel-title">Basic Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('civil_id', css_class='col-md-6'),
Div('dob', css_class='col-md-6'),
css_class='row'
),
Div(
Div('gender', css_class='col-md-6'),
Div('nationality', css_class='col-md-6'),
css_class='row'
),
Div(
Div('school', css_class='col-md-6'),
Div('grade', css_class='col-md-6'),
css_class='row'
),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Contact Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('mobile_number', css_class='col-md-6'),
Div('home_number', css_class='col-md-6'),
css_class='row'),
Div(
Div('parent_number', css_class='col-md-6'),
Div('parent_email', css_class='col-md-6'),
css_class='row'),
Div(
Div('address', css_class='col-md-12'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Halaqat Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('halaqat_class', css_class='col-md-6'),
Div('status', css_class='col-md-6'), css_class='row'),
Div(
Div('chapter_memorized', css_class='col-md-6'),
Div('chapter_memorized_with_center', css_class='col-md-6'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default',
),
)
class Meta:
model = Student
fields = ('dob', 'gender', 'civil_id', 'mobile_number', 'home_number',
'parent_number', 'grade', 'school', 'nationality', 'address',
'parent_email', 'halaqat_class', 'chapter_memorized',
'chapter_memorized_with_center', 'status')
class StudentChangeForm(forms.ModelForm):
dob = forms.DateField(widget=forms.DateInput(
attrs={'class': 'datepicker'}),
label=_('DOB'))
address = forms.CharField(widget=forms.Textarea(),
label=_('Address'))
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div(
HTML(_('<h3 class="panel-title">Basic Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('civil_id', css_class='col-md-6'),
Div('dob', css_class='col-md-6'), css_class='row'
),
Div(
Div('gender', css_class='col-md-6'),
Div('nationality', css_class='col-md-6'), css_class='row'
),
Div(
Div('school', css_class='col-md-6'),
Div('grade', css_class='col-md-6'), css_class='row'
), css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Contact Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('mobile_number', css_class='col-md-6'),
Div('home_number', css_class='col-md-6'), css_class='row'),
Div(
Div('parent_number', css_class='col-md-6'),
Div('parent_email', css_class='col-md-6'),
css_class='row'),
Div(Div('address', css_class='col-md-12'), css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default', ),
Div(
Div(
HTML(_('<h3 class="panel-title">Halaqat Info</h3>')),
css_class='panel-heading',
),
Div(
Div(
Div('halaqat_class', css_class='col-md-6'),
Div('status', css_class='col-md-6'), css_class='row'),
Div(
Div('chapter_memorized', css_class='col-md-6'),
Div('chapter_memorized_with_center', css_class='col-md-6'),
css_class='row'),
css_class='panel-body'), # Fields
css_class='panel panel-default',
),
)
class Meta:
model = Student
fields = ('dob', 'gender', 'civil_id', 'mobile_number', 'home_number',
'parent_number', 'grade', 'school', 'nationality', 'address',
'parent_email', 'halaqat_class', 'chapter_memorized',
'chapter_memorized_with_center', 'status')
| mit | -7,893,167,685,293,235,000 | -1,998,534,279,946,623,700 | 38.359477 | 79 | 0.436898 | false |
punchagan/zulip | zerver/views/zephyr.py | 2 | 2717 | import base64
import logging
import re
import shlex
import subprocess
from typing import Optional
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import authenticated_json_view
from zerver.lib.ccache import make_ccache
from zerver.lib.pysa import mark_sanitized
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.users import get_api_key
from zerver.models import UserProfile
# Hack for mit.edu users whose Kerberos usernames don't match what they zephyr
# as. The key is for Kerberos and the value is for zephyr.
kerberos_alter_egos = {
"golem": "ctl",
}
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(
request: HttpRequest, user_profile: UserProfile, cred: Optional[str] = REQ(default=None)
) -> HttpResponse:
global kerberos_alter_egos
if cred is None:
return json_error(_("Could not find Kerberos credential"))
if not user_profile.realm.webathena_enabled:
return json_error(_("Webathena login not enabled"))
try:
parsed_cred = orjson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user in kerberos_alter_egos:
user = kerberos_alter_egos[user]
assert user == user_profile.email.split("@")[0]
# Limit characters in usernames to valid MIT usernames
# This is important for security since DNS is not secure.
assert re.match(r"^[a-z0-9_.-]+$", user) is not None
ccache = make_ccache(parsed_cred)
# 'user' has been verified to contain only benign characters that won't
# help with shell injection.
user = mark_sanitized(user)
# 'ccache' is only written to disk by the script and used as a kerberos
# credential cache file.
ccache = mark_sanitized(ccache)
except Exception:
return json_error(_("Invalid Kerberos cache"))
# TODO: Send these data via (say) RabbitMQ
try:
api_key = get_api_key(user_profile)
command = [
"/home/zulip/python-zulip-api/zulip/integrations/zephyr/process_ccache",
user,
api_key,
base64.b64encode(ccache).decode("utf-8"),
]
subprocess.check_call(
["ssh", settings.PERSONAL_ZMIRROR_SERVER, "--", " ".join(map(shlex.quote, command))]
)
except subprocess.CalledProcessError:
logging.exception("Error updating the user's ccache", stack_info=True)
return json_error(_("We were unable to set up mirroring for you"))
return json_success()
| apache-2.0 | -2,404,918,311,522,012,000 | -7,826,640,585,295,096,000 | 34.75 | 96 | 0.681266 | false |
maurizi/otm-core | opentreemap/exporter/user.py | 12 | 4354 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import csv
import json
from datetime import datetime
from contextlib import contextmanager
from django.core.exceptions import ValidationError
from django.db.models import Q
from treemap.lib.dates import DATETIME_FORMAT
from treemap.models import User, Audit
from exporter.util import sanitize_unicode_record
def write_users(data_format, *args, **kwargs):
fn = _write_users_csv if data_format == 'csv' else _write_users_json
fn(*args, **kwargs)
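# Example usage (sketch; some_timestamp is a placeholder string):
#   with open('users.csv', 'w') as f:
#       write_users('csv', f, instance, min_join_ts=some_timestamp)
# where timestamp strings must be formatted per treemap.lib.dates.DATETIME_FORMAT.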
def _write_users_csv(csv_obj, instance, min_join_ts=None, min_edit_ts=None):
field_names = ['username', 'email', 'first_name',
'last_name', 'email_hash',
'allow_email_contact', 'role', 'created', 'organization',
'last_edit_model', 'last_edit_model_id',
'last_edit_instance_id', 'last_edit_field',
'last_edit_previous_value', 'last_edit_current_value',
'last_edit_user_id', 'last_edit_action',
'last_edit_requires_auth', 'last_edit_ref',
'last_edit_created']
writer = csv.DictWriter(csv_obj, field_names)
writer.writeheader()
for user in _users_export(instance, min_join_ts, min_edit_ts):
writer.writerow(_user_as_dict(user, instance))
def _write_users_json(json_obj, instance, min_join_ts=None, min_edit_ts=None):
users = _users_export(instance, min_join_ts, min_edit_ts)
users_list = [_user_as_dict(user, instance) for user in users]
json_obj.write(json.dumps(users_list))
def _users_export(instance, min_join_ts, min_edit_ts):
users = User.objects.filter(instance=instance)\
.order_by('username')
if min_join_ts:
with _date_filter(min_join_ts, 'minJoinDate') as min_join_date:
iuser_ids = Audit.objects.filter(instance=instance)\
.filter(model='InstanceUser')\
.filter(created__gt=min_join_date)\
.distinct('model_id')\
.values_list('model_id', flat=True)
users = users.filter(instanceuser__in=iuser_ids)
if min_edit_ts:
with _date_filter(min_edit_ts, 'minEditDate') as min_edit_date:
user_ids = Audit.objects\
.filter(instance=instance)\
.filter(Q(created__gt=min_edit_date) |
Q(updated__gt=min_edit_date))\
.distinct('user')\
.values_list('user_id', flat=True)
users = users.filter(id__in=user_ids)
return users
def _user_as_dict(user, instance):
iuser = user.get_instance_user(instance)
role_name = None
if iuser:
role_name = iuser.role.name
email = ''
if user.allow_email_contact:
email = user.email
modeldata = {'username': user.username,
'organization': user.get_organization(),
'first_name': user.get_first_name(),
'last_name': user.get_last_name(),
'email': email,
'email_hash': user.email_hash,
'allow_email_contact': str(user.allow_email_contact),
'created': str(user.created),
'role': role_name}
last_edits = Audit.objects.filter(instance=instance,
user=user)\
.order_by('-updated')[:1]
if last_edits:
last_edit = last_edits[0]
modeldata.update({'last_edit_%s' % k: v
for (k, v) in last_edit.dict().iteritems()})
return sanitize_unicode_record(modeldata)
@contextmanager
def _date_filter(timestamp, filter_name):
try:
filter_date = datetime.strptime(timestamp, DATETIME_FORMAT)
except ValueError:
raise ValidationError("%(filter_name)s='%(ts)s' not a valid timestamp "
"of format: %(format)s"
% {"ts": timestamp,
"format": DATETIME_FORMAT,
"filter_name": filter_name})
yield filter_date
| agpl-3.0 | -4,571,690,568,984,672,000 | 7,341,097,339,251,570,000 | 35.588235 | 79 | 0.552136 | false |
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/numpy/distutils/fcompiler/lahey.py | 229 | 1438 | from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.fcompiler import FCompiler
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
compiler_type = 'lahey'
description = 'Lahey/Fujitsu Fortran 95 Compiler'
version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : ["lf95", "--fix"],
'compiler_fix' : ["lf95", "--fix"],
'compiler_f90' : ["lf95"],
'linker_so' : ["lf95", "-shared"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def get_flags_opt(self):
return ['-O']
def get_flags_debug(self):
return ['-g', '--chk', '--chkglobal']
def get_library_dirs(self):
opt = []
d = os.environ.get('LAHEY')
if d:
opt.append(os.path.join(d, 'lib'))
return opt
def get_libraries(self):
opt = []
opt.extend(['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6'])
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='lahey')
compiler.customize()
print(compiler.get_version())
| artistic-2.0 | 4,636,940,198,872,624,000 | 8,355,580,418,837,214,000 | 28.346939 | 88 | 0.569541 | false |
popazerty/test-1 | RecordTimer.py | 1 | 39985 | import os
from enigma import eEPGCache, getBestPlayableServiceReference, \
eServiceReference, iRecordableService, quitMainloop, eActionMap, setPreferredTuner
from Components.config import config
from Components.UsageConfig import defaultMoviePath
from Components.TimerSanityCheck import TimerSanityCheck
from Screens.MessageBox import MessageBox
import Screens.Standby
import Screens.InfoBar
from Tools import Directories, Notifications, ASCIItranslit, Trashcan
from Tools.XMLTools import stringToXML
import timer
import xml.etree.cElementTree
import NavigationInstance
from ServiceReference import ServiceReference
from time import localtime, strftime, ctime, time
from bisect import insort
from sys import maxint
# ok, for descriptions etc we have:
# service reference (to get the service name)
# name (title)
# description (description)
# event data (ONLY for time adjustments etc.)
# parses an event, and gives out a (begin, end, name, description, eit)-tuple.
# begin and end will be corrected
def parseEvent(ev, description = True):
if description:
name = ev.getEventName()
description = ev.getShortDescription()
if description == "":
description = ev.getExtendedDescription()
else:
name = ""
description = ""
begin = ev.getBeginTime()
end = begin + ev.getDuration()
eit = ev.getEventId()
begin -= config.recording.margin_before.value * 60
end += config.recording.margin_after.value * 60
return (begin, end, name, description, eit)
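# Example (sketch): for an EPG event object ev,
#   begin, end, name, description, eit = parseEvent(ev)
# returns the event's begin/end widened by the configured recording margins.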
class AFTEREVENT:
NONE = 0
STANDBY = 1
DEEPSTANDBY = 2
AUTO = 3
def findSafeRecordPath(dirname):
if not dirname:
return None
from Components import Harddisk
dirname = os.path.realpath(dirname)
mountpoint = Harddisk.findMountPoint(dirname)
if mountpoint in ('/', '/media'):
print '[RecordTimer] media is not mounted:', dirname
return None
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
except Exception, ex:
print '[RecordTimer] Failed to create dir "%s":' % dirname, ex
return None
return dirname
def checkForRecordings():
if NavigationInstance.instance.getRecordings():
return True
rec_time = NavigationInstance.instance.RecordTimer.getNextTimerTime(isWakeup=True)
return rec_time > 0 and (rec_time - time()) < 360
# please do not translate log messages
class RecordTimerEntry(timer.TimerEntry, object):
######### the following static methods and members are only in use when the box is in (soft) standby
wasInStandby = False
wasInDeepStandby = False
receiveRecordEvents = False
@staticmethod
def keypress(key=None, flag=1):
if flag and (RecordTimerEntry.wasInStandby or RecordTimerEntry.wasInDeepStandby):
RecordTimerEntry.wasInStandby = False
RecordTimerEntry.wasInDeepStandby = False
eActionMap.getInstance().unbindAction('', RecordTimerEntry.keypress)
@staticmethod
def setWasInDeepStandby():
RecordTimerEntry.wasInDeepStandby = True
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
@staticmethod
def setWasInStandby():
if not RecordTimerEntry.wasInStandby:
if not RecordTimerEntry.wasInDeepStandby:
eActionMap.getInstance().bindAction('', -maxint - 1, RecordTimerEntry.keypress)
RecordTimerEntry.wasInDeepStandby = False
RecordTimerEntry.wasInStandby = True
@staticmethod
def shutdown():
quitMainloop(1)
@staticmethod
def staticGotRecordEvent(recservice, event):
if event == iRecordableService.evEnd:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evEnd)"
if not checkForRecordings():
print "No recordings busy of sceduled within 6 minutes so shutdown"
RecordTimerEntry.shutdown() # immediate shutdown
elif event == iRecordableService.evStart:
print "RecordTimer.staticGotRecordEvent(iRecordableService.evStart)"
@staticmethod
def stopTryQuitMainloop():
print "RecordTimer.stopTryQuitMainloop"
NavigationInstance.instance.record_event.remove(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = False
@staticmethod
def TryQuitMainloop():
if not RecordTimerEntry.receiveRecordEvents and Screens.Standby.inStandby:
print "RecordTimer.TryQuitMainloop"
NavigationInstance.instance.record_event.append(RecordTimerEntry.staticGotRecordEvent)
RecordTimerEntry.receiveRecordEvents = True
			# send a fake event... to check whether other recordings are running or
			# other timers start in a few seconds
RecordTimerEntry.staticGotRecordEvent(None, iRecordableService.evEnd)
#################################################################
def __init__(self, serviceref, begin, end, name, description, eit, disabled = False, justplay = False, afterEvent = AFTEREVENT.AUTO, checkOldTimers = False, dirname = None, tags = None, descramble = True, record_ecm = False, isAutoTimer = False, always_zap = False, zap_wakeup = "always", rename_repeat = True):
timer.TimerEntry.__init__(self, int(begin), int(end))
if checkOldTimers == True:
if self.begin < time() - 1209600:
self.begin = int(time())
if self.end < self.begin:
self.end = self.begin
assert isinstance(serviceref, ServiceReference)
if serviceref and serviceref.isRecordable():
self.service_ref = serviceref
else:
self.service_ref = ServiceReference(None)
self.eit = eit
self.dontSave = False
self.name = name
self.description = description
self.disabled = disabled
self.timer = None
self.__record_service = None
self.start_prepare = 0
self.justplay = justplay
self.always_zap = always_zap
self.zap_wakeup = zap_wakeup
self.afterEvent = afterEvent
self.dirname = dirname
self.dirnameHadToFallback = False
self.autoincrease = False
self.autoincreasetime = 3600 * 24 # 1 day
self.tags = tags or []
self.descramble = descramble
self.record_ecm = record_ecm
self.rename_repeat = rename_repeat
self.needChangePriorityFrontend = config.usage.recording_frontend_priority.value != "-2" and config.usage.recording_frontend_priority.value != config.usage.frontend_priority.value
self.change_frontend = False
self.isAutoTimer = isAutoTimer
self.log_entries = []
self.resetState()
def __repr__(self):
return "RecordTimerEntry(name=%s, begin=%s, serviceref=%s, justplay=%s, isAutoTimer=%s)" % (self.name, ctime(self.begin), self.service_ref, self.justplay, self.isAutoTimer)
def log(self, code, msg):
self.log_entries.append((int(time()), code, msg))
print "[TIMER]", msg
def calculateFilename(self, name=None):
service_name = self.service_ref.getServiceName()
begin_date = strftime("%Y%m%d %H%M", localtime(self.begin))
name = name or self.name
filename = begin_date + " - " + service_name
if name:
if config.recording.filename_composition.value == "short":
filename = strftime("%Y%m%d", localtime(self.begin)) + " - " + name
elif config.recording.filename_composition.value == "long":
filename += " - " + name + " - " + self.description
else:
filename += " - " + name # standard
if config.recording.ascii_filenames.value:
filename = ASCIItranslit.legacyEncode(filename)
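		# At this point filename looks like e.g.
		# "20240105 2015 - Service Name - Event Name" (standard composition).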
if not self.dirname:
dirname = findSafeRecordPath(defaultMoviePath())
else:
dirname = findSafeRecordPath(self.dirname)
if dirname is None:
dirname = findSafeRecordPath(defaultMoviePath())
self.dirnameHadToFallback = True
if not dirname:
return None
self.Filename = Directories.getRecordingFilename(filename, dirname)
self.log(0, "Filename calculated as: '%s'" % self.Filename)
return self.Filename
def tryPrepare(self):
if self.justplay:
return True
else:
if not self.calculateFilename():
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
rec_ref = self.service_ref and self.service_ref.ref
if rec_ref and rec_ref.flags & eServiceReference.isGroup:
rec_ref = getBestPlayableServiceReference(rec_ref, eServiceReference())
if not rec_ref:
self.log(1, "'get best playable service for group... record' failed")
return False
self.setRecordingPreferredTuner()
self.record_service = rec_ref and NavigationInstance.instance.recordService(rec_ref)
if not self.record_service:
self.log(1, "'record service' failed")
self.setRecordingPreferredTuner(setdefault=True)
return False
name = self.name
description = self.description
if self.repeated:
epgcache = eEPGCache.getInstance()
queryTime=self.begin+(self.end-self.begin)/2
evt = epgcache.lookupEventTime(rec_ref, queryTime)
if evt:
if self.rename_repeat:
event_description = evt.getShortDescription()
if not event_description:
event_description = evt.getExtendedDescription()
if event_description and event_description != description:
description = event_description
event_name = evt.getEventName()
if event_name and event_name != name:
name = event_name
if not self.calculateFilename(event_name):
self.do_backoff()
self.start_prepare = time() + self.backoff
return False
event_id = evt.getEventId()
else:
event_id = -1
else:
event_id = self.eit
if event_id is None:
event_id = -1
prep_res=self.record_service.prepare(self.Filename + ".ts", self.begin, self.end, event_id, name.replace("\n", ""), description.replace("\n", ""), ' '.join(self.tags), bool(self.descramble), bool(self.record_ecm))
if prep_res:
if prep_res == -255:
self.log(4, "failed to write meta information")
else:
self.log(2, "'prepare' failed: error %d" % prep_res)
				# we must calculate our start time before the stopRecordService call because in Screens/Standby.py TryQuitMainloop tries to get
# the next start time in evEnd event handler...
self.do_backoff()
self.start_prepare = time() + self.backoff
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
self.setRecordingPreferredTuner(setdefault=True)
return False
return True
def do_backoff(self):
if self.backoff == 0:
self.backoff = 5
else:
self.backoff *= 2
if self.backoff > 100:
self.backoff = 100
self.log(10, "backoff: retry in %d seconds" % self.backoff)
def activate(self):
next_state = self.state + 1
self.log(5, "activating state %d" % next_state)
if next_state == 1:
if self.always_zap:
if Screens.Standby.inStandby:
self.log(5, "wakeup and zap to recording service")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
cur_zap_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_zap_ref and not cur_zap_ref.getPath():# we do not zap away if it is no live service
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
self.log(5, "zap to recording service")
if next_state == self.StatePrepared:
if self.tryPrepare():
self.log(6, "prepare ok, waiting for begin")
# create file to "reserve" the filename
# because another recording at the same time on another service can try to record the same event
				# i.e. cable / sat.. then the second recording needs its own extension... when we create the file
				# here then calculateFilename is happy
if not self.justplay:
open(self.Filename + ".ts", "w").close()
# Give the Trashcan a chance to clean up
try:
Trashcan.instance.cleanIfIdle(self.Filename)
except Exception, e:
print "[TIMER] Failed to call Trashcan.instance.cleanIfIdle()"
print "[TIMER] Error:", e
# fine. it worked, resources are allocated.
self.next_activation = self.begin
self.backoff = 0
return True
self.log(7, "prepare failed")
if self.first_try_prepare:
self.first_try_prepare = False
cur_ref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
if cur_ref and not cur_ref.getPath():
if Screens.Standby.inStandby:
self.setRecordingPreferredTuner()
self.failureCB(True)
elif not config.recording.asktozap.value:
self.log(8, "asking user to zap away")
Notifications.AddNotificationWithCallback(self.failureCB, MessageBox, _("A timer failed to record!\nDisable TV and try again?\n"), timeout=20, default=True)
else: # zap without asking
self.log(9, "zap without asking")
Notifications.AddNotification(MessageBox, _("In order to record a timer, the TV was switched to the recording service!\n"), type=MessageBox.TYPE_INFO, timeout=20)
self.setRecordingPreferredTuner()
self.failureCB(True)
elif cur_ref:
self.log(8, "currently running service is not a live service.. so stop it makes no sense")
else:
self.log(8, "currently no service running... so we dont need to stop it")
return False
elif next_state == self.StateRunning:
# if this timer has been cancelled, just go to "end" state.
if self.cancelled:
return True
if self.justplay:
if Screens.Standby.inStandby:
if RecordTimerEntry.wasInDeepStandby and self.zap_wakeup in ("always", "from_deep_standby") or self.zap_wakeup in ("always", "from_standby"):
self.log(11, "wakeup and zap")
RecordTimerEntry.setWasInStandby()
#set service to zap after standby
Screens.Standby.inStandby.prev_running_service = self.service_ref.ref
Screens.Standby.inStandby.paused_service = None
#wakeup standby
Screens.Standby.inStandby.Power()
else:
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.setWasInStandby()
self.log(11, "zapping")
NavigationInstance.instance.playService(self.service_ref.ref)
return True
else:
self.log(11, "start recording")
if RecordTimerEntry.wasInDeepStandby:
RecordTimerEntry.keypress()
if Screens.Standby.inStandby: #In case some plugin did put the receiver already in standby
config.misc.standbyCounter.value = 0
else:
Notifications.AddNotification(Screens.Standby.Standby, StandbyCounterIncrease=False)
record_res = self.record_service.start()
self.setRecordingPreferredTuner(setdefault=True)
if record_res:
self.log(13, "start record returned %d" % record_res)
self.do_backoff()
# retry
self.begin = time() + self.backoff
return False
# Tell the trashcan we started recording. The trashcan gets events,
# but cannot tell what the associated path is.
Trashcan.instance.markDirty(self.Filename)
return True
elif next_state == self.StateEnded:
old_end = self.end
if self.setAutoincreaseEnd():
self.log(12, "autoincrase recording %d minute(s)" % int((self.end - old_end)/60))
self.state -= 1
return True
self.log(12, "stop recording")
if not self.justplay:
NavigationInstance.instance.stopRecordService(self.record_service)
self.record_service = None
if not checkForRecordings():
if self.afterEvent == AFTEREVENT.DEEPSTANDBY or self.afterEvent == AFTEREVENT.AUTO and (Screens.Standby.inStandby or RecordTimerEntry.wasInStandby) and not config.misc.standbyCounter.value:
if not Screens.Standby.inTryQuitMainloop:
if Screens.Standby.inStandby:
RecordTimerEntry.TryQuitMainloop()
else:
Notifications.AddNotificationWithCallback(self.sendTryQuitMainloopNotification, MessageBox, _("A finished record timer wants to shut down\nyour receiver. Shutdown now?"), timeout=20, default=True)
elif self.afterEvent == AFTEREVENT.STANDBY or self.afterEvent == AFTEREVENT.AUTO and RecordTimerEntry.wasInStandby:
if not Screens.Standby.inStandby:
Notifications.AddNotificationWithCallback(self.sendStandbyNotification, MessageBox, _("A finished record timer wants to set your\nreceiver to standby. Do that now?"), timeout=20, default=True)
else:
RecordTimerEntry.keypress()
return True
def setAutoincreaseEnd(self, entry = None):
if not self.autoincrease:
return False
if entry is None:
new_end = int(time()) + self.autoincreasetime
else:
new_end = entry.begin - 30
dummyentry = RecordTimerEntry(self.service_ref, self.begin, new_end, self.name, self.description, self.eit, disabled=True, justplay = self.justplay, afterEvent = self.afterEvent, dirname = self.dirname, tags = self.tags)
dummyentry.disabled = self.disabled
timersanitycheck = TimerSanityCheck(NavigationInstance.instance.RecordTimer.timer_list, dummyentry)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None and len(simulTimerList) > 1:
new_end = simulTimerList[1].begin
				new_end -= 30 # leave 30 seconds of prepare time
if new_end <= time():
return False
self.end = new_end
return True
def setRecordingPreferredTuner(self, setdefault=False):
if self.needChangePriorityFrontend:
elem = None
if not self.change_frontend and not setdefault:
elem = config.usage.recording_frontend_priority.value
self.change_frontend = True
elif self.change_frontend and setdefault:
elem = config.usage.frontend_priority.value
self.change_frontend = False
if elem is not None:
setPreferredTuner(int(elem))
def sendStandbyNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.Standby)
def sendTryQuitMainloopNotification(self, answer):
RecordTimerEntry.keypress()
if answer:
Notifications.AddNotification(Screens.Standby.TryQuitMainloop, 1)
def getNextActivation(self):
if self.state == self.StateEnded:
return self.end
next_state = self.state + 1
return {self.StatePrepared: self.start_prepare,
self.StateRunning: self.begin,
self.StateEnded: self.end }[next_state]
def failureCB(self, answer):
if answer == True:
self.log(13, "ok, zapped away")
#NavigationInstance.instance.stopUserServices()
NavigationInstance.instance.playService(self.service_ref.ref)
else:
self.log(14, "user didn't want to zap away, record will probably fail")
def timeChanged(self):
old_prepare = self.start_prepare
self.start_prepare = self.begin - self.prepare_time
self.backoff = 0
if int(old_prepare) != int(self.start_prepare):
self.log(15, "record time changed, start prepare is now: %s" % ctime(self.start_prepare))
def gotRecordEvent(self, record, event):
# TODO: this is not working (never true), please fix. (comparing two swig wrapped ePtrs)
if self.__record_service.__deref__() != record.__deref__():
return
self.log(16, "record event %d" % event)
if event == iRecordableService.evRecordWriteError:
print "WRITE ERROR on recording, disk full?"
# show notification. the 'id' will make sure that it will be
# displayed only once, even if more timers are failing at the
# same time. (which is very likely in case of disk fullness)
Notifications.AddPopup(text = _("Write error while recording. Disk full?\n"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "DiskFullMessage")
# ok, the recording has been stopped. we need to properly note
# that in our state, with also keeping the possibility to re-try.
# TODO: this has to be done.
elif event == iRecordableService.evStart:
text = _("A record has been started:\n%s") % self.name
notify = config.usage.show_message_when_recording_starts.value and \
not Screens.Standby.inStandby and \
Screens.InfoBar.InfoBar.instance and \
Screens.InfoBar.InfoBar.instance.execing
if self.dirnameHadToFallback:
text = '\n'.join((text, _("Please note that the previously selected media could not be accessed and therefore the default directory is being used instead.")))
notify = True
if notify:
Notifications.AddPopup(text = text, type = MessageBox.TYPE_INFO, timeout = 3)
elif event == iRecordableService.evRecordAborted:
NavigationInstance.instance.RecordTimer.removeEntry(self)
# we have record_service as property to automatically subscribe to record service events
def setRecordService(self, service):
if self.__record_service is not None:
print "[remove callback]"
NavigationInstance.instance.record_event.remove(self.gotRecordEvent)
self.__record_service = service
if self.__record_service is not None:
print "[add callback]"
NavigationInstance.instance.record_event.append(self.gotRecordEvent)
record_service = property(lambda self: self.__record_service, setRecordService)
def createTimer(xml):
begin = int(xml.get("begin"))
end = int(xml.get("end"))
serviceref = ServiceReference(xml.get("serviceref").encode("utf-8"))
description = xml.get("description").encode("utf-8")
repeated = xml.get("repeated").encode("utf-8")
rename_repeat = long(xml.get("rename_repeat") or "1")
disabled = long(xml.get("disabled") or "0")
justplay = long(xml.get("justplay") or "0")
always_zap = long(xml.get("always_zap") or "0")
zap_wakeup = str(xml.get("zap_wakeup") or "always")
afterevent = str(xml.get("afterevent") or "nothing")
afterevent = {
"nothing": AFTEREVENT.NONE,
"standby": AFTEREVENT.STANDBY,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"auto": AFTEREVENT.AUTO
}[afterevent]
eit = xml.get("eit")
if eit and eit != "None":
eit = long(eit)
else:
eit = None
location = xml.get("location")
if location and location != "None":
location = location.encode("utf-8")
else:
location = None
tags = xml.get("tags")
if tags and tags != "None":
tags = tags.encode("utf-8").split(' ')
else:
tags = None
descramble = int(xml.get("descramble") or "1")
record_ecm = int(xml.get("record_ecm") or "0")
isAutoTimer = int(xml.get("isAutoTimer") or "0")
name = xml.get("name").encode("utf-8")
#filename = xml.get("filename").encode("utf-8")
entry = RecordTimerEntry(serviceref, begin, end, name, description, eit, disabled, justplay, afterevent, dirname = location, tags = tags, descramble = descramble, record_ecm = record_ecm, isAutoTimer = isAutoTimer, always_zap = always_zap, zap_wakeup = zap_wakeup, rename_repeat = rename_repeat)
entry.repeated = int(repeated)
for l in xml.findall("log"):
time = int(l.get("time"))
code = int(l.get("code"))
msg = l.text.strip().encode("utf-8")
entry.log_entries.append((time, code, msg))
return entry
class RecordTimer(timer.Timer):
def __init__(self):
timer.Timer.__init__(self)
self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
try:
self.loadTimer()
except IOError:
print "unable to load timers from file!"
def doActivate(self, w):
# when activating a timer which has already passed,
		# simply abort the timer. don't run through all the stages.
if w.shouldSkip():
w.state = RecordTimerEntry.StateEnded
else:
# when active returns true, this means "accepted".
# otherwise, the current state is kept.
# the timer entry itself will fix up the delay then.
if w.activate():
w.state += 1
self.timer_list.remove(w)
		# did this timer reach the last state?
if w.state < RecordTimerEntry.StateEnded:
# no, sort it into active list
insort(self.timer_list, w)
else:
# yes. Process repeated, and re-add.
if w.repeated:
w.processRepeated()
w.state = RecordTimerEntry.StateWaiting
w.first_try_prepare = True
self.addTimerEntry(w)
else:
# Remove old timers as set in config
self.cleanupDaily(config.recording.keep_timers.value)
insort(self.processed_timers, w)
self.stateChanged(w)
def isRecording(self):
for timer in self.timer_list:
if timer.isRunning() and not timer.justplay:
return True
return False
def loadTimer(self):
# TODO: PATH!
if not Directories.fileExists(self.Filename):
return
try:
doc = xml.etree.cElementTree.parse(self.Filename)
except SyntaxError:
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
print "timers.xml failed to load!"
try:
import os
os.rename(self.Filename, self.Filename + "_old")
except (IOError, OSError):
print "renaming broken timer failed"
return
except IOError:
print "timers.xml not found!"
return
root = doc.getroot()
# put out a message when at least one timer overlaps
checkit = True
for timer in root.findall("timer"):
newTimer = createTimer(timer)
if (self.record(newTimer, True, dosave=False) is not None) and (checkit == True):
from Tools.Notifications import AddPopup
from Screens.MessageBox import MessageBox
AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!"), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
				checkit = False # displaying the message once is enough
def saveTimer(self):
#root_element = xml.etree.cElementTree.Element('timers')
#root_element.text = "\n"
#for timer in self.timer_list + self.processed_timers:
# some timers (instant records) don't want to be saved.
# skip them
#if timer.dontSave:
#continue
#t = xml.etree.cElementTree.SubElement(root_element, 'timers')
#t.set("begin", str(int(timer.begin)))
#t.set("end", str(int(timer.end)))
#t.set("serviceref", str(timer.service_ref))
#t.set("repeated", str(timer.repeated))
#t.set("name", timer.name)
#t.set("description", timer.description)
#t.set("afterevent", str({
# AFTEREVENT.NONE: "nothing",
# AFTEREVENT.STANDBY: "standby",
# AFTEREVENT.DEEPSTANDBY: "deepstandby",
# AFTEREVENT.AUTO: "auto"}))
#if timer.eit is not None:
# t.set("eit", str(timer.eit))
#if timer.dirname is not None:
# t.set("location", str(timer.dirname))
#t.set("disabled", str(int(timer.disabled)))
#t.set("justplay", str(int(timer.justplay)))
#t.text = "\n"
#t.tail = "\n"
#for time, code, msg in timer.log_entries:
#l = xml.etree.cElementTree.SubElement(t, 'log')
#l.set("time", str(time))
#l.set("code", str(code))
#l.text = str(msg)
#l.tail = "\n"
#doc = xml.etree.cElementTree.ElementTree(root_element)
#doc.write(self.Filename)
list = []
list.append('<?xml version="1.0" ?>\n')
list.append('<timers>\n')
for timer in self.timer_list + self.processed_timers:
if timer.dontSave:
continue
list.append('<timer')
list.append(' begin="' + str(int(timer.begin)) + '"')
list.append(' end="' + str(int(timer.end)) + '"')
list.append(' serviceref="' + stringToXML(str(timer.service_ref)) + '"')
list.append(' repeated="' + str(int(timer.repeated)) + '"')
list.append(' name="' + str(stringToXML(timer.name)) + '"')
list.append(' description="' + str(stringToXML(timer.description)) + '"')
list.append(' afterevent="' + str(stringToXML({
AFTEREVENT.NONE: "nothing",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.AUTO: "auto"
}[timer.afterEvent])) + '"')
if timer.eit is not None:
list.append(' eit="' + str(timer.eit) + '"')
if timer.dirname is not None:
list.append(' location="' + str(stringToXML(timer.dirname)) + '"')
if timer.tags is not None:
list.append(' tags="' + str(stringToXML(' '.join(timer.tags))) + '"')
list.append(' disabled="' + str(int(timer.disabled)) + '"')
list.append(' justplay="' + str(int(timer.justplay)) + '"')
list.append(' always_zap="' + str(int(timer.always_zap)) + '"')
list.append(' zap_wakeup="' + str(timer.zap_wakeup) + '"')
list.append(' rename_repeat="' + str(int(timer.rename_repeat)) + '"')
list.append(' descramble="' + str(int(timer.descramble)) + '"')
list.append(' record_ecm="' + str(int(timer.record_ecm)) + '"')
list.append(' isAutoTimer="' + str(int(timer.isAutoTimer)) + '"')
list.append('>\n')
if config.recording.debug.value:
for time, code, msg in timer.log_entries:
list.append('<log')
list.append(' code="' + str(code) + '"')
list.append(' time="' + str(time) + '"')
list.append('>')
list.append(str(stringToXML(msg)))
list.append('</log>\n')
list.append('</timer>\n')
list.append('</timers>\n')
file = open(self.Filename + ".writing", "w")
for x in list:
file.write(x)
file.flush()
import os
os.fsync(file.fileno())
file.close()
os.rename(self.Filename + ".writing", self.Filename)
def getNextZapTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
if not timer.justplay or timer.begin < now or isWakeup and timer.zap_wakeup in ("from_standby", "never"):
continue
return timer.begin
return -1
def getNextRecordingTime(self):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if timer.justplay or next_act < now:
continue
return next_act
return -1
def getNextTimerTime(self, isWakeup=False):
now = time()
for timer in self.timer_list:
next_act = timer.getNextActivation()
if next_act < now or isWakeup and timer.justplay and timer.zap_wakeup in ("from_standby", "never"):
continue
return next_act
return -1
def isNextRecordAfterEventActionAuto(self):
now = time()
t = None
for timer in self.timer_list:
if timer.justplay or timer.begin < now:
continue
if t is None or t.begin == timer.begin:
t = timer
if t.afterEvent == AFTEREVENT.AUTO:
return True
return False
	def record(self, entry, ignoreTSC=False, dosave=True): # called from loadTimer with dosave=False
timersanitycheck = TimerSanityCheck(self.timer_list,entry)
if not timersanitycheck.check():
if ignoreTSC != True:
print "timer conflict detected!"
print timersanitycheck.getSimulTimerList()
return timersanitycheck.getSimulTimerList()
else:
print "ignore timer conflict"
elif timersanitycheck.doubleCheck():
print "ignore double timer"
return None
entry.timeChanged()
print "[Timer] Record " + str(entry)
entry.Timer = self
self.addTimerEntry(entry)
if dosave:
self.saveTimer()
return None
def isInRepeatTimer(self, timer, event):
time_match = 0
is_editable = False
begin = event.getBeginTime()
duration = event.getDuration()
end = begin + duration
timer_end = timer.end
if timer.disabled and timer.isRunning():
if begin < timer.begin <= end or timer.begin <= begin <= timer_end:
return True
else:
return False
if timer.justplay and (timer_end - timer.begin) <= 1:
timer_end += 60
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(timer.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = timer.begin < begin or begin <= timer.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = timer.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - timer.begin) / 60)
if xend < xbegin:
xend += 1440
if timer.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
is_editable = True
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
summary_end = (xend - end2) * 60
is_editable = not summary_end and True or time_match >= summary_end
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
summary_end = (begin2 - xbegin) * 60
is_editable = not summary_end and True or time_match >= summary_end
else:
# recording whole event
time_match = (end2 - begin2) * 60
is_editable = True
return time_match and is_editable
def isInTimer(self, eventid, begin, duration, service):
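		# Result shape (see below): None when no timer overlaps the event,
		# otherwise (overlap_in_seconds, [type, ...]) where type is 1-4 for a
		# plain record timer (1 = end of event, 2 = whole event, 3 = within
		# event, 4 = start of event), plus an offset of 5 for zap (justplay)
		# timers, 10 for always_zap and 15 for repeat timers.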
returnValue = None
type = 0
time_match = 0
isAutoTimer = False
bt = None
check_offset_time = not config.recording.margin_before.value and not config.recording.margin_after.value
end = begin + duration
refstr = ':'.join(service.split(':')[:11])
for x in self.timer_list:
if x.isAutoTimer == 1:
isAutoTimer = True
else:
isAutoTimer = False
check = ':'.join(x.service_ref.ref.toString().split(':')[:11]) == refstr
if not check:
sref = x.service_ref.ref
parent_sid = sref.getUnsignedData(5)
parent_tsid = sref.getUnsignedData(6)
if parent_sid and parent_tsid:
# check for subservice
sid = sref.getUnsignedData(1)
tsid = sref.getUnsignedData(2)
sref.setUnsignedData(1, parent_sid)
sref.setUnsignedData(2, parent_tsid)
sref.setUnsignedData(5, 0)
sref.setUnsignedData(6, 0)
check = sref.toCompareString() == refstr
num = 0
if check:
check = False
event = eEPGCache.getInstance().lookupEventId(sref, eventid)
num = event and event.getNumOfLinkageServices() or 0
sref.setUnsignedData(1, sid)
sref.setUnsignedData(2, tsid)
sref.setUnsignedData(5, parent_sid)
sref.setUnsignedData(6, parent_tsid)
for cnt in range(num):
subservice = event.getLinkageService(sref, cnt)
if sref.toCompareString() == subservice.toCompareString():
check = True
break
if check:
timer_end = x.end
timer_begin = x.begin
type_offset = 0
if not x.repeated and check_offset_time:
if 0 < end - timer_end <= 59:
timer_end = end
elif 0 < timer_begin - begin <= 59:
timer_begin = begin
if x.justplay:
type_offset = 5
if (timer_end - x.begin) <= 1:
timer_end += 60
if x.always_zap:
type_offset = 10
timer_repeat = x.repeated
# if set 'don't stop current event but disable coming events' for repeat timer
running_only_curevent = x.disabled and x.isRunning() and timer_repeat
if running_only_curevent:
timer_repeat = 0
type_offset += 15
if timer_repeat != 0:
type_offset += 15
if bt is None:
bt = localtime(begin)
bday = bt.tm_wday
begin2 = 1440 + bt.tm_hour * 60 + bt.tm_min
end2 = begin2 + duration / 60
xbt = localtime(x.begin)
xet = localtime(timer_end)
offset_day = False
checking_time = x.begin < begin or begin <= x.begin <= end
if xbt.tm_yday != xet.tm_yday:
oday = bday - 1
if oday == -1: oday = 6
offset_day = x.repeated & (1 << oday)
xbegin = 1440 + xbt.tm_hour * 60 + xbt.tm_min
xend = xbegin + ((timer_end - x.begin) / 60)
if xend < xbegin:
xend += 1440
if x.repeated & (1 << bday) and checking_time:
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
elif offset_day and checking_time:
xbegin -= 1440
xend -= 1440
if begin2 < xbegin <= end2:
if xend < end2:
# recording within event
time_match = (xend - xbegin) * 60
type = type_offset + 3
else:
# recording last part of event
time_match = (end2 - xbegin) * 60
type = type_offset + 1
elif xbegin <= begin2 <= xend:
if xend < end2:
# recording first part of event
time_match = (xend - begin2) * 60
type = type_offset + 4
else:
# recording whole event
time_match = (end2 - begin2) * 60
type = type_offset + 2
else:
if begin < timer_begin <= end:
if timer_end < end:
# recording within event
time_match = timer_end - timer_begin
type = type_offset + 3
else:
# recording last part of event
time_match = end - timer_begin
type = type_offset + 1
elif timer_begin <= begin <= timer_end:
if timer_end < end:
# recording first part of event
time_match = timer_end - begin
type = type_offset + 4
else:
# recording whole event
time_match = end - begin
type = type_offset + 2
if time_match:
if type in (2,7,12,17,22,27):
# When full recording do not look further
returnValue = (time_match, [type])
break
elif returnValue:
if type not in returnValue[1]:
returnValue[1].append(type)
else:
returnValue = (time_match, [type])
return returnValue
def removeEntry(self, entry):
print "[Timer] Remove " + str(entry)
# avoid re-enqueuing
entry.repeated = False
# abort timer.
# this sets the end time to current time, so timer will be stopped.
entry.autoincrease = False
entry.abort()
if entry.state != entry.StateEnded:
self.timeChanged(entry)
print "state: ", entry.state
print "in processed: ", entry in self.processed_timers
print "in running: ", entry in self.timer_list
# autoincrease instanttimer if possible
if not entry.dontSave:
for x in self.timer_list:
if x.setAutoincreaseEnd():
self.timeChanged(x)
# now the timer should be in the processed_timers list. remove it from there.
self.processed_timers.remove(entry)
self.saveTimer()
def shutdown(self):
self.saveTimer()
| gpl-2.0 | -5,372,605,724,139,803,000 | 1,797,039,786,948,530,400 | 34.353669 | 312 | 0.68033 | false |
ru-faraon/MITMf | core/packetfilter.py | 4 | 1221 | import threading
from core.utils import set_ip_forwarding, iptables
from core.logger import logger
from scapy.all import *
from traceback import print_exc
from netfilterqueue import NetfilterQueue
formatter = logging.Formatter("%(asctime)s [PacketFilter] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("PacketFilter", formatter)
class PacketFilter:
def __init__(self, filter):
self.filter = filter
def start(self):
set_ip_forwarding(1)
iptables().NFQUEUE()
self.nfqueue = NetfilterQueue()
self.nfqueue.bind(1, self.modify)
t = threading.Thread(name='packetparser', target=self.nfqueue.run)
t.setDaemon(True)
t.start()
def modify(self, pkt):
#log.debug("Got packet")
data = pkt.get_payload()
packet = IP(data)
try:
execfile(self.filter)
except Exception:
log.debug("Error occurred in filter")
print_exc()
pkt.set_payload(str(packet)) #set the packet content to our modified version
pkt.accept() #accept the packet
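        # Example filter file (a sketch): the script executed by execfile()
        # above runs with this method's locals, in particular `packet` (a
        # scapy IP object) and the module-level `log`, e.g.:
        #   if packet.haslayer(DNS) and packet[DNS].qr == 0:
        #       log.info("DNS query for {}".format(packet[DNSQR].qname))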
def stop(self):
self.nfqueue.unbind()
set_ip_forwarding(0)
iptables().flush() | gpl-3.0 | -3,392,721,061,736,512,000 | -8,653,677,186,852,917,000 | 26.155556 | 100 | 0.627355 | false |
djmaze/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/xcode.py | 137 | 50429 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
]
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
# Collect a list of all of the build configuration names used by the
# various targets in the file. It is very heavily advised to keep each
# target in an entire project (even across multiple project files) using
# the same set of configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
import sys
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings. This is intended to be
# used very sparingly. Really, almost everything should go into
# target-specific build settings sections. The project-wide settings are
# only intended to be used in cases where Xcode attempts to resolve
# variable references in a project context as opposed to a target context,
# such as when resolving sourceTree references while building up the tree
    # view for UI display.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
    # targets is the full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
    # in this project's "All" target. It includes each ordinary target that
    # does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
# If this target has a 'run_as' attribute, or is a test, add its
      # target to the targets, and (if it's a test) add it to the
# test targets.
is_test = int(target.get('test', 0))
if target.get('run_as') or is_test:
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
# The test runner target has a build phase that executes the
        # test, if the target has the 'test' attribute. If the 'run_as' tag
# doesn't exist (meaning that this must be a test), then we
# define a default test command line.
command = target.get('run_as', {
'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']
})
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator flag
        # 'xcode_serialize_all_test_runs' controls whether all test runs are
        # forced to be serial. It defaults to True. To get serial runs, this
        # little bit of python does the same as the linux flock utility to
        # make sure only one runs at a time.
command_prefix = ''
if is_test and serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
# If we were unable to exec for some reason, we want to exit
# with an error, and fixup variable references to be shell
# syntax instead of xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
if is_test:
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the test runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
target_dict = xcode_target_to_target_dict[dependency_xct]
if target_dict and int(target_dict.get('test', 0)):
assert dependency_xct.test_runner
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
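# Illustration only -- nothing below is called by the generator.  It distills
# the write-to-temp/compare/rename approach documented in XcodeProject.Write
# above into a self-contained sketch, reusing the os, tempfile and filecmp
# modules this file already imports.  The helper name _ReplaceFileIfChanged is
# an invention of this sketch, not part of gyp.
def _ReplaceFileIfChanged(dest_path, new_contents):
  """Writes new_contents to dest_path only if the contents actually differ."""
  (fd, tmp_path) = tempfile.mkstemp(suffix='.tmp',
                                    prefix=os.path.basename(dest_path) + '.',
                                    dir=os.path.dirname(dest_path) or '.')
  try:
    tmp_file = os.fdopen(fd, 'wb')
    tmp_file.write(new_contents)
    tmp_file.close()
    if os.path.exists(dest_path) and filecmp.cmp(dest_path, tmp_path, False):
      # Nothing changed; discard the temporary file so that anything watching
      # dest_path (such as Xcode) never sees a spurious modification.
      os.unlink(tmp_path)
    else:
      # mkstemp creates owner-only files; widen the mode to respect the umask
      # before renaming the temporary file over the destination in one step.
      umask = os.umask(077)
      os.umask(umask)
      os.chmod(tmp_path, 0666 & ~umask)
      os.rename(tmp_path, dest_path)
  except Exception:
    os.unlink(tmp_path)
    raise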
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
  if cached_xcode_version is not None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
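# Illustration only; not used elsewhere in the generator.  The sample output
# below is an assumption about the rough shape of `xcodebuild -version`, e.g.
#   Xcode 3.2.6
#   Component versions: ...
# for which InstalledXcodeVersion() returns '3.2.6', and '' whenever
# xcodebuild is missing or exits with an error.
def _ExampleUseOfInstalledXcodeVersion():
  version = InstalledXcodeVersion()
  # The exact comparison is made up for this example; callers would normally
  # just branch on a version prefix.
  return version.startswith('3.') if version else False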
def AddSourceToTarget(source, pbxp, xct):
# TODO(mark): Perhaps this can be made a little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext != '':
ext = ext[1:].lower()
if ext in source_extensions:
xct.SourcesPhase().AddFile(source)
else:
# Files that aren't added to a sources build phase can still go into
# the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
_xcode_variable_re = re.compile('(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if matches is None:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
    if variable not in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
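# Illustration only; this helper is not called anywhere.  It shows that known
# variables are substituted while unknown $(VAR) references are left intact
# for make or Xcode to resolve later.  The sample values are invented.
def _ExampleExpandXcodeVariables():
  expanded = ExpandXcodeVariables('$(INPUT_FILE_BASE).cc $(UNKNOWN)',
                                  {'INPUT_FILE_BASE': 'two'})
  # expanded == 'two.cc $(UNKNOWN)'
  return expanded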
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. We use the type
# with "+bundle" appended if the target has "mac_bundle" set.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
target_product_name = spec.get('product_name', None)
if target_product_name:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_extension=spec.get('product_extension', None))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
# Xcode does not have a distinct type for loadable_modules that are pure
    # BSD targets (i.e. unbundled). It uses the same setup as a shared_library
    # but the mach-o type is explicitly set in the settings. So before we do
# anything else, for this one case, we stuff in that one setting. This
# would allow the other data in the spec to change it if need be.
if type == 'loadable_module' and not is_bundle:
xccl.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target as used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
    # but there are a few significant deficiencies that render it unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    #            variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_basename = posixpath.basename(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = '@echo note: ' + ExpandXcodeVariables(message,
rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility of collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s_%s.make' % (target_name, rule['rule_name'])
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
# TODO(mark): quote the list of concrete_output_dirs.
if len(concrete_output_dirs) > 0:
makefile.write('\tmkdir -p %s\n' % ' '.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
makefile.write('\t%s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
for group in ['inputs', 'inputs_excluded']:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" if it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
for key in ['sources', 'mac_bundle_resources']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
for action in spec.get('actions', []):
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
# TODO(mark): this logic isn't right. There are certain directories
# that are always searched, we should check to see if the library is
# in one of those directories, and if not, we should do the
# AppendBuildSetting thing.
if not posixpath.isabs(library) and not library.startswith('$'):
# TODO(mark): Need to check to see if library_dir is already in
# LIBRARY_SEARCH_PATHS.
library_dir = posixpath.dirname(library)
xct.AppendBuildSetting('LIBRARY_SEARCH_PATHS', library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
# If the define is of the form A="B", escape the quotes
# yielding A=\"\\\"B\\\"\". The extra set of quotes tell
# Xcode NOT to split on spaces, and still define a string
# literal (with quotes).
set_define = re.sub(r'^([^=]*=)"([^"]*)"$',
r'\1"\"\2\""', define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause | 4,302,894,327,084,935,000 | 2,768,196,734,742,137,300 | 43.470018 | 80 | 0.655377 | false |
lzambella/Qyoutube-dl | youtube_dl/extractor/swrmediathek.py | 165 | 3660 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class SWRMediathekIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_TESTS = [{
'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
'md5': '8c5f6f0172753368547ca8413a7768ac',
'info_dict': {
'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
'ext': 'mp4',
'title': 'SWR odysso',
'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
'thumbnail': 're:^http:.*\.jpg$',
'duration': 2602,
'upload_date': '20140515',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
'info_dict': {
'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
'ext': 'mp4',
'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
'thumbnail': 're:http://.*\.jpg',
'duration': 5305,
'upload_date': '20140516',
'uploader': 'SWR Fernsehen',
'uploader_id': '990030',
},
}, {
'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
'info_dict': {
'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
'ext': 'mp3',
'title': 'Saša Stanišic: Vor dem Fest',
'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
'thumbnail': 're:http://.*\.jpg',
'duration': 3366,
'upload_date': '20140520',
'uploader': 'SWR 2',
'uploader_id': '284670',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video = self._download_json(
'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id, video_id, 'Downloading video JSON')
attr = video['attr']
media_type = attr['entry_etype']
formats = []
for entry in video['sub']:
if entry['name'] != 'entry_media':
continue
entry_attr = entry['attr']
codec = entry_attr['val0']
quality = int(entry_attr['val1'])
fmt = {
'url': entry_attr['val2'],
'quality': quality,
}
if media_type == 'Video':
fmt.update({
'format_note': ['144p', '288p', '544p', '720p'][quality - 1],
'vcodec': codec,
})
elif media_type == 'Audio':
fmt.update({
'acodec': codec,
})
formats.append(fmt)
self._sort_formats(formats)
return {
'id': video_id,
'title': attr['entry_title'],
'description': attr['entry_descl'],
'thumbnail': attr['entry_image_16_9'],
'duration': parse_duration(attr['entry_durat']),
'upload_date': attr['entry_pdatet'][:-4],
'uploader': attr['channel_title'],
'uploader_id': attr['channel_idkey'],
'formats': formats,
}
| gpl-3.0 | 3,887,601,260,574,134,300 | 7,699,634,849,247,676,000 | 34.163462 | 150 | 0.503965 | false |
AbsoluteMSTR/pies | pies/overrides.py | 2 | 7952 | """pies/overrides.py.
Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import
Copyright (C) 2013 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import abc
import functools
import sys
from numbers import Integral
from ._utils import unmodified_isinstance, with_metaclass
from .version_info import PY2, PY3, VERSION
native_dict = dict
native_round = round
native_filter = filter
native_map = map
native_zip = zip
native_range = range
native_str = str
native_chr = chr
native_input = input
native_next = next
native_object = object
common = ['native_dict', 'native_round', 'native_filter', 'native_map', 'native_range', 'native_str', 'native_chr',
'native_input', 'PY2', 'PY3', 'u', 'itemsview', 'valuesview', 'keysview', 'execute', 'integer_types',
'native_next', 'native_object', 'with_metaclass']
if PY3:
import urllib
import builtins
from urllib import parse
from collections import OrderedDict
integer_types = (int, )
def u(string):
return string
def itemsview(collection):
return collection.items()
def valuesview(collection):
return collection.values()
def keysview(collection):
return collection.keys()
urllib.quote = parse.quote
urllib.quote_plus = parse.quote_plus
urllib.unquote = parse.unquote
urllib.unquote_plus = parse.unquote_plus
urllib.urlencode = parse.urlencode
execute = getattr(builtins, 'exec')
if VERSION[1] < 2:
def callable(entity):
return hasattr(entity, '__call__')
common.append('callable')
__all__ = common + ['OrderedDict', 'urllib']
else:
from itertools import ifilter as filter
from itertools import imap as map
from itertools import izip as zip
from decimal import Decimal, ROUND_HALF_EVEN
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import codecs
str = unicode
chr = unichr
input = raw_input
range = xrange
integer_types = (int, long)
import sys
stdout = sys.stdout
stderr = sys.stderr
reload(sys)
sys.stdout = stdout
sys.stderr = stderr
sys.setdefaultencoding('utf-8')
def _create_not_allowed(name):
def _not_allow(*args, **kwargs):
raise NameError("name '{0}' is not defined".format(name))
_not_allow.__name__ = name
return _not_allow
for removed in ('apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks'):
globals()[removed] = _create_not_allowed(removed)
def u(s):
if isinstance(s, unicode):
return s
else:
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
def execute(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
class _dict_view_base(object):
__slots__ = ('_dictionary', )
def __init__(self, dictionary):
self._dictionary = dictionary
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__, str(list(self.__iter__())))
def __unicode__(self):
return str(self.__repr__())
def __str__(self):
return str(self.__unicode__())
class dict_keys(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iterkeys()
class dict_values(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.itervalues()
class dict_items(_dict_view_base):
__slots__ = ()
def __iter__(self):
return self._dictionary.iteritems()
def itemsview(collection):
return dict_items(collection)
def valuesview(collection):
return dict_values(collection)
def keysview(collection):
return dict_keys(collection)
class dict(unmodified_isinstance(native_dict)):
def has_key(self, *args, **kwargs):
return AttributeError("'dict' object has no attribute 'has_key'")
def items(self):
return dict_items(self)
def keys(self):
return dict_keys(self)
def values(self):
return dict_values(self)
def round(number, ndigits=None):
return_int = False
if ndigits is None:
return_int = True
ndigits = 0
if hasattr(number, '__round__'):
return number.__round__(ndigits)
if ndigits < 0:
raise NotImplementedError('negative ndigits not supported yet')
exponent = Decimal('10') ** (-ndigits)
d = Decimal.from_float(number).quantize(exponent,
rounding=ROUND_HALF_EVEN)
if return_int:
return int(d)
else:
return float(d)
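    # Illustration only: the backported round() above follows Python 3's
    # banker's rounding.  _round_examples is a throwaway helper for this
    # sketch and is not part of the pies API.
    def _round_examples():
        # Ties round to the even neighbour, matching Python 3:
        #   round(2.5) == 2 and round(3.5) == 4
        # and ndigits quantizes the Decimal form of the float:
        #   round(2.675, 2) == 2.67
        return round(2.5), round(3.5), round(2.675, 2)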
    def next(iterator):
        # Return the next item, preferring a Python 3 style __next__ method
        # and falling back to the built-in next() for Python 2 iterators.
        try:
            return iterator.__next__()
        except AttributeError:
            return native_next(iterator)
class FixStr(type):
def __new__(cls, name, bases, dct):
if '__str__' in dct:
dct['__unicode__'] = dct['__str__']
dct['__str__'] = lambda self: self.__unicode__().encode('utf-8')
return type.__new__(cls, name, bases, dct)
if sys.version_info[1] <= 6:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
subclass = getattr(instance, '__class__', None)
subtype = type(instance)
instance_type = getattr(abc, '_InstanceType', None)
if not instance_type:
class test_object:
pass
instance_type = type(test_object)
if subtype is instance_type:
subtype = subclass
if subtype is subclass or subclass is None:
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or cls.__subclasscheck__(subtype))
else:
def __instancecheck__(cls, instance):
if cls.__name__ == "object":
return isinstance(instance, native_object)
return type.__instancecheck__(cls, instance)
class object(with_metaclass(FixStr, object)):
pass
__all__ = common + ['round', 'dict', 'apply', 'cmp', 'coerce', 'execfile', 'raw_input', 'unpacks', 'str', 'chr',
'input', 'range', 'filter', 'map', 'zip', 'object']
| mit | -4,217,481,509,785,399,300 | -933,189,573,101,077,000 | 31.064516 | 116 | 0.593058 | false |
FloatingGhost/MISP | app/files/scripts/stix2/misp2stix2_mapping.py | 2 | 22427 | def attribute_data_observable(data):
return {'type': 'artifact', 'payload_bin': data}
def attribute_data_pattern(data):
return "artifact:payload_bin = '{}'".format(data)
def define_address_type(address):
if ':' in address:
return 'ipv6-addr'
return 'ipv4-addr'
def observable_as(_, attribute_value):
return {'0': {'type': 'autonomous-system', 'number': attribute_value}}
def pattern_as(_, attribute_value):
return "[autonomous-system:number = '{}']".format(attribute_value)
def observable_attachment(*args):
observable = observable_file(args[0], args[1])
if len(args) == 3:
observable['0']['content_ref'] = '0'
return {'0': attribute_data_observable(args[2]), '1': observable['0']}
return observable
def pattern_attachment(*args):
pattern = pattern_file(args[0], args[1])[1:-1]
if len(args) == 3:
pattern += " AND {}".format(attribute_data_pattern(args[2]))
return "[{}]".format(pattern)
def observable_domain(_, attribute_value):
return {'0': {'type': 'domain-name', 'value': attribute_value}}
def pattern_domain(_, attribute_value):
return "[domain-name:value = '{}']".format(attribute_value)
def observable_domain_ip(_, attribute_value):
address_type = define_address_type(attribute_value)
domain_value, ip_value = attribute_value.split('|')
domain = observable_domain(_, domain_value)
domain['0']['resolves_to_refs'] = '1'
domain['1'] = {'type': address_type, 'value': ip_value}
return domain
def pattern_domain_ip(_, attribute_value):
domain_value, ip_value = attribute_value.split('|')
domain = pattern_domain(_, domain_value)[1:-1]
domain += " AND domain-name:resolves_to_refs[*].value = '{}'".format(ip_value)
return "[{}]".format(domain)
def observable_email_address(attribute_type, attribute_value):
email_type = "from_ref" if 'src' in attribute_type else "to_refs"
return {'0': {'type': 'email-addr', 'value': attribute_value},
'1': {'type': 'email-message', email_type: '0', 'is_multipart': 'false'}}
def pattern_email_address(attribute_type, attribute_value):
email_type = "from_ref" if 'src' in attribute_type else "to_refs"
return "[email-message:{} = '{}']".format(email_type, attribute_value)
def observable_email_attachment(_, attribute_value):
observable = observable_file(_, attribute_value)
observable['1'] = {"type": "email-message", 'is_multipart': 'true',
"body_multipart": [{"content_disposition": "attachment; filename=''".format(attribute_value), "body_raw_ref": "0"}]}
return observable
def pattern_email_attachment(_, attribute_value):
return "[email-message:body_multipart[*].body_raw_ref.name = '{}']".format(attribute_value)
def observable_email_message(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
return {'0': {'type': 'email-message', email_type: attribute_value, 'is_multipart': 'false'}}
def pattern_email_message(attribute_type, attribute_value):
email_type = attribute_type.split('-')[1]
return "[email-message:{} = '{}']".format(email_type, attribute_value)
def observable_file(_, attribute_value):
return {'0': {'type': 'file', 'name': attribute_value}}
def pattern_file(_, attribute_value):
return "[file:name = '{}']".format(attribute_value)
def observable_file_hash(attribute_type, attribute_value):
_, hash_type = attribute_type.split('|')
value1, value2 = attribute_value.split('|')
return {'0': {'type': 'file', 'name': value1, 'hashes': {hash_type: value2}}}
def pattern_file_hash(attribute_type, attribute_value):
_, hash_type = attribute_type.split('|')
value1, value2 = attribute_value.split('|')
return "[file:name = '{0}' AND file:hashes.'{1}' = '{2}']".format(value1, hash_type, value2)
def observable_hash(attribute_type, attribute_value):
return {'0': {'type': 'file', 'hashes': {attribute_type: attribute_value}}}
def pattern_hash(attribute_type, attribute_value):
return "[file:hashes.'{}' = '{}']".format(attribute_type, attribute_value)
def observable_hostname_port(_, attribute_value):
hostname, port = attribute_value.split('|')
hostname_port = observable_domain(_, hostname)
    hostname_port['1'] = observable_port(_, port)['0']
return hostname_port
def pattern_hostname_port(_, attribute_value):
hostname, port = attribute_value.split('|')
return "[{} AND {}]".format(pattern_domain(_, hostname)[1:-1], pattern_port(_, port)[1:-1])
def observable_ip(attribute_type, attribute_value):
ip_type = attribute_type.split('-')[1]
address_type = define_address_type(attribute_value)
return {'0': {'type': address_type, 'value': attribute_value},
'1': {'type': 'network-traffic', '{}_ref'.format(ip_type): '0',
'protocols': [address_type.split('-')[0]]}}
def pattern_ip(attribute_type, attribute_value):
ip_type = attribute_type.split('-')[1]
address_type = define_address_type(attribute_value)
return "[network-traffic:{0}_ref.type = '{1}' AND network-traffic:{0}_ref.value = '{2}']".format(ip_type, address_type, attribute_value)
def observable_ip_port(attribute_type, attribute_value):
ip_type, _ = attribute_type.split('|')
ip, port = attribute_value.split('|')
ip_port = observable_ip(ip_type, ip)
port_type = "{}_port".format(ip_type.split('-')[1])
ip_port['1'][port_type] = port
return ip_port
def pattern_ip_port(attribute_type, attribute_value):
ip_type, _ = attribute_type.split('|')
ip, port = attribute_value.split('|')
port_type = "{}_port".format(ip_type.split('-')[1])
return "[network-traffic:{} = '{}' AND {}]".format(port_type, port, pattern_ip(ip_type, ip)[1:-1])
def observable_mac_address(_, attribute_value):
return {'0': {'type': 'mac-addr', 'value': attribute_value}}
def pattern_mac_address(_, attribute_value):
return "[mac-addr:value = '{}']".format(attribute_value)
def observable_malware_sample(*args):
observable = observable_file_hash("filename|md5", args[1])
if len(args) == 3:
observable['0']['content_ref'] = '0'
return {'0': attribute_data_observable(args[2]), '1': observable['0']}
return observable
def pattern_malware_sample(*args):
pattern = pattern_file_hash("filename|md5", args[1])[1:-1]
if len(args) == 3:
pattern += " AND {}".format(attribute_data_pattern(args[2]))
return "[{}]".format(pattern)
def observable_mutex(_, attribute_value):
return {'0': {'type': 'mutex', 'name': attribute_value}}
def pattern_mutex(_, attribute_value):
return "[mutex:name = '{}']".format(attribute_value)
def observable_port(_, attribute_value):
return {'0': {'type': 'network-traffic', 'dst_port': attribute_value, 'protocols': []}}
def pattern_port(_, attribute_value):
return "[network-traffic:dst_port = '{}']".format(attribute_value)
def observable_regkey(_, attribute_value):
return {'0': {'type': 'windows-registry-key', 'key': attribute_value.strip()}}
def pattern_regkey(_, attribute_value):
return "[windows-registry-key:key = '{}']".format(attribute_value.strip())
def observable_regkey_value(_, attribute_value):
from stix2 import WindowsRegistryValueType
key, value = attribute_value.split('|')
regkey = observable_regkey(_, key)
regkey['0']['values'] = WindowsRegistryValueType(**{'name': value.strip()})
return regkey
def pattern_regkey_value(_, attribute_value):
key, value = attribute_value.split('|')
regkey = pattern_regkey(_, key)[1:-1]
regkey += " AND windows-registry-key:values = '{}'".format(value.strip())
return "[{}]".format(regkey)
def observable_reply_to(_, attribute_value):
return {'0': {'type': 'email-addr', 'value': attribute_value},
'1': {'type': 'email-message', 'additional_header_fields': {'Reply-To': ['0']}, 'is_multipart': 'false'}}
def pattern_reply_to(_, attribute_value):
return "[email-message:additional_header_fields.reply_to = '{}']".format(attribute_value)
def observable_url(_, attribute_value):
return {'0': {'type': 'url', 'value': attribute_value}}
def pattern_url(_, attribute_value):
return "[url:value = '{}']".format(attribute_value)
def observable_x509(_, attribute_value):
return {'0': {'type': 'x509-certificate', 'hashes': {'sha1': attribute_value}}}
def pattern_x509(_, attribute_value):
return "[x509-certificate:hashes = '{}']".format(attribute_value)
def return_vulnerability(name):
return {'source_name': 'cve', 'external_id': name}
mispTypesMapping = {
'link': {'to_call': 'handle_link'},
'vulnerability': {'to_call': 'add_vulnerability', 'vulnerability_args': return_vulnerability},
'md5': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha1': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'filename': {'to_call': 'handle_usual_type', 'observable': observable_file, 'pattern': pattern_file},
'filename|md5': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha1': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'ip-src': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
'ip-dst': {'to_call': 'handle_usual_type', 'observable': observable_ip, 'pattern': pattern_ip},
'hostname': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
'domain': {'to_call': 'handle_usual_type', 'observable': observable_domain, 'pattern': pattern_domain},
'domain|ip': {'to_call': 'handle_usual_type', 'observable': observable_domain_ip, 'pattern': pattern_domain_ip},
'email-src': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
'email-dst': {'to_call': 'handle_usual_type', 'observable': observable_email_address, 'pattern': pattern_email_address},
'email-subject': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
'email-body': {'to_call': 'handle_usual_type', 'observable': observable_email_message, 'pattern': pattern_email_message},
'email-attachment': {'to_call': 'handle_usual_type', 'observable': observable_email_attachment, 'pattern': pattern_email_attachment},
'url': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
'regkey': {'to_call': 'handle_usual_type', 'observable': observable_regkey, 'pattern': pattern_regkey},
'regkey|value': {'to_call': 'handle_usual_type', 'observable': observable_regkey_value, 'pattern': pattern_regkey_value},
'malware-sample': {'to_call': 'handle_usual_type', 'observable': observable_malware_sample, 'pattern': pattern_malware_sample},
'mutex': {'to_call': 'handle_usual_type', 'observable': observable_mutex, 'pattern': pattern_mutex},
'uri': {'to_call': 'handle_usual_type', 'observable': observable_url, 'pattern': pattern_url},
'authentihash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'imphash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'pehash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha384': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'tlsh': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'cdhash': {'to_call': 'handle_usual_type', 'observable': observable_hash, 'pattern': pattern_hash},
'filename|authentihash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|ssdeep': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|imphash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|impfuzzy': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|pehash': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha384': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512/224': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|sha512/256': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'filename|tlsh': {'to_call': 'handle_usual_type', 'observable': observable_file_hash, 'pattern': pattern_file_hash},
'x509-fingerprint-sha1': {'to_call': 'handle_usual_type', 'observable': observable_x509, 'pattern': pattern_x509},
'port': {'to_call': 'handle_usual_type', 'observable': observable_port, 'pattern': pattern_port},
'ip-dst|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
'ip-src|port': {'to_call': 'handle_usual_type', 'observable': observable_ip_port, 'pattern': pattern_ip_port},
'hostname|port': {'to_call': 'handle_usual_type', 'observable': observable_hostname_port, 'pattern': pattern_hostname_port},
'email-reply-to': {'to_call': 'handle_usual_type', 'observable': observable_reply_to, 'pattern': pattern_reply_to},
'attachment': {'to_call': 'handle_usual_type', 'observable': observable_attachment, 'pattern': pattern_attachment},
'mac-address': {'to_call': 'handle_usual_type', 'observable': observable_mac_address, 'pattern': pattern_mac_address},
'AS': {'to_call': 'handle_usual_type', 'observable': observable_as, 'pattern': pattern_as}
#'email-dst-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
# 'pattern': 'email-addr:display_name = \'{0}\''},
#'email-src-display-name': {'observable': {'0': {'type': 'email-addr', 'display_name': ''}},
# 'pattern': 'email-addr:display_name = \'{0}\''}
}
network_traffic_pattern = "network-traffic:{0} = '{1}' AND "
network_traffic_src_ref = "src_ref.type = '{0}' AND network-traffic:src_ref.value"
network_traffic_dst_ref = "dst_ref.type = '{0}' AND network-traffic:dst_ref.value"
objectsMapping = {'asn': {'to_call': 'handle_usual_object_name',
'observable': {'type': 'autonomous-system'},
'pattern': "autonomous-system:{0} = '{1}' AND "},
'course-of-action': {'to_call': 'add_course_of_action_from_object'},
'domain-ip': {'to_call': 'handle_usual_object_name',
'pattern': "domain-name:{0} = '{1}' AND "},
'email': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'email-message'}},
'pattern': "email-{0}:{1} = '{2}' AND "},
'file': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'file', 'hashes': {}}},
'pattern': "file:{0} = '{1}' AND "},
'ip-port': {'to_call': 'handle_usual_object_name',
'pattern': network_traffic_pattern},
'network-socket': {'to_call': 'handle_usual_object_name',
'pattern': network_traffic_pattern},
'pe': {'to_call': 'populate_objects_to_parse'},
'pe-section': {'to_call': 'populate_objects_to_parse'},
'process': {'to_call': 'handle_usual_object_name',
'pattern': "process:{0} = '{1}' AND "},
'registry-key': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'windows-registry-key'}},
'pattern': "windows-registry-key:{0} = '{1}' AND "},
'url': {'to_call': 'handle_usual_object_name',
'observable': {'0': {'type': 'url'}},
'pattern': "url:{0} = '{1}' AND "},
'vulnerability': {'to_call': 'add_object_vulnerability'},
'x509': {'to_call': 'handle_usual_object_name',
'pattern': "x509-certificate:{0} = '{1}' AND "}
}
asnObjectMapping = {'asn': 'number', 'description': 'name', 'subnet-announced': 'value'}
domainIpObjectMapping = {'ip-dst': 'resolves_to_refs[*].value', 'domain': 'value'}
emailObjectMapping = {'email-body': {'email_type': 'message', 'stix_type': 'body'},
'subject': {'email_type': 'message', 'stix_type': 'subject'},
'to': {'email_type': 'message', 'stix_type': 'to_refs'}, 'cc': {'email_type': 'message', 'stix_type': 'cc_refs'},
'to-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
'from': {'email_type': 'message', 'stix_type': 'from_ref'},
'from-display-name': {'email_type': 'addr', 'stix_type': 'display_name'},
'reply-to': {'email_type': 'message', 'stix_type': 'additional_header_fields.reply_to'},
'attachment': {'email_type': 'message', 'stix_type': 'body_multipart[*].body_raw_ref.name'},
'send-date': {'email_type': 'message', 'stix_type': 'date'},
'x-mailer': {'email_type': 'message', 'stix_type': 'additional_header_fields.x_mailer'}}
fileMapping = {'hashes': "hashes.'{0}'", 'size-in-bytes': 'size', 'filename': 'name', 'mime-type': 'mime_type'}
ipPortObjectMapping = {'ip': network_traffic_dst_ref,
'src-port': 'src_port', 'dst-port': 'dst_port',
'first-seen': 'start', 'last-seen': 'end',
'domain': 'value'}
networkSocketMapping = {'address-family': 'address_family', 'domain-family': 'protocol_family',
'protocol': 'protocols', 'src-port': 'src_port', 'dst-port': 'dst_port',
'ip-src': network_traffic_src_ref, 'ip-dst': network_traffic_dst_ref,
'hostname-src': network_traffic_src_ref, 'hostname-dst': network_traffic_dst_ref}
peMapping = {'type': 'pe_type', 'number-sections': 'number_of_sections', 'imphash': 'imphash'}
peSectionMapping = {'name': 'name', 'size-in-bytes': 'size', 'entropy': 'entropy'}
processMapping = {'name': 'name', 'pid': 'pid', 'creation-time': 'created'}
regkeyMapping = {'data-type': 'data_type', 'data': 'data', 'name': 'name',
'last-modified': 'modified', 'key': 'key'}
urlMapping = {'url': 'value', 'domain': 'value', 'port': 'dst_port'}
x509mapping = {'pubkey-info-algorithm': 'subject_public_key_algorithm', 'subject': 'subject',
'pubkey-info-exponent': 'subject_public_key_exponent', 'issuer': 'issuer',
'pubkey-info-modulus': 'subject_public_key_modulus', 'serial-number': 'serial_number',
'validity-not-before': 'validity_not_before', 'validity-not-after': 'validity_not_after',
'version': 'version',}
defineProtocols = {'80': 'http', '443': 'https'}
relationshipsSpecifications = {'attack-pattern': {'vulnerability': 'targets', 'identity': 'targets',
'malware': 'uses', 'tool': 'uses'},
'campaign': {'intrusion-set': 'attributed-to', 'threat-actor': 'attributed-to',
'identity': 'targets', 'vulnerability': 'targets',
'attack-pattern': 'uses', 'malware': 'uses',
'tool': 'uses'},
'course-of-action':{'attack-pattern': 'mitigates', 'malware': 'mitigates',
'tool': 'mitigates', 'vulnerability': 'mitigates'},
'indicator': {'attack-pattern': 'indicates', 'campaign': 'indicates',
'intrusion-set': 'indicates', 'malware': 'indicates',
'threat-actor': 'indicates', 'tool': 'indicates'},
'intrusion-set': {'threat-actor': 'attributed-to', 'identity': 'targets',
'vulnerability': 'targets', 'attack-pattern': 'uses',
'malware': 'uses', 'tool': 'uses'},
'malware': {'identity': 'targets', 'vulnerability': 'targets',
'tool': 'uses', 'malware': 'variant-of'},
'threat-actor': {'identity': 'attributed-to', 'vulnerability': 'targets',
'attack-pattern': 'uses', 'malware': 'uses',
'tool': 'uses'},
'tool': {'identity': 'targets', 'vulnerability': 'targets'}
}
| agpl-3.0 | 434,186,751,342,820,000 | 5,988,094,636,433,948,000 | 59.287634 | 140 | 0.599144 | false |
widdowquinn/pyani | docs/conf.py | 1 | 5946 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This is a configuration file with no functioning code
# pylint: skip-file
#
# pyani documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 11 13:27:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyani"
copyright = "2017-2019, Leighton Pritchard"
author = "Leighton Pritchard"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.2.9"
# The full version, including alpha/beta/rc tags.
release = "0.2.9"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"relations.html", # needs 'show_related': True theme option to display
"searchbox.html",
"donate.html",
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pyanidoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pyani.tex", "pyani Documentation", "Leighton Pritchard", "manual")
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyani", "pyani Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyani",
"pyani Documentation",
author,
"pyani",
"One line description of project.",
"Miscellaneous",
)
]
# -- Magic to run sphinx-apidoc automatically -----------------------------
# See https://github.com/rtfd/readthedocs.org/issues/1139
# on which this is based.
def run_apidoc(_):
    """Call sphinx-apidoc on the pyani package."""
from sphinx.ext.apidoc import main as apidoc_main
apidoc_main(["-e", "-F", "-o", "api/", "../pyani"])
# os.remove("api/thapbi_pict.rst") # replaced with index.rst
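    # The apidoc_main call above is roughly equivalent to running, from the
    # docs/ directory:
    #   sphinx-apidoc -e -F -o api/ ../pyani
    # (illustrative; -e puts each module on its own page, -F generates the full
    # project skeleton)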
def setup(app):
"""Over-ride Sphinx setup to trigger sphinx-apidoc."""
app.connect("builder-inited", run_apidoc)
| mit | 8,120,317,238,676,993,000 | -4,074,863,092,908,753,400 | 29.492308 | 84 | 0.662967 | false |
grpc/grpc | src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py | 9 | 27324 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
import grpc.experimental
from tests.unit import test_common
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
def __init__(self):
self._condition = threading.Condition()
self._paused = False
self._fail = False
@contextlib.contextmanager
def pause(self): # pylint: disable=invalid-name
with self._condition:
self._paused = True
yield
with self._condition:
self._paused = False
self._condition.notify_all()
@contextlib.contextmanager
def fail(self): # pylint: disable=invalid-name
with self._condition:
self._fail = True
yield
with self._condition:
self._fail = False
def _control(self): # pylint: disable=invalid-name
with self._condition:
if self._fail:
raise ValueError()
while self._paused:
self._condition.wait()
def UnaryCall(self, request, unused_rpc_context):
response = response_pb2.SimpleResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * request.response_size
self._control()
return response
def StreamingOutputCall(self, request, unused_rpc_context):
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def StreamingInputCall(self, request_iter, unused_rpc_context):
response = response_pb2.StreamingInputCallResponse()
aggregated_payload_size = 0
for request in request_iter:
aggregated_payload_size += len(request.payload.payload_compressable)
response.aggregated_payload_size = aggregated_payload_size
self._control()
return response
def FullDuplexCall(self, request_iter, unused_rpc_context):
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
yield response
def HalfDuplexCall(self, request_iter, unused_rpc_context):
responses = []
for request in request_iter:
for parameter in request.response_parameters:
response = response_pb2.StreamingOutputCallResponse()
response.payload.payload_type = payload_pb2.COMPRESSABLE
response.payload.payload_compressable = 'a' * parameter.size
self._control()
responses.append(response)
for response in responses:
yield response
class _Service(
collections.namedtuple('_Service', (
'servicer_methods',
'server',
'stub',
))):
"""A live and running service.
Attributes:
servicer_methods: The _ServicerMethods servicing RPCs.
server: The grpc.Server servicing RPCs.
stub: A stub on which to invoke RPCs.
"""
def _CreateService():
"""Provides a servicer backend and a stub.
Returns:
A _Service with which to test RPCs.
"""
servicer_methods = _ServicerMethods()
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
def UnaryCall(self, request, context):
return servicer_methods.UnaryCall(request, context)
def StreamingOutputCall(self, request, context):
return servicer_methods.StreamingOutputCall(request, context)
def StreamingInputCall(self, request_iterator, context):
return servicer_methods.StreamingInputCall(request_iterator,
context)
def FullDuplexCall(self, request_iterator, context):
return servicer_methods.FullDuplexCall(request_iterator, context)
def HalfDuplexCall(self, request_iterator, context):
return servicer_methods.HalfDuplexCall(request_iterator, context)
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
"""Provides a servicer backend that fails to implement methods and its stub.
Returns:
A _Service with which to test RPCs. The returned _Service's
servicer_methods implements none of the methods required of it.
"""
class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
pass
server = test_common.test_server()
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
server)
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
return _Service(None, server, stub)
def _streaming_input_request_iterator():
for _ in range(3):
request = request_pb2.StreamingInputCallRequest()
request.payload.payload_type = payload_pb2.COMPRESSABLE
request.payload.payload_compressable = 'a'
yield request
def _streaming_output_request():
request = request_pb2.StreamingOutputCallRequest()
sizes = [1, 2, 3]
request.response_parameters.add(size=sizes[0], interval_us=0)
request.response_parameters.add(size=sizes[1], interval_us=0)
request.response_parameters.add(size=sizes[2], interval_us=0)
return request
def _full_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
class PythonPluginTest(unittest.TestCase):
"""Test case for the gRPC Python protoc-plugin.
While reading these tests, remember that the futures API
(`stub.method.future()`) only gives futures for the *response-unary*
methods and does not exist for response-streaming methods.
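    For example (illustrative): stub.UnaryCall.future(request) and
    stub.StreamingInputCall.future(...) return futures, while
    response-streaming methods such as stub.StreamingOutputCall(request) are
    called directly and return an iterator of responses.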
"""
def testImportAttributes(self):
# check that we can access the generated module and its members.
self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
self.assertIsNotNone(
getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))
def testUpDown(self):
service = _CreateService()
self.assertIsNotNone(service.servicer_methods)
self.assertIsNotNone(service.server)
self.assertIsNotNone(service.stub)
service.server.stop(None)
def testIncompleteServicer(self):
service = _CreateIncompleteService()
request = request_pb2.SimpleRequest(response_size=13)
with self.assertRaises(grpc.RpcError) as exception_context:
service.stub.UnaryCall(request)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNIMPLEMENTED)
service.server.stop(None)
def testUnaryCall(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
response = service.stub.UnaryCall(request)
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFuture(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
# Check that the call does not block waiting for the server to respond.
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response = response_future.result()
expected_response = service.servicer_methods.UnaryCall(
request, 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testUnaryCallFutureExpired(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testUnaryCallFutureCancelled(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.pause():
response_future = service.stub.UnaryCall.future(request)
response_future.cancel()
self.assertTrue(response_future.cancelled())
self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testUnaryCallFutureFailed(self):
service = _CreateService()
request = request_pb2.SimpleRequest(response_size=13)
with service.servicer_methods.fail():
response_future = service.stub.UnaryCall.future(request)
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingOutputCall(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
expected_responses = service.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingOutputCallExpired(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.pause():
responses = service.stub.StreamingOutputCall(
request, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingOutputCallCancelled(self):
service = _CreateService()
request = _streaming_output_request()
responses = service.stub.StreamingOutputCall(request)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testStreamingOutputCallFailed(self):
service = _CreateService()
request = _streaming_output_request()
with service.servicer_methods.fail():
responses = service.stub.StreamingOutputCall(request)
self.assertIsNotNone(responses)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testStreamingInputCall(self):
service = _CreateService()
response = service.stub.StreamingInputCall(
_streaming_input_request_iterator())
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFuture(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response = response_future.result()
expected_response = service.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
service.server.stop(None)
def testStreamingInputCallFutureExpired(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIs(response_future.exception().code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testStreamingInputCallFutureCancelled(self):
service = _CreateService()
with service.servicer_methods.pause():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
service.server.stop(None)
def testStreamingInputCallFutureFailed(self):
service = _CreateService()
with service.servicer_methods.fail():
response_future = service.stub.StreamingInputCall.future(
_streaming_input_request_iterator())
self.assertIsNotNone(response_future.exception())
self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testFullDuplexCall(self):
service = _CreateService()
responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
expected_responses = service.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testFullDuplexCallExpired(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.pause():
responses = service.stub.FullDuplexCall(
request_iterator, timeout=test_constants.SHORT_TIMEOUT)
with self.assertRaises(grpc.RpcError) as exception_context:
list(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
def testFullDuplexCallCancelled(self):
service = _CreateService()
request_iterator = _full_duplex_request_iterator()
responses = service.stub.FullDuplexCall(request_iterator)
next(responses)
responses.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.CANCELLED)
service.server.stop(None)
def testFullDuplexCallFailed(self):
request_iterator = _full_duplex_request_iterator()
service = _CreateService()
with service.servicer_methods.fail():
responses = service.stub.FullDuplexCall(request_iterator)
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.UNKNOWN)
service.server.stop(None)
def testHalfDuplexCall(self):
service = _CreateService()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
expected_responses = service.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
service.server.stop(None)
def testHalfDuplexCallWedged(self):
condition = threading.Condition()
wait_cell = [False]
@contextlib.contextmanager
def wait(): # pylint: disable=invalid-name
# Where's Python 3's 'nonlocal' statement when you need it?
with condition:
wait_cell[0] = True
yield
with condition:
wait_cell[0] = False
condition.notify_all()
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
with condition:
while wait_cell[0]:
condition.wait()
service = _CreateService()
with wait():
responses = service.stub.HalfDuplexCall(
half_duplex_request_iterator(),
timeout=test_constants.SHORT_TIMEOUT)
# half-duplex waits for the client to send all info
with self.assertRaises(grpc.RpcError) as exception_context:
next(responses)
self.assertIs(exception_context.exception.code(),
grpc.StatusCode.DEADLINE_EXCEEDED)
service.server.stop(None)
@unittest.skipIf(sys.version_info[0] < 3 or sys.version_info[1] < 6,
"Unsupported on Python 2.")
class SimpleStubsPluginTest(unittest.TestCase):
servicer_methods = _ServicerMethods()
class Servicer(service_pb2_grpc.TestServiceServicer):
def UnaryCall(self, request, context):
return SimpleStubsPluginTest.servicer_methods.UnaryCall(
request, context)
def StreamingOutputCall(self, request, context):
return SimpleStubsPluginTest.servicer_methods.StreamingOutputCall(
request, context)
def StreamingInputCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.StreamingInputCall(
request_iterator, context)
def FullDuplexCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.FullDuplexCall(
request_iterator, context)
def HalfDuplexCall(self, request_iterator, context):
return SimpleStubsPluginTest.servicer_methods.HalfDuplexCall(
request_iterator, context)
def setUp(self):
super(SimpleStubsPluginTest, self).setUp()
self._server = test_common.test_server()
service_pb2_grpc.add_TestServiceServicer_to_server(
self.Servicer(), self._server)
self._port = self._server.add_insecure_port('[::]:0')
self._server.start()
self._target = 'localhost:{}'.format(self._port)
def tearDown(self):
self._server.stop(None)
super(SimpleStubsPluginTest, self).tearDown()
def testUnaryCall(self):
request = request_pb2.SimpleRequest(response_size=13)
response = service_pb2_grpc.TestService.UnaryCall(
request,
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_response = self.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testUnaryCallInsecureSugar(self):
request = request_pb2.SimpleRequest(response_size=13)
response = service_pb2_grpc.TestService.UnaryCall(request,
self._target,
insecure=True,
wait_for_ready=True)
expected_response = self.servicer_methods.UnaryCall(
request, 'not a real context!')
self.assertEqual(expected_response, response)
def testStreamingOutputCall(self):
request = _streaming_output_request()
expected_responses = self.servicer_methods.StreamingOutputCall(
request, 'not a real RpcContext!')
responses = service_pb2_grpc.TestService.StreamingOutputCall(
request,
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testStreamingInputCall(self):
response = service_pb2_grpc.TestService.StreamingInputCall(
_streaming_input_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_response = self.servicer_methods.StreamingInputCall(
_streaming_input_request_iterator(), 'not a real RpcContext!')
self.assertEqual(expected_response, response)
def testFullDuplexCall(self):
responses = service_pb2_grpc.TestService.FullDuplexCall(
_full_duplex_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_responses = self.servicer_methods.FullDuplexCall(
_full_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
def testHalfDuplexCall(self):
def half_duplex_request_iterator():
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=1, interval_us=0)
yield request
request = request_pb2.StreamingOutputCallRequest()
request.response_parameters.add(size=2, interval_us=0)
request.response_parameters.add(size=3, interval_us=0)
yield request
responses = service_pb2_grpc.TestService.HalfDuplexCall(
half_duplex_request_iterator(),
self._target,
channel_credentials=grpc.experimental.insecure_channel_credentials(
),
wait_for_ready=True)
expected_responses = self.servicer_methods.HalfDuplexCall(
half_duplex_request_iterator(), 'not a real RpcContext!')
for expected_response, response in moves.zip_longest(
expected_responses, responses):
self.assertEqual(expected_response, response)
class ModuleMainTest(unittest.TestCase):
"""Test case for running `python -m grpc_tools.protoc`.
"""
def test_clean_output(self):
if sys.executable is None:
            raise unittest.SkipTest(
                "Running on an interpreter that cannot be invoked from the CLI.")
proto_dir_path = os.path.join("src", "proto")
test_proto_path = os.path.join(proto_dir_path, "grpc", "testing",
"empty.proto")
streams = tuple(tempfile.TemporaryFile() for _ in range(2))
work_dir = tempfile.mkdtemp()
try:
invocation = (sys.executable, "-m", "grpc_tools.protoc",
"--proto_path", proto_dir_path, "--python_out",
work_dir, "--grpc_python_out", work_dir,
test_proto_path)
proc = subprocess.Popen(invocation,
stdout=streams[0],
stderr=streams[1])
proc.wait()
outs = []
for stream in streams:
stream.seek(0)
self.assertEqual(0, len(stream.read()))
self.assertEqual(0, proc.returncode)
except Exception: # pylint: disable=broad-except
shutil.rmtree(work_dir)
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -2,189,290,163,166,315,000 | -5,434,120,473,182,413,000 | 40.21267 | 83 | 0.645001 | false |
mpeuster/estate | experiments/scale-down-hack/pox/pox/messenger/__init__.py | 37 | 19551 | # Copyright 2011,2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The POX Messenger system.
The Messenger system is a way to build services in POX that can be
consumed by external clients.
Sometimes a controller might need to interact with the outside world.
Sometimes you need to integrate with an existing piece of software and
maybe you don't get to choose how you communicate with it. Other times,
you have the opportunity and burden of rolling your own. The Messenger
system is meant to help you with the latter case.
In short, channels are a system for communicating between POX and
external programs by exchanging messages encoded in JSON. It is intended
to be quite general, both in the communication models it supports and in
the transports it supports (as of this writing, it supports a
straightforward TCP socket transport and an HTTP transport). Any
service written to use the Messenger should theoretically be usable via
any transport.
*Connections* are somehow established when a client connects via some
*Transport*. The server can individually send messages to a specific client.
A client can send messages to a *Channel* on the server. A client can also
become a member of a channel, after which it will receive any messages
the server sends to that channel. There is always a default channel with
no name.
Channels can either be permanent or temporary. Temporary channels are
automatically destroyed when they no longer contain any members.
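Illustrative example (added for clarity; the channel name "chat" is made up):
over the TCP transport a client might exchange JSON messages such as:
  <- {"CHANNEL": "", "cmd": "welcome", "session_id": "..."}
  -> {"CHANNEL": "", "cmd": "join_channel", "channel": "chat"}
  -> {"CHANNEL": "chat", "hello": "world"}   (raises MessageReceived on "chat")
where "->" is client-to-POX and "<-" is POX-to-client.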
"""
from pox.lib.revent.revent import *
from pox.core import core as core
import json
import time
import random
import hashlib
from base64 import b32encode
log = core.getLogger()
# JSON decoder used by default
defaultDecoder = json.JSONDecoder()
class ChannelJoin (Event):
""" Fired on a channel when a client joins. """
def __init__ (self, connection, channel, msg = {}):
Event.__init__(self)
self.con = connection
self.channel = channel
self.msg = msg
class ConnectionClosed (Event):
""" Fired on a connection when it closes. """
def __init__ (self, connection):
Event.__init__(self)
self.con = connection
class ChannelLeave (Event):
""" Fired on a channel when a client leaves. """
def __init__ (self, connection, channel):
Event.__init__(self)
self.con = connection
self.channel = channel
class ChannelCreate (Event):
""" Fired on a Nexus when a channel is created. """
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class ChannelDestroy (Event):
"""
Fired on the channel and its Nexus right before a channel is destroyed.
Set .keep = True to keep the channel after all.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
self.keep = False
class ChannelDestroyed (Event):
"""
Fired on the channel and its Nexus right after a channel is destroyed.
"""
def __init__ (self, channel):
Event.__init__(self)
self.channel = channel
class MissingChannel (Event):
"""
  Fired on a Nexus when a message has been received for a non-existent channel.
You can create the channel in response to this.
"""
def __init__ (self, connection, channel_name, msg):
Event.__init__(self)
self.con = connection
self.channel_name = channel_name
self.msg = msg
class MessageReceived (Event):
"""
  Fired by a channel when a message has been received.
Always fired on the Connection itself. Also fired on the corresponding
Channel object as specified by the CHANNEL key.
The listener looks like:
def _handle_MessageReceived (event, msg):
"""
def __init__ (self, connection, channel, msg):
Event.__init__(self)
self.con = connection
self.msg = msg
self.channel = channel
def is_to_channel (self, channel):
"""
Returns True if this message is to the given channel
"""
if isinstance(channel, Channel):
channel = channel.name
if channel == self.channel: return True
if channel in self.channel: return True
return False
def _invoke (self, handler, *args, **kw):
# Special handling -- pass the message
return handler(self, self.msg, *args, **kw)
def _get_nexus (nexus):
if nexus is None: nexus = "MessengerNexus"
if isinstance(nexus, str):
if not core.hasComponent(nexus):
#TODO: Wait for channel Nexus
s = "MessengerNexus %s is not available" % (nexus,)
log.error(s)
raise RuntimeError(s)
return getattr(core, nexus)
assert isinstance(nexus, MessengerNexus)
return nexus
class Transport (object):
def __init__ (self, nexus):
self._nexus = _get_nexus(nexus)
def _forget (self, connection):
""" Forget about a connection """
raise RuntimeError("Not implemented")
class Connection (EventMixin):
"""
Superclass for Connections.
This could actually be a bit thinner, if someone wants to clean it up.
Maintains the state and handles message parsing and dispatch for a
single connection.
"""
_eventMixin_events = set([
MessageReceived,
ConnectionClosed,
])
def __init__ (self, transport):
"""
transport is the source of the connection (e.g, TCPTransport).
"""
EventMixin.__init__(self)
self._is_connected = True
self._transport = transport
self._newlines = False
    # Transports that don't do their own encapsulation can use _rx_raw(),
# which uses this. (Such should probably be broken into a subclass.)
self._buf = bytes()
key,num = self._transport._nexus.generate_session()
self._session_id,self._session_num = key,num
def _send_welcome (self):
"""
Send a message to a client so they know they're connected
"""
self.send({"CHANNEL":"","cmd":"welcome","session_id":self._session_id})
def _close (self):
"""
Called internally to shut the connection down.
"""
if self._is_connected is False: return
self._transport._forget(self)
self._is_connected = False
for name,chan in self._transport._nexus._channels.items():
chan._remove_member(self)
self.raiseEventNoErrors(ConnectionClosed, self)
#self._transport._nexus.raiseEventNoErrors(ConnectionClosed, self)
def send (self, whatever):
"""
Send data over the connection.
It will first be encoded into JSON, and optionally followed with
a newline. Ultimately, it will be passed to send_raw() to actually
be sent.
"""
if self._is_connected is False: return False
s = json.dumps(whatever, default=str)
if self._newlines: s += "\n"
self.send_raw(s)
return True
def send_raw (self, data):
"""
This method should actually send data out over the connection.
Subclasses need to implement this.
"""
raise RuntimeError("Not implemented")
@property
def is_connected (self):
"""
True if this Connection is still connected.
"""
return self._is_connected
def _rx_message (self, msg):
"""
Raises events when a complete message is available.
Subclasses may want to call this when they have a new message
    available.  See _rx_raw().
"""
e = self.raiseEventNoErrors(MessageReceived,self,msg.get('CHANNEL'),msg)
self._transport._nexus._rx_message(self, msg)
def _rx_raw (self, data):
"""
If your subclass receives a stream instead of discrete messages, this
    method can parse out individual messages and call _rx_message() when
it has full messages.
"""
if len(data) == 0: return
if len(self._buf) == 0:
if data[0].isspace():
self._buf = data.lstrip()
else:
self._buf = data
else:
self._buf += data
while len(self._buf) > 0:
try:
msg, l = defaultDecoder.raw_decode(self._buf)
except:
# Need more data before it's a valid message
# (.. or the stream is corrupt and things will never be okay
# ever again)
return
self._buf = self._buf[l:]
if len(self._buf) != 0 and self._buf[0].isspace():
self._buf = self._buf.lstrip()
self._rx_message(msg)
def __str__ (self):
"""
Subclasses should implement better versions of this.
"""
return "<%s/%s/%i>" % (self.__class__.__name__, self._session_id,
self._session_num)
def close (self):
"""
Close the connection.
"""
self._close()
class Channel (EventMixin):
"""
Allows one to easily listen to only messages that have a CHANNEL key
with a specific name.
Generally you will not create these classes directly, but by calling
  get_channel() on the MessengerNexus.
"""
_eventMixin_events = set([
MessageReceived,
    ChannelJoin, # Immediately when a connection goes up
ChannelLeave, # When a connection goes down
ChannelDestroy,
ChannelDestroyed,
])
def __init__ (self, name, nexus = None, temporary = False):
"""
name is the name for the channel (i.e., the value for the messages'
CHANNEL key).
nexus is the specific MessengerNexus with which this channel is to be
associated (defaults to core.MessengerNexus).
"""
EventMixin.__init__(self)
assert isinstance(name, basestring)
self._name = name
self._nexus = _get_nexus(nexus)
self._nexus._channels[name] = self
self.temporary = temporary
self._members = set() # Member Connections
@property
def name (self):
return self._name
def _destroy (self):
""" Remove channel """
e = self.raiseEvent(ChannelDestroy, self)
if e:
if e.keep: return False
self._nexus.raiseEvent(e)
if e.keep: return False
del self._nexus._channels[self._name]
    # We can't just do the following because then listeners
# can't tell if the channel is now empty...
#for sub in set(self._members):
# sub.raiseEvent(ChannelLeave, sub, self)
#
#self._members.clear()
# .. so do the following really straightforward...
for sub in set(self._members):
self._remove_member(sub, allow_destroy = False)
e = ChannelDestroyed(self)
self.raiseEvent(e)
self._nexus.raiseEvent(e)
def _add_member (self, con, msg = {}):
if con in self._members: return
self._members.add(con)
self.raiseEvent(ChannelJoin, con, self, msg)
def _remove_member (self, con, allow_destroy = True):
if con not in self._members: return
self._members.remove(con)
self.raiseEvent(ChannelLeave, con, self)
if not allow_destroy: return
if self.temporary is True:
if len(self._members) == 0:
self._destroy()
def send (self, msg):
d = dict(msg)
d['CHANNEL'] = self._name
for r in list(self._members):
if not r.is_connected: continue
r.send(d)
def __str__ (self):
return "<Channel " + self.name + ">"
def reply (_msg, **kw):
if not isinstance(_msg, dict):
# We'll also take an event...
_msg = _msg.msg
kw['CHANNEL'] = _msg.get('CHANNEL')
if 'XID' in _msg: kw['XID'] = _msg.get('XID')
return kw
class ChannelBot (object):
"""
A very simple framework for writing "bots" that respond to messages
on a channel.
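  Sketch of a subclass (illustrative, not part of the original code): a message
  such as {"CHANNEL": "math", "cmd": "add", "a": 1, "b": 2} is dispatched to
  _exec_cmd_add(); the channel name and message keys here are assumptions.
    class MathBot (ChannelBot):
      def _exec_cmd_add (self, event):
        self.reply(event, result = event.msg.get('a', 0) + event.msg.get('b', 0))
    MathBot("math")  # creates/joins the "math" channel on core.MessengerNexus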
"""
def __str__ (self):
return "<%s@%s>" % (self.__class__.__name__, self.channel)
def __init__ (self, channel, nexus = None, weak = False, extra = {}):
self._startup(channel, nexus, weak, extra)
def _startup (self, channel, nexus = None, weak = False, extra = {}):
self._nexus = _get_nexus(nexus)
if isinstance(channel, Channel):
self.channel = channel
else:
self.channel = self._nexus.get_channel(channel, create=True)
self.listeners = self.channel.addListeners(self, weak = weak)
self.prefixes = None
self._init(extra)
if self.prefixes is None:
self.prefixes = []
for n in dir(self):
if n.startswith("_exec_"):
n = n.split("_")[2]
self.prefixes.append(n)
def _handle_ChannelDestroyed (self, event):
self.channel.removeListeners(self.listeners)
self._destroyed()
def _handle_ChannelJoin (self, event):
self._join(event, event.con, event.msg)
def _handle_ChannelLeave (self, event):
self._leave(event.con, len(self.channel._members) == 0)
def _handle_MessageReceived (self, event, msg):
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_%s_%s" % (prefix, str(event.msg[prefix]))
if hasattr(self, cmd):
getattr(self, cmd)(event)
return #TODO: Return val?
for prefix in self.prefixes:
if prefix in event.msg:
cmd = "_exec_" + prefix
if hasattr(self, cmd):
getattr(self, cmd)(event, msg[prefix])
return #TODO: Return val?
self._unhandled(event)
def _unhandled (self, event):
""" Called when no command found """
pass
def _join (self, event, connection, msg):
""" Called when a connection joins """
pass
def _leave (self, connection, empty):
"""
Called when a connection leaves
If channel now has no members, empty is True
"""
pass
def _destroyed (self):
""" Called when channel is destroyed """
pass
def _init (self, extra):
"""
Called during initialization
'extra' is any additional information passed in when initializing
the bot. In particular, this may be the message that goes along
with its invitation into a channel.
"""
pass
def reply (__self, __event, **kw):
"""
Unicast reply to a specific message.
"""
__event.con.send(reply(__event, **kw))
def send (__self, __msg={}, **kw):
"""
Send a message to all members of this channel.
"""
m = {}
m.update(__msg)
m.update(kw)
__self.channel.send(m)
class DefaultChannelBot (ChannelBot):
def _init (self, extra):
self._bots = {}
def add_bot (self, bot, name = None):
"""
    Registers a bot (a ChannelBot subclass) so that it can be
    invited to other channels.
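    For example (illustrative), after core.MessengerNexus.default_bot.add_bot(MathBot)
    a client can send {"CHANNEL": "", "cmd": "invite", "bot": "MathBot",
    "channel": "math"} to have a MathBot created on the "math" channel
    (handled by _exec_cmd_invite below).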
"""
assert issubclass(bot, ChannelBot)
if name is None:
name = bot.__name__
self._bots[name] = bot
def _exec_newlines_False (self, event):
event.con._newlines = False
def _exec_newlines_True (self, event):
event.con._newlines = True
def _exec_cmd_invite (self, event):
"""
Invites a bot that has been registered with add_bot() to a channel.
Note that you can invite a bot to an empty (new) temporary channel.
It will stay until the first member leaves.
"""
botname = event.msg.get('bot')
botclass = self._bots.get(botname)
channel = event.msg.get('channel')
new_channel = False
if channel is None:
new_channel = True
channel = self._gen_channel_name(event.msg.get("prefix", "temp"))
chan = self._nexus.get_channel(channel, create=True, temporary=True)
if chan is None:
#TODO: send an error
log.warning("A bot was invited to a nonexistent channel (%s)"
% (channel,))
return
if botclass is None:
#TODO: send an error
log.warning("A nonexistent bot (%s) was invited to a channel"
% (botname,))
return
bot = botclass(channel, self._nexus)
if new_channel:
self.reply(event, new_channel = new_channel)
def _unhandled (self, event):
log.warn("Default channel got unknown command: "
+ str(event.msg.get('cmd')))
def _gen_channel_name (self, prefix = "temp"):
""" Makes up a channel name """
prefix += "_"
import random
while True:
# Sloppy
r = random.randint(1, 100000)
n = prefix + str(r)
if r not in self._nexus._channels:
break
return n
def _exec_cmd_new_channel (self, event):
""" Generates a new channel with random name """
prefix = event.msg.get('prefix', 'temp')
n = self._gen_channel_name(prefix)
ch = self._nexus.get_channel(n, create=True, temporary=True)
ch._add_member(event.con, event.msg)
self.reply(event, new_channel = n)
def _exec_cmd_join_channel (self, event):
""" Joins/creates a channel """
temp = event.msg.get('temporary', True) # Default temporary!
ch = self._nexus.get_channel(event.msg['channel'], temporary=temp)
if ch is None: return
ch._add_member(event.con, event.msg)
def _exec_cmd_leave_channel (self, event):
ch = self._nexus.get_channel(event.msg['channel'])
if ch is None: return
ch._remove_member(event.con)
def _exec_test (self, event, value):
log.info("Default channel got: " + str(value))
self.reply(event, test = value.upper())
class MessengerNexus (EventMixin):
"""
Transports, Channels, etc. are all associated with a MessengerNexus.
Typically, there is only one, and it is registered as
pox.core.MessengerNexus
"""
_eventMixin_events = set([
MissingChannel, # When a msg arrives to nonexistent channel
ChannelDestroy,
ChannelDestroyed,
ChannelCreate,
])
def __init__ (self):
EventMixin.__init__(self)
self._channels = {} # name -> Channel
self.default_bot = DefaultChannelBot("", self)
self._next_ses = 1
self._session_salt = str(time.time())
def generate_session (self):
"""
Return a new session ID tuple (key, num)
The key is a unique and not-trivial-to-guess alphanumeric value
associated with the session.
The num is a unique numerical value associated with the session.
"""
r = self._next_ses
self._next_ses += 1
key = str(random.random()) + str(time.time()) + str(r)
key += str(id(key)) + self._session_salt
key = b32encode(hashlib.md5(key).digest()).upper().replace('=','')
def alphahex (r):
""" base 16 on digits 'a' through 'p' """
r=hex(r)[2:].lower()
return ''.join(chr((10 if ord(x) >= 97 else 49) + ord(x)) for x in r)
key = alphahex(r) + key
return key,r
def get_channel (self, name, create = True, temporary = False):
if name is None: name = ""
if name in self._channels:
return self._channels[name]
elif create:
c = Channel(name, self, temporary = temporary)
self.raiseEvent(ChannelCreate, c)
return c
else:
return None
def _rx_message (self, con, msg):
"""
Dispatches messages to listeners of this nexus and to its Channels.
Called by Connections.
"""
ret = False
assert isinstance(msg, dict)
if isinstance(msg, dict):
channels = msg.get('CHANNEL')
if channels is None:
channels = [""]
if not isinstance(channels, list):
channels = [channels]
for cname in channels:
channel = self.get_channel(cname, create=False)
if channel is None:
e = self.raiseEvent(MissingChannel, con, cname, msg)
if e is not None: cname = e.channel_name
channel = self.get_channel(cname, create=False)
if channel is not None:
#print "raise on", channel
channel.raiseEvent(MessageReceived, con, channel, msg)
ret = True
return ret
def launch ():
core.registerNew(MessengerNexus)
| apache-2.0 | 3,449,489,011,653,026,000 | 6,335,955,849,178,490,000 | 27.921598 | 78 | 0.645287 | false |
gingerboy92/android_kernel_xiaomi_msm8916---messedup | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
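#
# For reference, the parser below expects colon-separated lines of the form
# "cmd: opcode: threadid: data".  A made-up fragment accepted by this script
# could look like:
#
#   C: schedfifo:  0: 80
#   C: locknowait: 0: 0
#   W: locked:     0: 0
#   C: unlock:     0: 0
#   T: unlocked:   0: 0
#
# "C" issues a command, "W" waits until the status matches, and "T" tests the
# status once, failing immediately if it does not match.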
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 | -7,006,820,439,081,693,000 | -5,681,716,876,674,915,000 | 23.122727 | 70 | 0.49463 | false |
le9i0nx/ansible | lib/ansible/plugins/lookup/together.py | 57 | 2150 | # (c) 2013, Bradley Young <[email protected]>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
    lookup: together
author: Bradley Young <[email protected]>
version_added: '1.3'
    short_description: merges lists into synchronized list
description:
- Creates a list with the iterated elements of the supplied lists
- "To clarify with an example, [ 'a', 'b' ] and [ 1, 2 ] turn into [ ('a',1), ('b', 2) ]"
      - This is basically the same as the 'zip_longest' filter and Python function
- Any 'unbalanced' elements will be substituted with 'None'
options:
_terms:
description: list of lists to merge
required: True
"""
EXAMPLES = """
- name: item.0 returns from the 'a' list, item.1 returns from the '1' list
debug:
msg: "{{ item.0 }} and {{ item.1 }}"
with_together:
- ['a', 'b', 'c', 'd']
- [1, 2, 3, 4]
"""
RETURN = """
_list:
    description: synchronized list
"""
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import zip_longest
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
Replace any empty spots in 2nd array with None:
[1, 2], [3] -> [1, 3], [2, None]
"""
def _lookup_variables(self, terms):
results = []
for x in terms:
intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader)
results.append(intermediate)
return results
def run(self, terms, variables=None, **kwargs):
terms = self._lookup_variables(terms)
my_list = terms[:]
if len(my_list) == 0:
raise AnsibleError("with_together requires at least one element in each list")
return [self._flatten(x) for x in zip_longest(*my_list, fillvalue=None)]
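# Rough sketch of the result for the EXAMPLES above: run() receives
# [['a', 'b', 'c', 'd'], [1, 2, 3, 4]], zip_longest() pairs them up as
# ('a', 1), ('b', 2), ('c', 3), ('d', 4), and _flatten() turns each pair into
# a list, giving [['a', 1], ['b', 2], ['c', 3], ['d', 4]].  A shorter input
# list would be padded with None.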
| gpl-3.0 | 4,630,386,730,101,940,000 | -8,974,924,502,478,427,000 | 32.076923 | 101 | 0.630698 | false |
jmwright/cadquery-x | gui/libs/future/backports/email/feedparser.py | 82 | 22736 | # Copyright (C) 2004-2006 Python Software Foundation
# Authors: Baxter, Wouters and Warsaw
# Contact: [email protected]
"""FeedParser - An email feed parser.
The feed parser implements an interface for incrementally parsing an email
message, line by line. This has advantages for certain applications, such as
those reading email messages off a socket.
FeedParser.feed() is the primary interface for pushing new data into the
parser. It returns when there's nothing more it can do with the available
data. When you have no more data to push into the parser, call .close().
This completes the parsing and returns the root message object.
The other advantage of this parser is that it will never raise a parsing
exception. Instead, when it finds something unexpected, it adds a 'defect' to
the current message. Defects are just instances that live on the message
object's .defects attribute.
"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future.builtins import object, range, super
from future.utils import implements_iterator, PY3
__all__ = ['FeedParser', 'BytesFeedParser']
import re
from future.backports.email import errors
from future.backports.email import message
from future.backports.email._policybase import compat32
NLCRE = re.compile('\r\n|\r|\n')
NLCRE_bol = re.compile('(\r\n|\r|\n)')
NLCRE_eol = re.compile('(\r\n|\r|\n)\Z')
NLCRE_crack = re.compile('(\r\n|\r|\n)')
# RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'
NeedMoreData = object()
@implements_iterator
class BufferedSubFile(object):
"""A file-ish object that can have new data loaded into it.
You can also push and pop line-matching predicates onto a stack. When the
current predicate matches the current line, a false EOF response
(i.e. empty string) is returned instead. This lets the parser adhere to a
simple abstraction -- it parses until EOF closes the current message.
"""
def __init__(self):
# The last partial line pushed into this object.
self._partial = ''
# The list of full, pushed lines, in reverse order
self._lines = []
# The stack of false-EOF checking predicates.
self._eofstack = []
# A flag indicating whether the file has been closed or not.
self._closed = False
def push_eof_matcher(self, pred):
self._eofstack.append(pred)
def pop_eof_matcher(self):
return self._eofstack.pop()
def close(self):
# Don't forget any trailing partial line.
self._lines.append(self._partial)
self._partial = ''
self._closed = True
def readline(self):
if not self._lines:
if self._closed:
return ''
return NeedMoreData
# Pop the line off the stack and see if it matches the current
# false-EOF predicate.
line = self._lines.pop()
# RFC 2046, section 5.1.2 requires us to recognize outer level
# boundaries at any level of inner nesting. Do this, but be sure it's
# in the order of most to least nested.
for ateof in self._eofstack[::-1]:
if ateof(line):
# We're at the false EOF. But push the last line back first.
self._lines.append(line)
return ''
return line
def unreadline(self, line):
# Let the consumer push a line back into the buffer.
assert line is not NeedMoreData
self._lines.append(line)
def push(self, data):
"""Push some new data into this object."""
# Handle any previous leftovers
data, self._partial = self._partial + data, ''
# Crack into lines, but preserve the newlines on the end of each
parts = NLCRE_crack.split(data)
# The *ahem* interesting behaviour of re.split when supplied grouping
# parentheses is that the last element of the resulting list is the
# data after the final RE. In the case of a NL/CR terminated string,
# this is the empty string.
self._partial = parts.pop()
#GAN 29Mar09 bugs 1555570, 1721862 Confusion at 8K boundary ending with \r:
# is there a \n to follow later?
if not self._partial and parts and parts[-1].endswith('\r'):
self._partial = parts.pop(-2)+parts.pop()
# parts is a list of strings, alternating between the line contents
# and the eol character(s). Gather up a list of lines after
# re-attaching the newlines.
lines = []
for i in range(len(parts) // 2):
lines.append(parts[i*2] + parts[i*2+1])
self.pushlines(lines)
def pushlines(self, lines):
# Reverse and insert at the front of the lines.
self._lines[:0] = lines[::-1]
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if line == '':
raise StopIteration
return line
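# Illustration of the BufferedSubFile contract (hedged; not used by the code
# itself): after sf.push('To: a@b\r\nSub'), readline() returns 'To: a@b\r\n'
# and then NeedMoreData, because 'Sub' is still an incomplete line.  A later
# sf.push('ject: hi\r\n') completes it, so the next readline() returns
# 'Subject: hi\r\n'.  Only close() flushes a trailing partial line.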
class FeedParser(object):
"""A feed-style parser of email."""
def __init__(self, _factory=message.Message, **_3to2kwargs):
if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
else: policy = compat32
"""_factory is called with no arguments to create a new message obj
The policy keyword specifies a policy object that controls a number of
aspects of the parser's operation. The default policy maintains
backward compatibility.
"""
self._factory = _factory
self.policy = policy
try:
_factory(policy=self.policy)
self._factory_kwds = lambda: {'policy': self.policy}
except TypeError:
# Assume this is an old-style factory
self._factory_kwds = lambda: {}
self._input = BufferedSubFile()
self._msgstack = []
if PY3:
self._parse = self._parsegen().__next__
else:
self._parse = self._parsegen().next
self._cur = None
self._last = None
self._headersonly = False
# Non-public interface for supporting Parser's headersonly flag
def _set_headersonly(self):
self._headersonly = True
def feed(self, data):
"""Push more data into the parser."""
self._input.push(data)
self._call_parse()
def _call_parse(self):
try:
self._parse()
except StopIteration:
pass
def close(self):
"""Parse all remaining data and return the root message object."""
self._input.close()
self._call_parse()
root = self._pop_message()
assert not self._msgstack
# Look for final set of defects
if root.get_content_maintype() == 'multipart' \
and not root.is_multipart():
defect = errors.MultipartInvariantViolationDefect()
self.policy.handle_defect(root, defect)
return root
def _new_message(self):
msg = self._factory(**self._factory_kwds())
if self._cur and self._cur.get_content_type() == 'multipart/digest':
msg.set_default_type('message/rfc822')
if self._msgstack:
self._msgstack[-1].attach(msg)
self._msgstack.append(msg)
self._cur = msg
self._last = msg
def _pop_message(self):
retval = self._msgstack.pop()
if self._msgstack:
self._cur = self._msgstack[-1]
else:
self._cur = None
return retval
def _parsegen(self):
# Create a new message and start by parsing headers.
self._new_message()
headers = []
# Collect the headers, searching for a line that doesn't match the RFC
# 2822 header or continuation pattern (including an empty line).
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
if not headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if not NLCRE.match(line):
defect = errors.MissingHeaderBodySeparatorDefect()
self.policy.handle_defect(self._cur, defect)
self._input.unreadline(line)
break
headers.append(line)
# Done with the headers, so parse them and figure out what we're
# supposed to see in the body of the message.
self._parse_headers(headers)
# Headers-only parsing is a backwards compatibility hack, which was
# necessary in the older parser, which could raise errors. All
# remaining lines in the input are thrown into the message body.
if self._headersonly:
lines = []
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
if self._cur.get_content_type() == 'message/delivery-status':
# message/delivery-status contains blocks of headers separated by
# a blank line. We'll represent each header block as a separate
# nested message object, but the processing is a bit different
# than standard message/* types because there is no body for the
# nested messages. A blank line separates the subparts.
while True:
self._input.push_eof_matcher(NLCRE.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
msg = self._pop_message()
# We need to pop the EOF matcher in order to tell if we're at
# the end of the current file, not the end of the last block
# of message headers.
self._input.pop_eof_matcher()
# The input stream must be sitting at the newline or at the
# EOF. We want to see if we're at the end of this subpart, so
# first consume the blank line, then test the next line to see
# if we're at this subpart's EOF.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
break
if line == '':
break
# Not at EOF so this is a line we're going to need.
self._input.unreadline(line)
return
if self._cur.get_content_maintype() == 'message':
# The message claims to be a message/* type, then what follows is
# another RFC 2822 message.
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
self._pop_message()
return
if self._cur.get_content_maintype() == 'multipart':
boundary = self._cur.get_boundary()
if boundary is None:
# The message /claims/ to be a multipart but it has not
# defined a boundary. That's a problem which we'll handle by
# reading everything until the EOF and marking the message as
# defective.
defect = errors.NoBoundaryInMultipartDefect()
self.policy.handle_defect(self._cur, defect)
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
return
# Make sure a valid content type was specified per RFC 2045:6.4.
if (self._cur.get('content-transfer-encoding', '8bit').lower()
not in ('7bit', '8bit', 'binary')):
defect = errors.InvalidMultipartContentTransferEncodingDefect()
self.policy.handle_defect(self._cur, defect)
# Create a line match predicate which matches the inter-part
# boundary as well as the end-of-multipart boundary. Don't push
# this onto the input stream until we've scanned past the
# preamble.
separator = '--' + boundary
boundaryre = re.compile(
'(?P<sep>' + re.escape(separator) +
r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
capturing_preamble = True
preamble = []
linesep = False
close_boundary_seen = False
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
if line == '':
break
mo = boundaryre.match(line)
if mo:
# If we're looking at the end boundary, we're done with
# this multipart. If there was a newline at the end of
# the closing boundary, then we need to initialize the
# epilogue with the empty string (see below).
if mo.group('end'):
close_boundary_seen = True
linesep = mo.group('linesep')
break
# We saw an inter-part boundary. Were we in the preamble?
if capturing_preamble:
if preamble:
# According to RFC 2046, the last newline belongs
# to the boundary.
lastline = preamble[-1]
eolmo = NLCRE_eol.search(lastline)
if eolmo:
preamble[-1] = lastline[:-len(eolmo.group(0))]
self._cur.preamble = EMPTYSTRING.join(preamble)
capturing_preamble = False
self._input.unreadline(line)
continue
# We saw a boundary separating two parts. Consume any
# multiple boundary lines that may be following. Our
# interpretation of RFC 2046 BNF grammar does not produce
# body parts within such double boundaries.
while True:
line = self._input.readline()
if line is NeedMoreData:
yield NeedMoreData
continue
mo = boundaryre.match(line)
if not mo:
self._input.unreadline(line)
break
# Recurse to parse this subpart; the input stream points
# at the subpart's first line.
self._input.push_eof_matcher(boundaryre.match)
for retval in self._parsegen():
if retval is NeedMoreData:
yield NeedMoreData
continue
break
# Because of RFC 2046, the newline preceding the boundary
# separator actually belongs to the boundary, not the
# previous subpart's payload (or epilogue if the previous
# part is a multipart).
if self._last.get_content_maintype() == 'multipart':
epilogue = self._last.epilogue
if epilogue == '':
self._last.epilogue = None
elif epilogue is not None:
mo = NLCRE_eol.search(epilogue)
if mo:
end = len(mo.group(0))
self._last.epilogue = epilogue[:-end]
else:
payload = self._last._payload
if isinstance(payload, str):
mo = NLCRE_eol.search(payload)
if mo:
payload = payload[:-len(mo.group(0))]
self._last._payload = payload
self._input.pop_eof_matcher()
self._pop_message()
# Set the multipart up for newline cleansing, which will
# happen if we're in a nested multipart.
self._last = self._cur
else:
# I think we must be in the preamble
assert capturing_preamble
preamble.append(line)
# We've seen either the EOF or the end boundary. If we're still
# capturing the preamble, we never saw the start boundary. Note
# that as a defect and store the captured text as the payload.
if capturing_preamble:
defect = errors.StartBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
self._cur.set_payload(EMPTYSTRING.join(preamble))
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# If we're not processing the preamble, then we might have seen
# EOF without seeing that end boundary...that is also a defect.
if not close_boundary_seen:
defect = errors.CloseBoundaryNotFoundDefect()
self.policy.handle_defect(self._cur, defect)
return
# Everything from here to the EOF is epilogue. If the end boundary
# ended in a newline, we'll need to make sure the epilogue isn't
# None
if linesep:
epilogue = ['']
else:
epilogue = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
epilogue.append(line)
# Any CRLF at the front of the epilogue is not technically part of
# the epilogue. Also, watch out for an empty string epilogue,
# which means a single newline.
if epilogue:
firstline = epilogue[0]
bolmo = NLCRE_bol.match(firstline)
if bolmo:
epilogue[0] = firstline[len(bolmo.group(0)):]
self._cur.epilogue = EMPTYSTRING.join(epilogue)
return
# Otherwise, it's some non-multipart type, so the entire rest of the
# file contents becomes the payload.
lines = []
for line in self._input:
if line is NeedMoreData:
yield NeedMoreData
continue
lines.append(line)
self._cur.set_payload(EMPTYSTRING.join(lines))
def _parse_headers(self, lines):
# Passed a list of lines that make up the headers for the current msg
lastheader = ''
lastvalue = []
for lineno, line in enumerate(lines):
# Check for continuation
if line[0] in ' \t':
if not lastheader:
# The first line of the headers was a continuation. This
# is illegal, so let's note the defect, store the illegal
# line, and ignore it for purposes of headers.
defect = errors.FirstHeaderLineIsContinuationDefect(line)
self.policy.handle_defect(self._cur, defect)
continue
lastvalue.append(line)
continue
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
lastheader, lastvalue = '', []
# Check for envelope header, i.e. unix-from
if line.startswith('From '):
if lineno == 0:
# Strip off the trailing newline
mo = NLCRE_eol.search(line)
if mo:
line = line[:-len(mo.group(0))]
self._cur.set_unixfrom(line)
continue
elif lineno == len(lines) - 1:
# Something looking like a unix-from at the end - it's
# probably the first line of the body, so push back the
# line and stop.
self._input.unreadline(line)
return
else:
# Weirdly placed unix-from line. Note this as a defect
# and ignore it.
defect = errors.MisplacedEnvelopeHeaderDefect(line)
self._cur.defects.append(defect)
continue
# Split the line on the colon separating field name from value.
# There will always be a colon, because if there wasn't the part of
# the parser that calls us would have started parsing the body.
i = line.find(':')
assert i>0, "_parse_headers fed line with no : and no leading WS"
lastheader = line[:i]
lastvalue = [line]
# Done with all the lines, so handle the last header.
if lastheader:
self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
class BytesFeedParser(FeedParser):
"""Like FeedParser, but feed accepts bytes."""
def feed(self, data):
super().feed(data.decode('ascii', 'surrogateescape'))
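# Minimal usage sketch, run only when this module is executed directly.  The
# message text is made up; it just demonstrates the feed()/close() protocol
# described in the module docstring.
if __name__ == '__main__':
    _parser = FeedParser()
    _parser.feed('Subject: demo\r\nFrom: someone@example.com\r\n')
    _parser.feed('\r\nhello world\r\n')
    _root = _parser.close()
    print(_root['subject']) # -> demo
    print(_root.get_payload()) # -> 'hello world\r\n'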
| lgpl-3.0 | 7,768,792,318,425,216,000 | -8,537,128,696,434,672,000 | 42.306667 | 93 | 0.537254 | false |
MarkTheF4rth/youtube-dl | youtube_dl/extractor/youtube.py | 20 | 92251 | # coding: utf-8
from __future__ import unicode_literals
import itertools
import json
import os.path
import re
import time
import traceback
from .common import InfoExtractor, SearchInfoExtractor
from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
encode_dict,
ExtractorError,
float_or_none,
get_element_by_attribute,
get_element_by_id,
int_or_none,
orderedSet,
parse_duration,
remove_start,
smuggle_url,
str_to_int,
unescapeHTML,
unified_strdate,
unsmuggle_url,
uppercase_escape,
ISO3166Utils,
)
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
_NETRC_MACHINE = 'youtube'
# If True it will raise an error if no login info is provided
_LOGIN_REQUIRED = False
def _set_language(self):
self._set_cookie(
'.youtube.com', 'PREF', 'f1=50000000&hl=en',
# YouTube sets the expire time to about two months
expire_time=time.time() + 2 * 30 * 24 * 3600)
def _ids_to_results(self, ids):
return [
self.url_result(vid_id, 'Youtube', video_id=vid_id)
for vid_id in ids]
def _login(self):
"""
Attempt to log in to YouTube.
True is returned if successful or skipped.
False is returned if login failed.
If _LOGIN_REQUIRED is set and no authentication was provided, an error is raised.
"""
(username, password) = self._get_login_info()
# No authentication to be performed
if username is None:
if self._LOGIN_REQUIRED:
raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
return True
login_page = self._download_webpage(
self._LOGIN_URL, None,
note='Downloading login page',
errnote='unable to fetch login page', fatal=False)
if login_page is False:
return
galx = self._search_regex(r'(?s)<input.+?name="GALX".+?value="(.+?)"',
login_page, 'Login GALX parameter')
# Log in
login_form_strs = {
'continue': 'https://www.youtube.com/signin?action_handle_signin=true&feature=sign_in_button&hl=en_US&nomobiletemp=1',
'Email': username,
'GALX': galx,
'Passwd': password,
'PersistentCookie': 'yes',
'_utf8': '霱',
'bgresponse': 'js_disabled',
'checkConnection': '',
'checkedDomains': 'youtube',
'dnConn': '',
'pstMsg': '0',
'rmShown': '1',
'secTok': '',
'signIn': 'Sign in',
'timeStmp': '',
'service': 'youtube',
'uilel': '3',
'hl': 'en_US',
}
login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
req = compat_urllib_request.Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
req, None,
note='Logging in', errnote='unable to log in', fatal=False)
if login_results is False:
return False
if re.search(r'id="errormsg_0_Passwd"', login_results) is not None:
raise ExtractorError('Please use your account password and a two-factor code instead of an application-specific password.', expected=True)
# Two-Factor
# TODO add SMS and phone call support - these require making a request and then prompting the user
if re.search(r'(?i)<form[^>]* id="challenge"', login_results) is not None:
tfa_code = self._get_tfa_info('2-step verification code')
if not tfa_code:
self._downloader.report_warning(
'Two-factor authentication required. Provide it either interactively or with --twofactor <code>'
'(Note that only TOTP (Google Authenticator App) codes work at this time.)')
return False
tfa_code = remove_start(tfa_code, 'G-')
tfa_form_strs = self._form_hidden_inputs('challenge', login_results)
tfa_form_strs.update({
'Pin': tfa_code,
'TrustDevice': 'on',
})
tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
tfa_req = compat_urllib_request.Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
tfa_req, None,
note='Submitting TFA code', errnote='unable to submit tfa', fatal=False)
if tfa_results is False:
return False
if re.search(r'(?i)<form[^>]* id="challenge"', tfa_results) is not None:
self._downloader.report_warning('Two-factor code expired or invalid. Please try again, or use a one-use backup code instead.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', tfa_results) is not None:
self._downloader.report_warning('unable to log in - did the page structure change?')
return False
if re.search(r'smsauth-interstitial-reviewsettings', tfa_results) is not None:
self._downloader.report_warning('Your Google account has a security notice. Please log in on your web browser, resolve the notice, and try again.')
return False
if re.search(r'(?i)<form[^>]* id="gaia_loginform"', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username or password')
return False
return True
def _real_initialize(self):
if self._downloader is None:
return
self._set_language()
if not self._login():
return
class YoutubeIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com'
_VALID_URL = r"""(?x)^
(
(?:https?://|//) # http(s):// or protocol-independent URL
(?:(?:(?:(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/|
(?:www\.)?deturl\.com/www\.youtube\.com/|
(?:www\.)?pwnyoutube\.com/|
(?:www\.)?yourepeat\.com/|
tube\.majestyc\.net/|
youtube\.googleapis\.com/) # the various hostnames, with wildcard subdomains
(?:.*?\#/)? # handle anchor (#/) redirect urls
(?: # the various things that can precede the ID:
(?:(?:v|embed|e)/(?!videoseries)) # v/ or embed/ or e/
|(?: # or the v= param in all its forms
(?:(?:watch|movie)(?:_popup)?(?:\.php)?/?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx)
(?:\?|\#!?) # the params delimiter ? or # or #!
(?:.*?&)?? # any other preceding param (like /?s=tuff&v=xxxx)
v=
)
))
|(?:
youtu\.be| # just youtu.be/xxxx
vid\.plus # or vid.plus/xxxx
)/
|(?:www\.)?cleanvideosearch\.com/media/action/yt/watch\?videoId=
)
)? # all until now is optional -> you can pass the naked ID
([0-9A-Za-z_-]{11}) # here is it! the YouTube video ID
(?!.*?&list=) # combined list/video URLs are handled by the playlist IE
(?(1).+)? # if we found the ID, everything can follow
$"""
_NEXT_URL_RE = r'[\?&]next_url=([^&]+)'
_formats = {
'5': {'ext': 'flv', 'width': 400, 'height': 240},
'6': {'ext': 'flv', 'width': 450, 'height': 270},
'13': {'ext': '3gp'},
'17': {'ext': '3gp', 'width': 176, 'height': 144},
'18': {'ext': 'mp4', 'width': 640, 'height': 360},
'22': {'ext': 'mp4', 'width': 1280, 'height': 720},
'34': {'ext': 'flv', 'width': 640, 'height': 360},
'35': {'ext': 'flv', 'width': 854, 'height': 480},
'36': {'ext': '3gp', 'width': 320, 'height': 240},
'37': {'ext': 'mp4', 'width': 1920, 'height': 1080},
'38': {'ext': 'mp4', 'width': 4096, 'height': 3072},
'43': {'ext': 'webm', 'width': 640, 'height': 360},
'44': {'ext': 'webm', 'width': 854, 'height': 480},
'45': {'ext': 'webm', 'width': 1280, 'height': 720},
'46': {'ext': 'webm', 'width': 1920, 'height': 1080},
'59': {'ext': 'mp4', 'width': 854, 'height': 480},
'78': {'ext': 'mp4', 'width': 854, 'height': 480},
# 3d videos
'82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'preference': -20},
'83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'preference': -20},
'84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'preference': -20},
'85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'preference': -20},
'100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'preference': -20},
'101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'preference': -20},
'102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'preference': -20},
# Apple HTTP Live Streaming
'92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'preference': -10},
'94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'preference': -10},
'95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'preference': -10},
'96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'preference': -10},
'132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'preference': -10},
'151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'preference': -10},
# DASH mp4 video
'133': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'134': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'135': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'136': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'137': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'138': {'ext': 'mp4', 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40}, # Height can vary (https://github.com/rg3/youtube-dl/issues/4559)
'160': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'264': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'298': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'299': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'h264'},
'266': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'h264'},
# Dash mp4 audio
'139': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 48, 'preference': -50, 'container': 'm4a_dash'},
'140': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 128, 'preference': -50, 'container': 'm4a_dash'},
'141': {'ext': 'm4a', 'format_note': 'DASH audio', 'acodec': 'aac', 'vcodec': 'none', 'abr': 256, 'preference': -50, 'container': 'm4a_dash'},
# Dash webm
'167': {'ext': 'webm', 'height': 360, 'width': 640, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'168': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'169': {'ext': 'webm', 'height': 720, 'width': 1280, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'218': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'219': {'ext': 'webm', 'height': 480, 'width': 854, 'format_note': 'DASH video', 'acodec': 'none', 'container': 'webm', 'vcodec': 'vp8', 'preference': -40},
'278': {'ext': 'webm', 'height': 144, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'container': 'webm', 'vcodec': 'vp9'},
'242': {'ext': 'webm', 'height': 240, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'243': {'ext': 'webm', 'height': 360, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'244': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'245': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'246': {'ext': 'webm', 'height': 480, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'247': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'248': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'271': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'272': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40},
'302': {'ext': 'webm', 'height': 720, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'303': {'ext': 'webm', 'height': 1080, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'308': {'ext': 'webm', 'height': 1440, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
'313': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'vcodec': 'vp9'},
'315': {'ext': 'webm', 'height': 2160, 'format_note': 'DASH video', 'acodec': 'none', 'preference': -40, 'fps': 60, 'vcodec': 'vp9'},
# Dash webm audio
'171': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 128, 'preference': -50},
'172': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'abr': 256, 'preference': -50},
# Dash webm audio with opus inside
'249': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 50, 'preference': -50},
'250': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 70, 'preference': -50},
'251': {'ext': 'webm', 'vcodec': 'none', 'format_note': 'DASH audio', 'acodec': 'opus', 'abr': 160, 'preference': -50},
# RTMP (unnamed)
'_rtmp': {'protocol': 'rtmp'},
}
IE_NAME = 'youtube'
_TESTS = [
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&t=1s&end=9',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
'start_time': 1,
'end_time': 9,
}
},
{
'url': 'http://www.youtube.com/watch?v=UxxajLWwzqY',
'note': 'Test generic use_cipher_signature video (#897)',
'info_dict': {
'id': 'UxxajLWwzqY',
'ext': 'mp4',
'upload_date': '20120506',
'title': 'Icona Pop - I Love It (feat. Charli XCX) [OFFICIAL VIDEO]',
'description': 'md5:782e8651347686cba06e58f71ab51773',
'tags': ['Icona Pop i love it', 'sweden', 'pop music', 'big beat records', 'big beat', 'charli',
'xcx', 'charli xcx', 'girls', 'hbo', 'i love it', "i don't care", 'icona', 'pop',
'iconic ep', 'iconic', 'love', 'it'],
'uploader': 'Icona Pop',
'uploader_id': 'IconaPop',
}
},
{
'url': 'https://www.youtube.com/watch?v=07FYdnEawAQ',
'note': 'Test VEVO video with age protection (#956)',
'info_dict': {
'id': '07FYdnEawAQ',
'ext': 'mp4',
'upload_date': '20130703',
'title': 'Justin Timberlake - Tunnel Vision (Explicit)',
'description': 'md5:64249768eec3bc4276236606ea996373',
'uploader': 'justintimberlakeVEVO',
'uploader_id': 'justintimberlakeVEVO',
'age_limit': 18,
}
},
{
'url': '//www.YouTube.com/watch?v=yZIXLfi8CZQ',
'note': 'Embed-only video (#1746)',
'info_dict': {
'id': 'yZIXLfi8CZQ',
'ext': 'mp4',
'upload_date': '20120608',
'title': 'Principal Sexually Assaults A Teacher - Episode 117 - 8th June 2012',
'description': 'md5:09b78bd971f1e3e289601dfba15ca4f7',
'uploader': 'SET India',
'uploader_id': 'setindia'
}
},
{
'url': 'http://www.youtube.com/watch?v=BaW_jenozKcj&v=UxxajLWwzqY',
'note': 'Use the first video ID in the URL',
'info_dict': {
'id': 'BaW_jenozKc',
'ext': 'mp4',
'title': 'youtube-dl test video "\'/\\ä↭𝕐',
'uploader': 'Philipp Hagemeister',
'uploader_id': 'phihag',
'upload_date': '20121002',
'description': 'test chars: "\'/\\ä↭𝕐\ntest URL: https://github.com/rg3/youtube-dl/issues/1892\n\nThis is a test video for youtube-dl.\n\nFor more information, contact [email protected] .',
'categories': ['Science & Technology'],
'tags': ['youtube-dl'],
'like_count': int,
'dislike_count': int,
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www.youtube.com/watch?v=a9LDPn-MO4I',
'note': '256k DASH audio (format 141) via DASH manifest',
'info_dict': {
'id': 'a9LDPn-MO4I',
'ext': 'm4a',
'upload_date': '20121002',
'uploader_id': '8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# DASH manifest with encrypted signature
{
'url': 'https://www.youtube.com/watch?v=IB3lcPjvWLA',
'info_dict': {
'id': 'IB3lcPjvWLA',
'ext': 'm4a',
'title': 'Afrojack, Spree Wilson - The Spark ft. Spree Wilson',
'description': 'md5:12e7067fa6735a77bdcbb58cb1187d2d',
'uploader': 'AfrojackVEVO',
'uploader_id': 'AfrojackVEVO',
'upload_date': '20131011',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# JS player signature function name containing $
{
'url': 'https://www.youtube.com/watch?v=nfWlot6h_JM',
'info_dict': {
'id': 'nfWlot6h_JM',
'ext': 'm4a',
'title': 'Taylor Swift - Shake It Off',
'description': 'md5:95f66187cd7c8b2c13eb78e1223b63c3',
'uploader': 'TaylorSwiftVEVO',
'uploader_id': 'TaylorSwiftVEVO',
'upload_date': '20140818',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '141',
},
},
# Controversy video
{
'url': 'https://www.youtube.com/watch?v=T4XJQO3qol8',
'info_dict': {
'id': 'T4XJQO3qol8',
'ext': 'mp4',
'upload_date': '20100909',
'uploader': 'The Amazing Atheist',
'uploader_id': 'TheAmazingAtheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms\n\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
# Normal age-gate video (No vevo, embed allowed)
{
'url': 'http://youtube.com/watch?v=HtVdAasjOgU',
'info_dict': {
'id': 'HtVdAasjOgU',
'ext': 'mp4',
'title': 'The Witcher 3: Wild Hunt - The Sword Of Destiny Trailer',
'description': 're:(?s).{100,}About the Game\n.*?The Witcher 3: Wild Hunt.{100,}',
'uploader': 'The Witcher',
'uploader_id': 'WitcherGame',
'upload_date': '20140605',
'age_limit': 18,
},
},
# Age-gate video with encrypted signature
{
'url': 'http://www.youtube.com/watch?v=6kLq3WMV1nU',
'info_dict': {
'id': '6kLq3WMV1nU',
'ext': 'mp4',
'title': 'Dedication To My Ex (Miss That) (Lyric Video)',
'description': 'md5:33765bb339e1b47e7e72b5490139bb41',
'uploader': 'LloydVEVO',
'uploader_id': 'LloydVEVO',
'upload_date': '20110629',
'age_limit': 18,
},
},
# video_info is None (https://github.com/rg3/youtube-dl/issues/4421)
{
'url': '__2ABJjxzNo',
'info_dict': {
'id': '__2ABJjxzNo',
'ext': 'mp4',
'upload_date': '20100430',
'uploader_id': 'deadmau5',
'description': 'md5:12c56784b8032162bb936a5f76d55360',
'uploader': 'deadmau5',
'title': 'Deadmau5 - Some Chords (HD)',
},
'expected_warnings': [
'DASH manifest missing',
]
},
# Olympics (https://github.com/rg3/youtube-dl/issues/4431)
{
'url': 'lqQg6PlCWgI',
'info_dict': {
'id': 'lqQg6PlCWgI',
'ext': 'mp4',
'upload_date': '20120724',
'uploader_id': 'olympic',
'description': 'HO09 - Women - GER-AUS - Hockey - 31 July 2012 - London 2012 Olympic Games',
'uploader': 'Olympics',
'title': 'Hockey - Women - GER-AUS - London 2012 Olympic Games',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Non-square pixels
{
'url': 'https://www.youtube.com/watch?v=_b-2C3KPAM0',
'info_dict': {
'id': '_b-2C3KPAM0',
'ext': 'mp4',
'stretched_ratio': 16 / 9.,
'upload_date': '20110310',
'uploader_id': 'AllenMeow',
'description': 'made by Wacom from Korea | 字幕&加油添醋 by TY\'s Allen | 感謝heylisa00cavey1001同學熱情提供梗及翻譯',
'uploader': '孫艾倫',
'title': '[A-made] 變態妍字幕版 太妍 我就是這樣的人',
},
},
# url_encoded_fmt_stream_map is empty string
{
'url': 'qEJwOuvDf7I',
'info_dict': {
'id': 'qEJwOuvDf7I',
'ext': 'webm',
'title': 'Обсуждение судебной практики по выборам 14 сентября 2014 года в Санкт-Петербурге',
'description': '',
'upload_date': '20150404',
'uploader_id': 'spbelect',
'uploader': 'Наблюдатели Петербурга',
},
'params': {
'skip_download': 'requires avconv',
}
},
# Extraction from multiple DASH manifests (https://github.com/rg3/youtube-dl/pull/6097)
{
'url': 'https://www.youtube.com/watch?v=FIl7x6_3R5Y',
'info_dict': {
'id': 'FIl7x6_3R5Y',
'ext': 'mp4',
'title': 'md5:7b81415841e02ecd4313668cde88737a',
'description': 'md5:116377fd2963b81ec4ce64b542173306',
'upload_date': '20150625',
'uploader_id': 'dorappi2000',
'uploader': 'dorappi2000',
'formats': 'mincount:33',
},
},
# DASH manifest with segment_list
{
'url': 'https://www.youtube.com/embed/CsmdDsKjzN8',
'md5': '8ce563a1d667b599d21064e982ab9e31',
'info_dict': {
'id': 'CsmdDsKjzN8',
'ext': 'mp4',
'upload_date': '20150501', # According to '<meta itemprop="datePublished"', but in other places it's 20150510
'uploader': 'Airtek',
'description': 'Retransmisión en directo de la XVIII media maratón de Zaragoza.',
'uploader_id': 'UCzTzUmjXxxacNnL8I3m4LnQ',
'title': 'Retransmisión XVIII Media maratón Zaragoza 2015',
},
'params': {
'youtube_include_dash_manifest': True,
'format': '135', # bestvideo
}
},
{
# Multifeed videos (multiple cameras), URL is for Main Camera
'url': 'https://www.youtube.com/watch?v=jqWvoWXjCVs',
'info_dict': {
'id': 'jqWvoWXjCVs',
'title': 'teamPGP: Rocket League Noob Stream',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
},
'playlist': [{
'info_dict': {
'id': 'jqWvoWXjCVs',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (Main Camera)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': '6h8e8xoXJzg',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (kreestuh)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'PUOgX5z9xZw',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (grizzle)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}, {
'info_dict': {
'id': 'teuwxikvS5k',
'ext': 'mp4',
'title': 'teamPGP: Rocket League Noob Stream (zim)',
'description': 'md5:dc7872fb300e143831327f1bae3af010',
'upload_date': '20150721',
'uploader': 'Beer Games Beer',
'uploader_id': 'beergamesbeer',
},
}],
'params': {
'skip_download': True,
},
},
{
'url': 'http://vid.plus/FlRa-iH7PGw',
'only_matching': True,
}
]
def __init__(self, *args, **kwargs):
super(YoutubeIE, self).__init__(*args, **kwargs)
self._player_cache = {}
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self.to_screen('%s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self.to_screen('%s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self.to_screen('%s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self.to_screen('RTMP download detected')
def _signature_cache_id(self, example_sig):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
def _extract_signature_function(self, video_id, player_url, example_sig):
id_m = re.match(
r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player(?:-new)?)?\.(?P<ext>[a-z]+)$',
player_url)
if not id_m:
raise ExtractorError('Cannot identify player %r' % player_url)
player_type = id_m.group('ext')
player_id = id_m.group('id')
# Read from filesystem cache
func_id = '%s_%s_%s' % (
player_type, player_id, self._signature_cache_id(example_sig))
assert os.path.basename(func_id) == func_id
cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
if cache_spec is not None:
return lambda s: ''.join(s[i] for i in cache_spec)
download_note = (
'Downloading player %s' % player_url
if self._downloader.params.get('verbose') else
'Downloading %s player %s' % (player_type, player_id)
)
if player_type == 'js':
code = self._download_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
res = self._parse_sig_js(code)
elif player_type == 'swf':
urlh = self._request_webpage(
player_url, video_id,
note=download_note,
errnote='Download of %s failed' % player_url)
code = urlh.read()
res = self._parse_sig_swf(code)
else:
assert False, 'Invalid player type %r' % player_type
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = res(test_string)
cache_spec = [ord(c) for c in cache_res]
self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
return res
def _print_sig_code(self, func, example_sig):
def gen_sig_code(idxs):
def _genslice(start, end, step):
starts = '' if start == 0 else str(start)
ends = (':%d' % (end + step)) if end + step >= 0 else ':'
steps = '' if step == 1 else (':%d' % step)
return 's[%s%s%s]' % (starts, ends, steps)
step = None
# Quelch pyflakes warnings - start will be set when step is set
start = '(Never used)'
for i, prev in zip(idxs[1:], idxs[:-1]):
if step is not None:
if i - prev == step:
continue
yield _genslice(start, prev, step)
step = None
continue
if i - prev in [-1, 1]:
step = i - prev
start = prev
continue
else:
yield 's[%d]' % prev
if step is None:
yield 's[%d]' % i
else:
yield _genslice(start, i, step)
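        # Worked example (hypothetical spec): for cache_spec == [0, 1, 2, 5]
        # the generator yields 's[:3]' followed by 's[5]', so the emitted
        # line becomes "return s[:3] + s[5]".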
test_string = ''.join(map(compat_chr, range(len(example_sig))))
cache_res = func(test_string)
cache_spec = [ord(c) for c in cache_res]
expr_code = ' + '.join(gen_sig_code(cache_spec))
signature_id_tuple = '(%s)' % (
', '.join(compat_str(len(p)) for p in example_sig.split('.')))
code = ('if tuple(len(p) for p in s.split(\'.\')) == %s:\n'
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _parse_sig_js(self, jscode):
funcname = self._search_regex(
r'\.sig\|\|([a-zA-Z0-9$]+)\(', jscode,
'Initial JS player signature function name')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
def _parse_sig_swf(self, file_contents):
swfi = SWFInterpreter(file_contents)
TARGET_CLASSNAME = 'SignatureDecipher'
searched_class = swfi.extract_class(TARGET_CLASSNAME)
initial_function = swfi.extract_function(searched_class, 'decipher')
return lambda s: initial_function([s])
def _decrypt_signature(self, s, video_id, player_url, age_gate=False):
"""Turn the encrypted s field into a working signature"""
if player_url is None:
raise ExtractorError('Cannot decrypt signature without player_url')
if player_url.startswith('//'):
player_url = 'https:' + player_url
try:
player_id = (player_url, self._signature_cache_id(s))
if player_id not in self._player_cache:
func = self._extract_signature_function(
video_id, player_url, s
)
self._player_cache[player_id] = func
func = self._player_cache[player_id]
if self._downloader.params.get('youtube_print_sig_code'):
self._print_sig_code(func, s)
return func(s)
except Exception as e:
tb = traceback.format_exc()
raise ExtractorError(
'Signature extraction failed: ' + tb, cause=e)
def _get_subtitles(self, video_id, webpage):
try:
subs_doc = self._download_xml(
'https://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
sub_lang_list = {}
for track in subs_doc.findall('track'):
lang = track.attrib['lang_code']
if lang in sub_lang_list:
continue
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
'name': track.attrib['name'].encode('utf-8'),
})
sub_formats.append({
'url': 'https://www.youtube.com/api/timedtext?' + params,
'ext': ext,
})
sub_lang_list[lang] = sub_formats
if not sub_lang_list:
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
return sub_lang_list
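    # The returned mapping is keyed by language code, e.g. (illustrative):
    #   {'en': [{'url': 'https://www.youtube.com/api/timedtext?...', 'ext': 'sbv'},
    #           {'url': '...', 'ext': 'vtt'},
    #           {'url': '...', 'ext': 'srt'}]}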
def _get_automatic_captions(self, video_id, webpage):
"""We need the webpage for getting the captions url, pass it as an
argument to speed up the process."""
self.to_screen('%s: Looking for automatic captions' % video_id)
mobj = re.search(r';ytplayer.config = ({.*?});', webpage)
err_msg = 'Couldn\'t find automatic captions for %s' % video_id
if mobj is None:
self._downloader.report_warning(err_msg)
return {}
player_config = json.loads(mobj.group(1))
try:
args = player_config['args']
caption_url = args['ttsurl']
timestamp = args['timestamp']
# We get the available subtitles
list_params = compat_urllib_parse.urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
})
list_url = caption_url + '&' + list_params
caption_list = self._download_xml(list_url, video_id)
original_lang_node = caption_list.find('track')
if original_lang_node is None:
self._downloader.report_warning('Video doesn\'t have automatic captions')
return {}
original_lang = original_lang_node.attrib['lang_code']
caption_kind = original_lang_node.attrib.get('kind', '')
sub_lang_list = {}
for lang_node in caption_list.findall('target'):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in ['sbv', 'vtt', 'srt']:
params = compat_urllib_parse.urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
'ts': timestamp,
'kind': caption_kind,
})
sub_formats.append({
'url': caption_url + '&' + params,
'ext': ext,
})
sub_lang_list[sub_lang] = sub_formats
return sub_lang_list
        # An extractor error can be raised by the download process if there are
# no automatic captions but there are subtitles
except (KeyError, ExtractorError):
self._downloader.report_warning(err_msg)
return {}
@classmethod
def extract_id(cls, url):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
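    # For example, extract_id('https://youtu.be/BaW_jenozKc') and
    # extract_id('https://www.youtube.com/watch?v=BaW_jenozKc') both return
    # 'BaW_jenozKc' (the video ID used by the first test case above).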
def _extract_from_m3u8(self, manifest_url, video_id):
url_map = {}
def _get_urls(_manifest):
lines = _manifest.split('\n')
urls = filter(lambda l: l and not l.startswith('#'),
lines)
return urls
manifest = self._download_webpage(manifest_url, video_id, 'Downloading formats manifest')
formats_urls = _get_urls(manifest)
for format_url in formats_urls:
itag = self._search_regex(r'itag/(\d+?)/', format_url, 'itag')
url_map[itag] = format_url
return url_map
def _extract_annotations(self, video_id):
url = 'https://www.youtube.com/annotations_invideo?features=1&legacy=1&video_id=%s' % video_id
return self._download_webpage(url, video_id, note='Searching for annotations.', errnote='Unable to download video annotations.')
def _parse_dash_manifest(
self, video_id, dash_manifest_url, player_url, age_gate, fatal=True):
def decrypt_sig(mobj):
s = mobj.group(1)
dec_s = self._decrypt_signature(s, video_id, player_url, age_gate)
return '/signature/%s' % dec_s
dash_manifest_url = re.sub(r'/s/([a-fA-F0-9\.]+)', decrypt_sig, dash_manifest_url)
dash_doc = self._download_xml(
dash_manifest_url, video_id,
note='Downloading DASH manifest',
errnote='Could not download DASH manifest',
fatal=fatal)
if dash_doc is False:
return []
formats = []
for a in dash_doc.findall('.//{urn:mpeg:DASH:schema:MPD:2011}AdaptationSet'):
mime_type = a.attrib.get('mimeType')
for r in a.findall('{urn:mpeg:DASH:schema:MPD:2011}Representation'):
url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
if url_el is None:
continue
if mime_type == 'text/vtt':
# TODO implement WebVTT downloading
pass
elif mime_type.startswith('audio/') or mime_type.startswith('video/'):
segment_list = r.find('{urn:mpeg:DASH:schema:MPD:2011}SegmentList')
format_id = r.attrib['id']
video_url = url_el.text
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
f = {
'format_id': format_id,
'url': video_url,
'width': int_or_none(r.attrib.get('width')),
'height': int_or_none(r.attrib.get('height')),
'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
'asr': int_or_none(r.attrib.get('audioSamplingRate')),
'filesize': filesize,
'fps': int_or_none(r.attrib.get('frameRate')),
}
if segment_list is not None:
f.update({
'initialization_url': segment_list.find('{urn:mpeg:DASH:schema:MPD:2011}Initialization').attrib['sourceURL'],
'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall('{urn:mpeg:DASH:schema:MPD:2011}SegmentURL')],
'protocol': 'http_dash_segments',
})
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == format_id)
except StopIteration:
full_info = self._formats.get(format_id, {}).copy()
full_info.update(f)
codecs = r.attrib.get('codecs')
if codecs:
if full_info.get('acodec') == 'none' and 'vcodec' not in full_info:
full_info['vcodec'] = codecs
elif full_info.get('vcodec') == 'none' and 'acodec' not in full_info:
full_info['acodec'] = codecs
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
proto = (
'http' if self._downloader.params.get('prefer_insecure', False)
else 'https')
start_time = None
end_time = None
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
query = compat_parse_qs(component)
if start_time is None and 't' in query:
start_time = parse_duration(query['t'][0])
if start_time is None and 'start' in query:
start_time = parse_duration(query['start'][0])
if end_time is None and 'end' in query:
end_time = parse_duration(query['end'][0])
# Extract original video URL from URL with redirection, like age verification, using next_url parameter
mobj = re.search(self._NEXT_URL_RE, url)
if mobj:
url = proto + '://www.youtube.com/' + compat_urllib_parse_unquote(mobj.group(1)).lstrip('/')
video_id = self.extract_id(url)
# Get video webpage
url = proto + '://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1&bpctr=9999999999' % video_id
video_webpage = self._download_webpage(url, video_id)
# Attempt to extract SWF player URL
mobj = re.search(r'swfConfig.*?"(https?:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage)
if mobj is not None:
player_url = re.sub(r'\\(.)', r'\1', mobj.group(1))
else:
player_url = None
dash_mpds = []
def add_dash_mpd(video_info):
dash_mpd = video_info.get('dashmpd')
if dash_mpd and dash_mpd[0] not in dash_mpds:
dash_mpds.append(dash_mpd[0])
# Get video info
embed_webpage = None
is_live = None
if re.search(r'player-age-gate-content">', video_webpage) is not None:
age_gate = True
# We simulate the access to the video from www.youtube.com/v/{video_id}
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
data = compat_urllib_parse.urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
r'"sts"\s*:\s*(\d+)', embed_webpage, 'sts', default=''),
})
video_info_url = proto + '://www.youtube.com/get_video_info?' + data
video_info_webpage = self._download_webpage(
video_info_url, video_id,
note='Refetching age-gated info webpage',
errnote='unable to download video info webpage')
video_info = compat_parse_qs(video_info_webpage)
add_dash_mpd(video_info)
else:
age_gate = False
video_info = None
# Try looking directly into the video webpage
mobj = re.search(r';ytplayer\.config\s*=\s*({.*?});', video_webpage)
if mobj:
json_code = uppercase_escape(mobj.group(1))
ytplayer_config = json.loads(json_code)
args = ytplayer_config['args']
if args.get('url_encoded_fmt_stream_map'):
# Convert to the same format returned by compat_parse_qs
video_info = dict((k, [v]) for k, v in args.items())
add_dash_mpd(video_info)
if args.get('livestream') == '1' or args.get('live_playback') == 1:
is_live = True
if not video_info or self._downloader.params.get('youtube_include_dash_manifest', True):
# We also try looking in get_video_info since it may contain different dashmpd
# URL that points to a DASH manifest with possibly different itag set (some itags
# are missing from DASH manifest pointed by webpage's dashmpd, some - from DASH
# manifest pointed by get_video_info's dashmpd).
# The general idea is to take a union of itags of both DASH manifests (for example
# video with such 'manifest behavior' see https://github.com/rg3/youtube-dl/issues/6093)
self.report_video_info_webpage_download(video_id)
for el_type in ['&el=info', '&el=embedded', '&el=detailpage', '&el=vevo', '']:
video_info_url = (
'%s://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en'
% (proto, video_id, el_type))
video_info_webpage = self._download_webpage(
video_info_url,
video_id, note=False,
errnote='unable to download video info webpage')
get_video_info = compat_parse_qs(video_info_webpage)
if get_video_info.get('use_cipher_signature') != ['True']:
add_dash_mpd(get_video_info)
if not video_info:
video_info = get_video_info
if 'token' in get_video_info:
break
if 'token' not in video_info:
if 'reason' in video_info:
if 'The uploader has not made this video available in your country.' in video_info['reason']:
regions_allowed = self._html_search_meta('regionsAllowed', video_webpage, default=None)
if regions_allowed:
raise ExtractorError('YouTube said: This video is available in %s only' % (
', '.join(map(ISO3166Utils.short2full, regions_allowed.split(',')))),
expected=True)
raise ExtractorError(
'YouTube said: %s' % video_info['reason'][0],
expected=True, video_id=video_id)
else:
raise ExtractorError(
'"token" parameter not in video info for unknown reason',
video_id=video_id)
# title
if 'title' in video_info:
video_title = video_info['title'][0]
else:
self._downloader.report_warning('Unable to extract video title')
video_title = '_'
# description
video_description = get_element_by_id("eow-description", video_webpage)
if video_description:
video_description = re.sub(r'''(?x)
<a\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
title="([^"]+)"\s+
(?:[a-zA-Z-]+="[^"]+"\s+)*?
class="yt-uix-redirect-link"\s*>
[^<]+
</a>
''', r'\1', video_description)
video_description = clean_html(video_description)
else:
fd_mobj = re.search(r'<meta name="description" content="([^"]+)"', video_webpage)
if fd_mobj:
video_description = unescapeHTML(fd_mobj.group(1))
else:
video_description = ''
if 'multifeed_metadata_list' in video_info and not smuggled_data.get('force_singlefeed', False):
if not self._downloader.params.get('noplaylist'):
entries = []
feed_ids = []
multifeed_metadata_list = compat_urllib_parse_unquote_plus(video_info['multifeed_metadata_list'][0])
for feed in multifeed_metadata_list.split(','):
feed_data = compat_parse_qs(feed)
entries.append({
'_type': 'url_transparent',
'ie_key': 'Youtube',
'url': smuggle_url(
'%s://www.youtube.com/watch?v=%s' % (proto, feed_data['id'][0]),
{'force_singlefeed': True}),
'title': '%s (%s)' % (video_title, feed_data['title'][0]),
})
feed_ids.append(feed_data['id'][0])
self.to_screen(
'Downloading multifeed video (%s) - add --no-playlist to just download video %s'
% (', '.join(feed_ids), video_id))
return self.playlist_result(entries, video_id, video_title, video_description)
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
if 'view_count' in video_info:
view_count = int(video_info['view_count'][0])
else:
view_count = None
# Check for "rental" videos
if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info:
raise ExtractorError('"rental" videos not supported')
# Start extracting information
self.report_information_extraction(video_id)
# uploader
if 'author' not in video_info:
raise ExtractorError('Unable to extract uploader name')
video_uploader = compat_urllib_parse_unquote_plus(video_info['author'][0])
# uploader_id
video_uploader_id = None
mobj = re.search(r'<link itemprop="url" href="http://www.youtube.com/(?:user|channel)/([^"]+)">', video_webpage)
if mobj is not None:
video_uploader_id = mobj.group(1)
else:
self._downloader.report_warning('unable to extract uploader nickname')
# thumbnail image
# We try first to get a high quality image:
m_thumb = re.search(r'<span itemprop="thumbnail".*?href="(.*?)">',
video_webpage, re.DOTALL)
if m_thumb is not None:
video_thumbnail = m_thumb.group(1)
elif 'thumbnail_url' not in video_info:
self._downloader.report_warning('unable to extract video thumbnail')
video_thumbnail = None
else: # don't panic if we can't find it
video_thumbnail = compat_urllib_parse_unquote_plus(video_info['thumbnail_url'][0])
# upload date
upload_date = self._html_search_meta(
'datePublished', video_webpage, 'upload date', default=None)
if not upload_date:
upload_date = self._search_regex(
[r'(?s)id="eow-date.*?>(.*?)</span>',
r'id="watch-uploader-info".*?>.*?(?:Published|Uploaded|Streamed live|Started) on (.+?)</strong>'],
video_webpage, 'upload date', default=None)
if upload_date:
                upload_date = ' '.join(re.sub(r'[/,-]', r' ', upload_date).split())
upload_date = unified_strdate(upload_date)
m_cat_container = self._search_regex(
r'(?s)<h4[^>]*>\s*Category\s*</h4>\s*<ul[^>]*>(.*?)</ul>',
video_webpage, 'categories', default=None)
if m_cat_container:
category = self._html_search_regex(
r'(?s)<a[^<]+>(.*?)</a>', m_cat_container, 'category',
default=None)
video_categories = None if category is None else [category]
else:
video_categories = None
video_tags = [
unescapeHTML(m.group('content'))
for m in re.finditer(self._meta_regex('og:video:tag'), video_webpage)]
def _extract_count(count_name):
return str_to_int(self._search_regex(
r'-%s-button[^>]+><span[^>]+class="yt-uix-button-content"[^>]*>([\d,]+)</span>'
% re.escape(count_name),
video_webpage, count_name, default=None))
like_count = _extract_count('like')
dislike_count = _extract_count('dislike')
# subtitles
video_subtitles = self.extract_subtitles(video_id, video_webpage)
automatic_captions = self.extract_automatic_captions(video_id, video_webpage)
if 'length_seconds' not in video_info:
self._downloader.report_warning('unable to extract video duration')
video_duration = None
else:
video_duration = int(compat_urllib_parse_unquote_plus(video_info['length_seconds'][0]))
# annotations
video_annotations = None
if self._downloader.params.get('writeannotations', False):
video_annotations = self._extract_annotations(video_id)
def _map_to_format_list(urlmap):
formats = []
for itag, video_real_url in urlmap.items():
dct = {
'format_id': itag,
'url': video_real_url,
'player_url': player_url,
}
if itag in self._formats:
dct.update(self._formats[itag])
formats.append(dct)
return formats
if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
formats = [{
'format_id': '_rtmp',
'protocol': 'rtmp',
'url': video_info['conn'][0],
'player_url': player_url,
}]
elif len(video_info.get('url_encoded_fmt_stream_map', [''])[0]) >= 1 or len(video_info.get('adaptive_fmts', [''])[0]) >= 1:
encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts', [''])[0]
if 'rtmpe%3Dyes' in encoded_url_map:
raise ExtractorError('rtmpe downloads are not supported, see https://github.com/rg3/youtube-dl/issues/343 for more information.', expected=True)
formats = []
for url_data_str in encoded_url_map.split(','):
url_data = compat_parse_qs(url_data_str)
if 'itag' not in url_data or 'url' not in url_data:
continue
format_id = url_data['itag'][0]
url = url_data['url'][0]
if 'sig' in url_data:
url += '&signature=' + url_data['sig'][0]
elif 's' in url_data:
encrypted_sig = url_data['s'][0]
ASSETS_RE = r'"assets":.+?"js":\s*("[^"]+")'
jsplayer_url_json = self._search_regex(
ASSETS_RE,
embed_webpage if age_gate else video_webpage,
'JS player URL (1)', default=None)
if not jsplayer_url_json and not age_gate:
# We need the embed website after all
if embed_webpage is None:
embed_url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(
embed_url, video_id, 'Downloading embed webpage')
jsplayer_url_json = self._search_regex(
ASSETS_RE, embed_webpage, 'JS player URL')
player_url = json.loads(jsplayer_url_json)
if player_url is None:
player_url_json = self._search_regex(
r'ytplayer\.config.*?"url"\s*:\s*("[^"]+")',
video_webpage, 'age gate player URL')
player_url = json.loads(player_url_json)
if self._downloader.params.get('verbose'):
if player_url is None:
player_version = 'unknown'
player_desc = 'unknown'
else:
if player_url.endswith('swf'):
player_version = self._search_regex(
r'-(.+?)(?:/watch_as3)?\.swf$', player_url,
'flash player', fatal=False)
player_desc = 'flash player %s' % player_version
else:
player_version = self._search_regex(
r'html5player-([^/]+?)(?:/html5player(?:-new)?)?\.js',
player_url,
'html5 player', fatal=False)
player_desc = 'html5 player %s' % player_version
parts_sizes = self._signature_cache_id(encrypted_sig)
self.to_screen('{%s} signature length %s, %s' %
(format_id, parts_sizes, player_desc))
signature = self._decrypt_signature(
encrypted_sig, video_id, player_url, age_gate)
url += '&signature=' + signature
if 'ratebypass' not in url:
url += '&ratebypass=yes'
# Some itags are not included in DASH manifest thus corresponding formats will
# lack metadata (see https://github.com/rg3/youtube-dl/pull/5993).
# Trying to extract metadata from url_encoded_fmt_stream_map entry.
mobj = re.search(r'^(?P<width>\d+)[xX](?P<height>\d+)$', url_data.get('size', [''])[0])
width, height = (int(mobj.group('width')), int(mobj.group('height'))) if mobj else (None, None)
dct = {
'format_id': format_id,
'url': url,
'player_url': player_url,
'filesize': int_or_none(url_data.get('clen', [None])[0]),
'tbr': float_or_none(url_data.get('bitrate', [None])[0], 1000),
'width': width,
'height': height,
'fps': int_or_none(url_data.get('fps', [None])[0]),
'format_note': url_data.get('quality_label', [None])[0] or url_data.get('quality', [None])[0],
}
type_ = url_data.get('type', [None])[0]
if type_:
type_split = type_.split(';')
kind_ext = type_split[0].split('/')
if len(kind_ext) == 2:
kind, ext = kind_ext
dct['ext'] = ext
if kind in ('audio', 'video'):
codecs = None
for mobj in re.finditer(
r'(?P<key>[a-zA-Z_-]+)=(?P<quote>["\']?)(?P<val>.+?)(?P=quote)(?:;|$)', type_):
if mobj.group('key') == 'codecs':
codecs = mobj.group('val')
break
if codecs:
codecs = codecs.split(',')
if len(codecs) == 2:
acodec, vcodec = codecs[0], codecs[1]
else:
acodec, vcodec = (codecs[0], 'none') if kind == 'audio' else ('none', codecs[0])
dct.update({
'acodec': acodec,
'vcodec': vcodec,
})
if format_id in self._formats:
dct.update(self._formats[format_id])
formats.append(dct)
elif video_info.get('hlsvp'):
manifest_url = video_info['hlsvp'][0]
url_map = self._extract_from_m3u8(manifest_url, video_id)
formats = _map_to_format_list(url_map)
else:
raise ExtractorError('no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')
# Look for the DASH manifest
if self._downloader.params.get('youtube_include_dash_manifest', True):
dash_mpd_fatal = True
for dash_manifest_url in dash_mpds:
dash_formats = {}
try:
for df in self._parse_dash_manifest(
video_id, dash_manifest_url, player_url, age_gate, dash_mpd_fatal):
# Do not overwrite DASH format found in some previous DASH manifest
if df['format_id'] not in dash_formats:
dash_formats[df['format_id']] = df
# Additional DASH manifests may end up in HTTP Error 403 therefore
# allow them to fail without bug report message if we already have
# some DASH manifest succeeded. This is temporary workaround to reduce
# burst of bug reports until we figure out the reason and whether it
# can be fixed at all.
dash_mpd_fatal = False
except (ExtractorError, KeyError) as e:
self.report_warning(
'Skipping DASH manifest: %r' % e, video_id)
if dash_formats:
# Remove the formats we found through non-DASH, they
# contain less info and it can be wrong, because we use
# fixed values (for example the resolution). See
# https://github.com/rg3/youtube-dl/issues/5774 for an
# example.
formats = [f for f in formats if f['format_id'] not in dash_formats.keys()]
formats.extend(dash_formats.values())
# Check for malformed aspect ratio
stretched_m = re.search(
r'<meta\s+property="og:video:tag".*?content="yt:stretch=(?P<w>[0-9]+):(?P<h>[0-9]+)">',
video_webpage)
if stretched_m:
ratio = float(stretched_m.group('w')) / float(stretched_m.group('h'))
for f in formats:
if f.get('vcodec') != 'none':
f['stretched_ratio'] = ratio
self._sort_formats(formats)
return {
'id': video_id,
'uploader': video_uploader,
'uploader_id': video_uploader_id,
'upload_date': upload_date,
'title': video_title,
'thumbnail': video_thumbnail,
'description': video_description,
'categories': video_categories,
'tags': video_tags,
'subtitles': video_subtitles,
'automatic_captions': automatic_captions,
'duration': video_duration,
'age_limit': 18 if age_gate else 0,
'annotations': video_annotations,
'webpage_url': proto + '://www.youtube.com/watch?v=%s' % video_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'average_rating': float_or_none(video_info.get('avg_rating', [None])[0]),
'formats': formats,
'is_live': is_live,
'start_time': start_time,
'end_time': end_time,
}
class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
IE_DESC = 'YouTube.com playlists'
_VALID_URL = r"""(?x)(?:
(?:https?://)?
(?:\w+\.)?
youtube\.com/
(?:
(?:course|view_play_list|my_playlists|artist|playlist|watch|embed/videoseries)
\? (?:.*?&)*? (?:p|a|list)=
| p/
)
(
(?:PL|LL|EC|UU|FL|RD|UL)?[0-9A-Za-z-_]{10,}
# Top tracks, they can also include dots
|(?:MC)[\w\.]*
)
.*
|
((?:PL|LL|EC|UU|FL|RD|UL)[0-9A-Za-z-_]{10,})
)"""
_TEMPLATE_URL = 'https://www.youtube.com/playlist?list=%s'
_VIDEO_RE = r'href="\s*/watch\?v=(?P<id>[0-9A-Za-z_-]{11})&[^"]*?index=(?P<index>\d+)'
IE_NAME = 'youtube:playlist'
_TESTS = [{
'url': 'https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
'info_dict': {
'title': 'ytdl test PL',
'id': 'PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re',
},
'playlist_count': 3,
}, {
'url': 'https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'info_dict': {
'id': 'PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx',
'title': 'YDL_Empty_List',
},
'playlist_count': 0,
}, {
'note': 'Playlist with deleted videos (#651). As a bonus, the video #51 is also twice in this list.',
'url': 'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
'info_dict': {
'title': '29C3: Not my department',
'id': 'PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC',
},
'playlist_count': 95,
}, {
'note': 'issue #673',
'url': 'PLBB231211A4F62143',
'info_dict': {
'title': '[OLD]Team Fortress 2 (Class-based LP)',
'id': 'PLBB231211A4F62143',
},
'playlist_mincount': 26,
}, {
'note': 'Large playlist',
'url': 'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q',
'info_dict': {
'title': 'Uploads from Cauchemar',
'id': 'UUBABnxM4Ar9ten8Mdjj1j0Q',
},
'playlist_mincount': 799,
}, {
'url': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
'info_dict': {
'title': 'YDL_safe_search',
'id': 'PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl',
},
'playlist_count': 2,
}, {
'note': 'embedded',
'url': 'http://www.youtube.com/embed/videoseries?list=PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
'playlist_count': 4,
'info_dict': {
'title': 'JODA15',
'id': 'PL6IaIsEjSbf96XFRuNccS_RuEXwNdsoEu',
}
}, {
'note': 'Embedded SWF player',
'url': 'http://www.youtube.com/p/YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ?hl=en_US&fs=1&rel=0',
'playlist_count': 4,
'info_dict': {
'title': 'JODA7',
'id': 'YN5VISEtHet5D4NEvfTd0zcgFk84NqFZ',
}
}, {
'note': 'Buggy playlist: the webpage has a "Load more" button but it doesn\'t have more videos',
'url': 'https://www.youtube.com/playlist?list=UUXw-G3eDE9trcvY2sBMM_aA',
'info_dict': {
'title': 'Uploads from Interstellar Movie',
'id': 'UUXw-G3eDE9trcvY2sBMM_aA',
},
        'playlist_mincount': 21,
}]
def _real_initialize(self):
self._login()
def _extract_mix(self, playlist_id):
# The mixes are generated from a single video
# the id of the playlist is just 'RD' + video_id
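        # e.g. a (hypothetical) mix id 'RDdQw4w9WgXcQ' is 'RD' followed by the
        # seed video id 'dQw4w9WgXcQ', which is what playlist_id[-11:] recovers below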
url = 'https://youtube.com/watch?v=%s&list=%s' % (playlist_id[-11:], playlist_id)
webpage = self._download_webpage(
url, playlist_id, 'Downloading Youtube mix')
search_title = lambda class_name: get_element_by_attribute('class', class_name, webpage)
title_span = (
search_title('playlist-title') or
search_title('title long-title') or
search_title('title'))
title = clean_html(title_span)
ids = orderedSet(re.findall(
r'''(?xs)data-video-username=".*?".*?
href="/watch\?v=([0-9A-Za-z_-]{11})&[^"]*?list=%s''' % re.escape(playlist_id),
webpage))
url_results = self._ids_to_results(ids)
return self.playlist_result(url_results, playlist_id, title)
def _extract_playlist(self, playlist_id):
url = self._TEMPLATE_URL % playlist_id
page = self._download_webpage(url, playlist_id)
for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
match = match.strip()
# Check if the playlist exists or is private
if re.match(r'[^<]*(The|This) playlist (does not exist|is private)[^<]*', match):
raise ExtractorError(
'The playlist doesn\'t exist or is private, use --username or '
'--netrc to access it.',
expected=True)
elif re.match(r'[^<]*Invalid parameters[^<]*', match):
raise ExtractorError(
'Invalid parameters. Maybe URL is incorrect.',
expected=True)
elif re.match(r'[^<]*Choose your language[^<]*', match):
continue
else:
self.report_warning('Youtube gives an alert message: ' + match)
# Extract the video ids from the playlist pages
def _entries():
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.finditer(self._VIDEO_RE, content_html)
# We remove the duplicates and the link with index 0
# (it's not the first video of the playlist)
new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
for vid_id in new_ids:
yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), playlist_id,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
if not content_html.strip():
# Some webpages show a "Load more" button but they don't
# have more videos
break
more_widget_html = more['load_more_widget_html']
playlist_title = self._html_search_regex(
r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
page, 'title')
return self.playlist_result(_entries(), playlist_id, playlist_title)
def _real_extract(self, url):
# Extract playlist id
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
playlist_id = mobj.group(1) or mobj.group(2)
# Check if it's a video-specific URL
query_dict = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
if 'v' in query_dict:
video_id = query_dict['v'][0]
if self._downloader.params.get('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(video_id, 'Youtube', video_id=video_id)
else:
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
if playlist_id.startswith('RD') or playlist_id.startswith('UL'):
# Mixes require a custom extraction process
return self._extract_mix(playlist_id)
return self._extract_playlist(playlist_id)
class YoutubeChannelIE(InfoExtractor):
IE_DESC = 'YouTube.com channels'
_VALID_URL = r'https?://(?:youtu\.be|(?:\w+\.)?youtube(?:-nocookie)?\.com)/channel/(?P<id>[0-9A-Za-z_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/channel/%s/videos'
IE_NAME = 'youtube:channel'
_TESTS = [{
'note': 'paginated channel',
'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w',
'playlist_mincount': 91,
'info_dict': {
'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
}
}]
@staticmethod
def extract_videos_from_page(page):
ids_in_page = []
titles_in_page = []
for mobj in re.finditer(r'(?:title="(?P<title>[^"]+)"[^>]+)?href="/watch\?v=(?P<id>[0-9A-Za-z_-]+)&?', page):
video_id = mobj.group('id')
video_title = unescapeHTML(mobj.group('title'))
try:
idx = ids_in_page.index(video_id)
if video_title and not titles_in_page[idx]:
titles_in_page[idx] = video_title
except ValueError:
ids_in_page.append(video_id)
titles_in_page.append(video_title)
return zip(ids_in_page, titles_in_page)
def _real_extract(self, url):
channel_id = self._match_id(url)
url = self._TEMPLATE_URL % channel_id
# Channel by page listing is restricted to 35 pages of 30 items, i.e. 1050 videos total (see #5778)
# Workaround by extracting as a playlist if managed to obtain channel playlist URL
# otherwise fallback on channel by page extraction
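        # (The uploads playlist id is simply the channel id with its 'UC' prefix
        # replaced by 'UU', which is the mapping applied further below.)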
channel_page = self._download_webpage(
url + '?view=57', channel_id,
'Downloading channel page', fatal=False)
if channel_page is False:
channel_playlist_id = False
else:
channel_playlist_id = self._html_search_meta(
'channelId', channel_page, 'channel id', default=None)
if not channel_playlist_id:
channel_playlist_id = self._search_regex(
r'data-channel-external-id="([^"]+)"',
channel_page, 'channel id', default=None)
if channel_playlist_id and channel_playlist_id.startswith('UC'):
playlist_id = 'UU' + channel_playlist_id[2:]
return self.url_result(
compat_urlparse.urljoin(url, '/playlist?list=%s' % playlist_id), 'YoutubePlaylist')
channel_page = self._download_webpage(url, channel_id, 'Downloading page #1')
autogenerated = re.search(r'''(?x)
class="[^"]*?(?:
channel-header-autogenerated-label|
yt-channel-title-autogenerated
)[^"]*"''', channel_page) is not None
if autogenerated:
# The videos are contained in a single page
# the ajax pages can't be used, they are empty
entries = [
self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
for video_id, video_title in self.extract_videos_from_page(channel_page)]
return self.playlist_result(entries, channel_id)
def _entries():
more_widget_html = content_html = channel_page
for pagenum in itertools.count(1):
for video_id, video_title in self.extract_videos_from_page(content_html):
yield self.url_result(
video_id, 'Youtube', video_id=video_id,
video_title=video_title)
mobj = re.search(
r'data-uix-load-more-href="/?(?P<more>[^"]+)"',
more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), channel_id,
'Downloading page #%s' % (pagenum + 1),
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(_entries(), channel_id)
class YoutubeUserIE(YoutubeChannelIE):
IE_DESC = 'YouTube.com user videos (URL or "ytuser" keyword)'
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?youtube\.com/(?:user/)?(?!(?:attribution_link|watch|results)(?:$|[^a-z_A-Z0-9-])))|ytuser:)(?!feed/)(?P<id>[A-Za-z0-9_-]+)'
_TEMPLATE_URL = 'https://www.youtube.com/user/%s/videos'
IE_NAME = 'youtube:user'
_TESTS = [{
'url': 'https://www.youtube.com/user/TheLinuxFoundation',
'playlist_mincount': 320,
'info_dict': {
'title': 'TheLinuxFoundation',
}
}, {
'url': 'ytuser:phihag',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
        # Don't return True if the url can be extracted with another youtube
        # extractor; the regex is too permissive and it would match.
other_ies = iter(klass for (name, klass) in globals().items() if name.endswith('IE') and klass is not cls)
if any(ie.suitable(url) for ie in other_ies):
return False
else:
return super(YoutubeUserIE, cls).suitable(url)
class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
IE_DESC = 'YouTube.com searches'
# there doesn't appear to be a real limit, for example if you search for
# 'python' you get more than 8.000.000 results
_MAX_RESULTS = float('inf')
IE_NAME = 'youtube:search'
_SEARCH_KEY = 'ytsearch'
_EXTRA_QUERY_ARGS = {}
_TESTS = []
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
videos = []
limit = n
for pagenum in itertools.count(1):
url_query = {
'search_query': query.encode('utf-8'),
'page': pagenum,
'spf': 'navigate',
}
url_query.update(self._EXTRA_QUERY_ARGS)
result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
errnote='Unable to download API page')
html_content = data[1]['body']['content']
if 'class="search-message' in html_content:
raise ExtractorError(
'[youtube] No video results', expected=True)
new_videos = self._ids_to_results(orderedSet(re.findall(
r'href="/watch\?v=(.{11})', html_content)))
videos += new_videos
if not new_videos or len(videos) > limit:
break
if len(videos) > n:
videos = videos[:n]
return self.playlist_result(videos, query)
class YoutubeSearchDateIE(YoutubeSearchIE):
IE_NAME = YoutubeSearchIE.IE_NAME + ':date'
_SEARCH_KEY = 'ytsearchdate'
IE_DESC = 'YouTube.com searches, newest videos first'
_EXTRA_QUERY_ARGS = {'search_sort': 'video_date_uploaded'}
class YoutubeSearchURLIE(InfoExtractor):
IE_DESC = 'YouTube.com search URLs'
IE_NAME = 'youtube:search_url'
_VALID_URL = r'https?://(?:www\.)?youtube\.com/results\?(.*?&)?search_query=(?P<query>[^&]+)(?:[&]|$)'
_TESTS = [{
'url': 'https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video',
'playlist_mincount': 5,
'info_dict': {
'title': 'youtube-dl test video',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
query = compat_urllib_parse_unquote_plus(mobj.group('query'))
webpage = self._download_webpage(url, query)
result_code = self._search_regex(
r'(?s)<ol[^>]+class="item-section"(.*?)</ol>', webpage, 'result HTML')
part_codes = re.findall(
r'(?s)<h3[^>]+class="[^"]*yt-lockup-title[^"]*"[^>]*>(.*?)</h3>', result_code)
entries = []
for part_code in part_codes:
part_title = self._html_search_regex(
[r'(?s)title="([^"]+)"', r'>([^<]+)</a>'], part_code, 'item title', fatal=False)
part_url_snippet = self._html_search_regex(
r'(?s)href="([^"]+)"', part_code, 'item URL')
part_url = compat_urlparse.urljoin(
'https://www.youtube.com/', part_url_snippet)
entries.append({
'_type': 'url',
'url': part_url,
'title': part_title,
})
return {
'_type': 'playlist',
'entries': entries,
'title': query,
}
class YoutubeShowIE(InfoExtractor):
IE_DESC = 'YouTube.com (multi-season) shows'
_VALID_URL = r'https?://www\.youtube\.com/show/(?P<id>[^?#]*)'
IE_NAME = 'youtube:show'
_TESTS = [{
'url': 'https://www.youtube.com/show/airdisasters',
'playlist_mincount': 5,
'info_dict': {
'id': 'airdisasters',
'title': 'Air Disasters',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(
'https://www.youtube.com/show/%s/playlists' % playlist_id, playlist_id, 'Downloading show webpage')
# There's one playlist for each season of the show
m_seasons = list(re.finditer(r'href="(/playlist\?list=.*?)"', webpage))
self.to_screen('%s: Found %s seasons' % (playlist_id, len(m_seasons)))
entries = [
self.url_result(
'https://www.youtube.com' + season.group(1), 'YoutubePlaylist')
for season in m_seasons
]
title = self._og_search_title(webpage, fatal=False)
return {
'_type': 'playlist',
'id': playlist_id,
'title': title,
'entries': entries,
}
class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME and _PLAYLIST_TITLE properties.
"""
_LOGIN_REQUIRED = True
@property
def IE_NAME(self):
return 'youtube:%s' % self._FEED_NAME
def _real_initialize(self):
self._login()
def _real_extract(self, url):
page = self._download_webpage(
'https://www.youtube.com/feed/%s' % self._FEED_NAME, self._PLAYLIST_TITLE)
# The extraction process is the same as for playlists, but the regex
# for the video ids doesn't contain an index
ids = []
more_widget_html = content_html = page
for page_num in itertools.count(1):
matches = re.findall(r'href="\s*/watch\?v=([0-9A-Za-z_-]{11})', content_html)
# 'recommended' feed has infinite 'load more' and each new portion spins
# the same videos in (sometimes) slightly different order, so we'll check
# for unicity and break when portion has no new videos
new_ids = filter(lambda video_id: video_id not in ids, orderedSet(matches))
if not new_ids:
break
ids.extend(new_ids)
mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
if not mobj:
break
more = self._download_json(
'https://youtube.com/%s' % mobj.group('more'), self._PLAYLIST_TITLE,
'Downloading page #%s' % page_num,
transform_source=uppercase_escape)
content_html = more['content_html']
more_widget_html = more['load_more_widget_html']
return self.playlist_result(
self._ids_to_results(ids), playlist_title=self._PLAYLIST_TITLE)
class YoutubeWatchLaterIE(YoutubePlaylistIE):
IE_NAME = 'youtube:watchlater'
IE_DESC = 'Youtube watch later list, ":ytwatchlater" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/(?:feed/watch_later|playlist\?list=WL)|:ytwatchlater'
_TESTS = [] # override PlaylistIE tests
def _real_extract(self, url):
return self._extract_playlist('WL')
class YoutubeFavouritesIE(YoutubeBaseInfoExtractor):
IE_NAME = 'youtube:favorites'
IE_DESC = 'YouTube.com favourite videos, ":ytfav" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/my_favorites|:ytfav(?:ou?rites)?'
_LOGIN_REQUIRED = True
def _real_extract(self, url):
webpage = self._download_webpage('https://www.youtube.com/my_favorites', 'Youtube Favourites videos')
playlist_id = self._search_regex(r'list=(.+?)["&]', webpage, 'favourites playlist id')
return self.url_result(playlist_id, 'YoutubePlaylist')
class YoutubeRecommendedIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com recommended videos, ":ytrec" for short (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/recommended|:ytrec(?:ommended)?'
_FEED_NAME = 'recommended'
_PLAYLIST_TITLE = 'Youtube Recommended videos'
class YoutubeSubscriptionsIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)'
_VALID_URL = r'https?://www\.youtube\.com/feed/subscriptions|:ytsubs(?:criptions)?'
_FEED_NAME = 'subscriptions'
_PLAYLIST_TITLE = 'Youtube Subscriptions'
class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
IE_DESC = 'Youtube watch history, ":ythistory" for short (requires authentication)'
_VALID_URL = 'https?://www\.youtube\.com/feed/history|:ythistory'
_FEED_NAME = 'history'
_PLAYLIST_TITLE = 'Youtube History'
class YoutubeTruncatedURLIE(InfoExtractor):
IE_NAME = 'youtube:truncated_url'
IE_DESC = False # Do not list
_VALID_URL = r'''(?x)
(?:https?://)?
(?:\w+\.)?[yY][oO][uU][tT][uU][bB][eE](?:-nocookie)?\.com/
(?:watch\?(?:
feature=[a-z_]+|
annotation_id=annotation_[^&]+|
x-yt-cl=[0-9]+|
hl=[^&]*|
t=[0-9]+
)?
|
attribution_link\?a=[^&]+
)
$
'''
_TESTS = [{
'url': 'http://www.youtube.com/watch?annotation_id=annotation_3951667041',
'only_matching': True,
}, {
'url': 'http://www.youtube.com/watch?',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?x-yt-cl=84503534',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?feature=foo',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?hl=en-GB',
'only_matching': True,
}, {
'url': 'https://www.youtube.com/watch?t=2372',
'only_matching': True,
}]
def _real_extract(self, url):
raise ExtractorError(
'Did you forget to quote the URL? Remember that & is a meta '
'character in most shells, so you want to put the URL in quotes, '
'like youtube-dl '
'"http://www.youtube.com/watch?feature=foo&v=BaW_jenozKc" '
' or simply youtube-dl BaW_jenozKc .',
expected=True)
class YoutubeTruncatedIDIE(InfoExtractor):
IE_NAME = 'youtube:truncated_id'
IE_DESC = False # Do not list
_VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
_TESTS = [{
'url': 'https://www.youtube.com/watch?v=N_708QY7Ob',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
raise ExtractorError(
'Incomplete YouTube ID %s. URL %s looks truncated.' % (video_id, url),
expected=True)
| unlicense | 3,802,286,563,380,102,000 | -5,472,947,910,384,876,000 | 44.396943 | 226 | 0.505502 | false |
torte/lymph | setup.py | 2 | 2729 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
with open('README.rst') as f:
description = f.read()
install_requires = [
'docopt>=0.6.1',
'kazoo>=1.3.1',
'kombu>=3.0.16',
'gevent',
'msgpack-python>=0.4.0',
'psutil>=2.1.1',
'PyYAML>=3.11',
'pyzmq>=14.3.0',
'redis>=2.9.1',
'setproctitle>=1.1.8',
'six>=1.6',
'Werkzeug>=0.10.4',
'blessings>=1.5.1',
'netifaces>=0.10.4',
'mock>=1.0.1',
'PyHamcrest>=1.8.2',
'pytz',
'iso8601>=0.1.10',
]
if sys.version_info.major == 2:
install_requires.append('Monotime>=1.0')
elif sys.version_info.major == 3:
install_requires.remove('gevent')
install_requires.append('gevent>=1.1a2')
setup(
name='lymph',
url='http://github.com/deliveryhero/lymph/',
version='0.8.0-dev',
namespace_packages=['lymph'],
packages=find_packages(),
license=u'Apache License (2.0)',
author=u'Delivery Hero Holding GmbH',
maintainer=u'Johannes Dollinger',
maintainer_email=u'[email protected]',
description=u'a service framework',
long_description=description,
include_package_data=True,
install_requires=install_requires,
extras_require={
'sentry': ['raven'],
'newrelic': ['newrelic'],
},
entry_points={
'console_scripts': ['lymph = lymph.cli.main:main'],
'lymph.cli': [
'discover = lymph.cli.discover:DiscoverCommand',
'emit = lymph.cli.emit:EmitCommand',
'help = lymph.cli.help:HelpCommand',
'inspect = lymph.cli.inspect:InspectCommand',
'instance = lymph.cli.service:InstanceCommand',
'list = lymph.cli.list:ListCommand',
'node = lymph.cli.service:NodeCommand',
'request = lymph.cli.request:RequestCommand',
'shell = lymph.cli.shell:ShellCommand',
'subscribe = lymph.cli.subscribe:SubscribeCommand',
'tail = lymph.cli.tail:TailCommand',
'config = lymph.cli.config:ConfigCommand',
],
'nose.plugins.0.10': ['lymph = lymph.testing.nose:LymphPlugin'],
'pytest11': ['lymph = lymph.testing.pytest'],
'kombu.serializers': [
'lymph-json = lymph.serializers.kombu:json_serializer_args',
'lymph-msgpack = lymph.serializers.kombu:msgpack_serializer_args',
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
]
)
| apache-2.0 | -1,824,163,298,211,392,300 | -4,419,351,715,916,996,000 | 30.732558 | 78 | 0.592158 | false |
websbydrew/TestPython | Python Files/Exercise Files/Ch5/htmlparsing_finished.py | 2 | 1751 | #
# Example file for parsing and processing HTML
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
# import the HTMLParser module
from HTMLParser import HTMLParser
metacount = 0;
# create a subclass and override the handler methods
class MyHTMLParser(HTMLParser):
# function to handle an opening tag in the doc
# this will be called when the closing ">" of the tag is reached
def handle_starttag(self, tag, attrs):
global metacount
print "Encountered a start tag:", tag
if tag == "meta":
metacount += 1
        pos = self.getpos() # returns a tuple indicating line and character
print "At line: ", pos[0], " position ", pos[1]
        if len(attrs) > 0:
print "\tAttributes:"
for a in attrs:
print "\t", a[0],"=",a[1]
# function to handle the ending tag
def handle_endtag(self, tag):
print "Encountered an end tag:", tag
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
# function to handle character and text data (tag contents)
def handle_data(self, data):
print "Encountered some data:", data
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
# function to handle the processing of HTML comments
def handle_comment(self, data):
print "Encountered comment:", data
pos = self.getpos()
print "At line: ", pos[0], " position ", pos[1]
def main():
# instantiate the parser and feed it some HTML
parser = MyHTMLParser()
# open the sample HTML file and read it
f = open("samplehtml.html")
if f.mode == "r":
contents = f.read() # read the entire file
parser.feed(contents)
print "%d meta tags encountered" % metacount
if __name__ == "__main__":
main();
| gpl-3.0 | -7,971,155,121,557,705,000 | -1,928,365,568,355,123,200 | 28.694915 | 71 | 0.641919 | false |
coderbone/SickRage | lib/sqlalchemy/inspection.py | 79 | 3103 | # sqlalchemy/inspect.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The inspection module provides the :func:`.inspect` function,
which delivers runtime information about a wide variety
of SQLAlchemy objects, both within the Core as well as the
ORM.
The :func:`.inspect` function is the entry point to SQLAlchemy's
public API for viewing the configuration and construction
of in-memory objects. Depending on the type of object
passed to :func:`.inspect`, the return value will either be
a related object which provides a known interface, or in many
cases it will return the object itself.
The rationale for :func:`.inspect` is twofold. One is that
it replaces the need to be aware of a large variety of "information
getting" functions in SQLAlchemy, such as :meth:`.Inspector.from_engine`,
:func:`.orm.attributes.instance_state`, :func:`.orm.class_mapper`,
and others. The other is that the return value of :func:`.inspect`
is guaranteed to obey a documented API, thus allowing third party
tools which build on top of SQLAlchemy configurations to be constructed
in a forwards-compatible way.
.. versionadded:: 0.8 The :func:`.inspect` system is introduced
as of version 0.8.
"""
from . import util, exc
_registrars = util.defaultdict(list)
def inspect(subject, raiseerr=True):
"""Produce an inspection object for the given target.
The returned value in some cases may be the
same object as the one given, such as if a
:class:`.Mapper` object is passed. In other
cases, it will be an instance of the registered
inspection type for the given object, such as
if an :class:`.engine.Engine` is passed, an
:class:`.Inspector` object is returned.
:param subject: the subject to be inspected.
:param raiseerr: When ``True``, if the given subject
does not
correspond to a known SQLAlchemy inspected type,
:class:`sqlalchemy.exc.NoInspectionAvailable`
is raised. If ``False``, ``None`` is returned.
"""
type_ = type(subject)
for cls in type_.__mro__:
if cls in _registrars:
reg = _registrars[cls]
if reg is True:
return subject
ret = reg(subject)
if ret is not None:
break
else:
reg = ret = None
if raiseerr and (
reg is None or ret is None
):
raise exc.NoInspectionAvailable(
"No inspection system is "
"available for object of type %s" %
type_)
return ret
def _inspects(*types):
def decorate(fn_or_cls):
for type_ in types:
if type_ in _registrars:
raise AssertionError(
"Type %s is already "
"registered" % type_)
_registrars[type_] = fn_or_cls
return fn_or_cls
return decorate
def _self_inspects(cls):
_inspects(cls)(True)
return cls
| gpl-3.0 | -5,628,696,556,217,990,000 | -7,434,208,495,240,765,000 | 32.728261 | 84 | 0.660973 | false |
nijel/weblate | weblate/trans/validators.py | 1 | 1538 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from weblate.checks.flags import Flags
def validate_filemask(val):
"""Validate that filemask contains *."""
if "*" not in val:
raise ValidationError(
_("Filemask does not contain * as a language placeholder!")
)
def validate_autoaccept(val):
"""Validate correct value for autoaccept."""
if val == 1:
raise ValidationError(
_(
"A value of 1 is not allowed for autoaccept as "
"it would permit users to vote on their own suggestions."
)
)
def validate_check_flags(val):
"""Validate check influencing flags."""
flags = Flags(val)
flags.validate()
| gpl-3.0 | -103,750,848,676,861,820 | -2,339,009,396,362,573,000 | 31.659574 | 73 | 0.685993 | false |
alistairlow/tensorflow | tensorflow/python/ops/string_ops.py | 33 | 5311 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for working with string Tensors.
See the @{$python/string_ops} guide.
@@string_to_hash_bucket_fast
@@string_to_hash_bucket_strong
@@string_to_hash_bucket
@@reduce_join
@@string_join
@@string_split
@@substr
@@as_string
@@encode_base64
@@decode_base64
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_string_ops import *
from tensorflow.python.util import deprecation
# pylint: enable=wildcard-import
def string_split(source, delimiter=" ", skip_empty=True): # pylint: disable=invalid-name
"""Split elements of `source` based on `delimiter` into a `SparseTensor`.
Let N be the size of source (typically N will be the batch size). Split each
element of `source` based on `delimiter` and return a `SparseTensor`
containing the split tokens. Empty tokens are ignored.
If `delimiter` is an empty string, each element of the `source` is split
into individual strings, each containing one byte. (This includes splitting
multibyte sequences of UTF-8.) If delimiter contains multiple bytes, it is
treated as a set of delimiters with each considered a potential split point.
For example:
N = 2, source[0] is 'hello world' and source[1] is 'a b c', then the output
will be
st.indices = [0, 0;
0, 1;
1, 0;
1, 1;
1, 2]
st.shape = [2, 3]
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
source: `1-D` string `Tensor`, the strings to split.
delimiter: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1.
skip_empty: A `bool`. If `True`, skip the empty strings from the result.
Raises:
ValueError: If delimiter is not a string.
Returns:
A `SparseTensor` of rank `2`, the strings split according to the delimiter.
The first column of the indices corresponds to the row in `source` and the
second column corresponds to the index of the split component in this row.
"""
delimiter = ops.convert_to_tensor(delimiter, dtype=dtypes.string)
source = ops.convert_to_tensor(source, dtype=dtypes.string)
# pylint: disable=protected-access
indices, values, shape = gen_string_ops._string_split(
source, delimiter=delimiter, skip_empty=skip_empty)
# pylint: enable=protected-access
indices.set_shape([None, 2])
values.set_shape([None])
shape.set_shape([2])
return sparse_tensor.SparseTensor(indices, values, shape)
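# A minimal usage sketch of the function above, assuming it is exposed as
# tf.string_split (as in TensorFlow 1.x graph mode); it mirrors the docstring
# example and is illustrative only:
#
#     source = tf.constant(['hello world', 'a b c'])
#     st = tf.string_split(source)             # SparseTensor of split tokens
#     with tf.Session() as sess:
#         print(sess.run(st.values))   # [b'hello' b'world' b'a' b'b' b'c']
#         print(sess.run(st.indices))  # [[0 0] [0 1] [1 0] [1 1] [1 2]]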
def _reduce_join_reduction_dims(x, axis, reduction_indices):
"""Returns range(rank(x) - 1, 0, -1) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims - 1, -1, -1), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return math_ops.range(array_ops.rank(x) - 1, -1, -1)
def reduce_join(inputs, axis=None,
keep_dims=False,
separator="",
name=None,
reduction_indices=None):
reduction_indices = _reduce_join_reduction_dims(
inputs, axis, reduction_indices)
return gen_string_ops.reduce_join(
inputs=inputs,
reduction_indices=reduction_indices,
keep_dims=keep_dims,
separator=separator,
name=name)
reduce_join.__doc__ = deprecation.rewrite_argument_docstring(
gen_string_ops.reduce_join.__doc__, "reduction_indices", "axis")
ops.NotDifferentiable("StringToHashBucket")
ops.NotDifferentiable("StringToHashBucketFast")
ops.NotDifferentiable("StringToHashBucketStrong")
ops.NotDifferentiable("ReduceJoin")
ops.NotDifferentiable("StringJoin")
ops.NotDifferentiable("StringSplit")
ops.NotDifferentiable("AsString")
ops.NotDifferentiable("EncodeBase64")
ops.NotDifferentiable("DecodeBase64")
| apache-2.0 | 2,159,645,084,560,059,100 | -4,112,457,643,176,298,500 | 34.644295 | 89 | 0.703069 | false |
Widiot/simpleblog | venv/lib/python3.5/site-packages/pygments/plugin.py | 25 | 1721 | # -*- coding: utf-8 -*-
"""
pygments.plugin
~~~~~~~~~~~~~~~
Pygments setuptools plugin interface. The methods defined
here also work if setuptools isn't installed but they just
return nothing.
lexer plugins::
[pygments.lexers]
yourlexer = yourmodule:YourLexer
formatter plugins::
[pygments.formatters]
yourformatter = yourformatter:YourFormatter
/.ext = yourformatter:YourFormatter
As you can see, you can define extensions for the formatter
with a leading slash.
syntax plugins::
[pygments.styles]
yourstyle = yourstyle:YourStyle
filter plugin::
[pygments.filter]
yourfilter = yourfilter:YourFilter
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'
def iter_entry_points(group_name):
try:
import pkg_resources
except ImportError:
return []
return pkg_resources.iter_entry_points(group_name)
def find_plugin_lexers():
for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
yield entrypoint.load()
def find_plugin_formatters():
for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_styles():
for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
def find_plugin_filters():
for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
yield entrypoint.name, entrypoint.load()
| mit | -4,888,346,105,108,819,000 | -3,137,660,587,602,927,000 | 24.308824 | 70 | 0.680999 | false |
antonvino/inmoov-basic | hexapod_scripts_base/maestro_lib.py | 1 | 13426 | ###########################################################################################
# Filename:
# Device.py
###########################################################################################
# Project Authors:
# Juhapekka Piiroinen
# Brian Wu
#
# Changes:
# June 14, 2010 by Juhapekka Piiroinen - changes committed to svn
# - added comments for the device commands according to the manual from Pololu
# - added latest draft code for rotating base servo (Parallax Continuous Rotating Servo)
# - note! you should be able to clear error flags with .get_errors function according to the manual
# - renamed CameraDriver to LegacyCameraDriver as Brian Wu has done better one
# - integrated batch of changes provided by Brian Wu
#
# June 11, 2010 by Brian Wu - Changes committed thru email
# - Decoupling the implementation from the program
#
# April 19, 2010 by Juhapekka Piiroinen
# - Initial Release
#
# Email:
# [email protected]
#
# License:
# GNU/GPLv3
#
# Description:
# A python-wrapper for Pololu Micro Maestro 6-Channel USB Servo Controller
#
############################################################################################
# /!\ Notes /!\
# You will have to enable _USB Dual Port_ mode from the _Pololu Maestro Control Center_.
#
############################################################################################
# Device Documentation is available @ http://www.pololu.com/docs/pdf/0J40/maestro.pdf
############################################################################################
# (C) 2010 Juhapekka Piiroinen
# Brian Wu
############################################################################################
import serial
import time
def log(*msgline):
for msg in msgline:
print msg,
print
class Device(object):
def __init__(self,con_port="COM6",ser_port="COM7",timeout=1): #/dev/ttyACM0 and /dev/ttyACM1 for Linux
############################
# lets introduce and init the main variables
self.con = None
self.ser = None
self.isInitialized = False
############################
# lets connect the TTL Port
try:
self.con = serial.Serial(con_port,timeout=timeout,baudrate=9600)
self.con.close()
self.con.open()
self.con.baudrate = 9600
log("Link to Command Port -", con_port, "- successful")
except serial.serialutil.SerialException, e:
print e
log("Link to Command Port -", con_port, "- failed")
if self.con:
#####################
#If your Maestro's serial mode is "UART, detect baud rate", you must first send it the baud rate indication byte 0xAA on
#the RX line before sending any commands. The 0xAA baud rate indication byte can be the first byte of a Pololu protocol
#command.
#http://www.pololu.com/docs/pdf/0J40/maestro.pdf - page 35
# self.con.baudrate = 9600
# self.con.write(chr(0xAA))
# self.con.flush()
# log("Baud rate indication byte 0xAA sent!")
pass
###################################
# lets connect the TTL Port
try:
self.ser = serial.Serial(ser_port,timeout=timeout,baudrate=9600)
self.ser.close()
self.ser.open()
self.ser.baudrate = 9600
log("Link to TTL Port -", ser_port, "- successful")
except serial.serialutil.SerialException, e:
print e
log("Link to TTL Port -", ser_port, "- failed!")
self.isInitialized = (self.con!=None and self.ser!=None)
if (self.isInitialized):
err_flags = self.get_errors()
log("Device error flags read (",err_flags,") and cleared")
log("Device initialized:",self.isInitialized)
###########################################################################################################################
## common write function for handling all write related tasks
def write(self,*data):
if not self.isInitialized: log("Not initialized"); return
if not self.ser.writable():
log("Device not writable")
return
for d in data:
self.ser.write(chr(d))
self.ser.flush()
###########################################################################################################################
## Go Home
# Compact protocol: 0xA2
# --
# This command sends all servos and outputs to their home positions, just as if an error had occurred. For servos and
# outputs set to "Ignore", the position will be unchanged.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def go_home(self):
if not self.isInitialized: log("Not initialized"); return
self.write(0xA2)
###########################################################################################################################
## Set Target
# Compact protocol: 0x84, channel number, target low bits, target high bits
# --
# The lower 7 bits of the third data byte represent bits 0-6 of the target (the lower 7 bits), while the lower 7 bits of the
# fourth data byte represent bits 7-13 of the target. The target is a non-negative integer.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_target(self,servo,value):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(value,32)
self.write(0x84,servo,lowbits << 2,highbits)
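    ###########################################################################################################################
    ## Worked example (illustrative only): set_target(0, 1500) encodes a pulse width of
    ## 1500 us as 6000 quarter-microseconds, because the two data bytes written above
    ## carry 4*value: divmod(1500, 32) == (46, 28), so the bytes sent are
    ## 0x84, 0x00, 0x70 (28 << 2 == 112, the low 7 bits of 6000) and 0x2E (46, bits 7-13).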
###########################################################################################################################
## Set Speed
# Compact protocol: 0x87, channel number, speed low bits, speed high bits
# --
# This command limits the speed at which a servo channel's output value changes. The speed limit is given in units of (0.25 us)/(10 ms)
# --
# For example, the command 0x87, 0x05, 0x0C, 0x01 sets
# the speed of servo channel 5 to a value of 140, which corresponds to a speed of 3.5 us/ms. What this means is that if
# you send a Set Target command to adjust the target from, say, 1000 us to 1350 us, it will take 100 ms to make that
# adjustment. A speed of 0 makes the speed unlimited, so that setting the target will immediately affect the position. Note
# that the actual speed at which your servo moves is also limited by the design of the servo itself, the supply voltage, and
# mechanical loads; this parameter will not help your servo go faster than what it is physically capable of.
# --
# At the minimum speed setting of 1, the servo output takes 40 seconds to move from 1 to 2 ms.
# The speed setting has no effect on channels configured as inputs or digital outputs.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_speed(self,servo,speed):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(speed,32)
self.write(0x87,servo,lowbits << 2,highbits)
###########################################################################################################################
## Set Acceleration
# Compact protocol: 0x89, channel number, acceleration low bits, acceleration high bits
# --
# This command limits the acceleration of a servo channel's output. The acceleration limit is a value from 0 to 255 in units of (0.25 us)/(10 ms)/(80 ms),
# --
# A value of 0 corresponds to no acceleration limit. An acceleration limit causes the speed of a servo to slowly ramp up until it reaches the maximum speed, then
# to ramp down again as position approaches target, resulting in a relatively smooth motion from one point to another.
# With acceleration and speed limits, only a few target settings are required to make natural-looking motions that would
# otherwise be quite complicated to produce.
# --
# At the minimum acceleration setting of 1, the servo output takes about 3 seconds to move smoothly from a target of 1 ms to a target of 2 ms.
# The acceleration setting has no effect on channels configured as inputs or digital outputs.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def set_acceleration(self,servo,acceleration):
if not self.isInitialized: log("Not initialized"); return
highbits,lowbits = divmod(acceleration,32)
self.write(0x89,servo,lowbits << 2,highbits)
###########################################################################################################################
## Get Position
# Compact protocol: 0x90, channel number
# Response: position low 8 bits, position high 8 bits
# --
# This command allows the device communicating with the Maestro to get the position value of a channel. The position
# is sent as a two-byte response immediately after the command is received.
# --
# If the specified channel is configured as a servo, this position value represents the current pulse width that the Maestro
# is transmitting on the channel, reflecting the effects of any previous commands, speed and acceleration limits, or scripts
# running on the Maestro.
# --
# If the channel is configured as a digital output, a position value less than 6000 means the Maestro is driving the line low,
# while a position value of 6000 or greater means the Maestro is driving the line high.
# --
# If the channel is configured as an input, the position represents the voltage measured on the channel. The inputs on
# channels 0-11 are analog: their values range from 0 to 1023, representing voltages from 0 to 5 V. The inputs on channels
# 12-23 are digital: their values are either exactly 0 or exactly 1023.
# --
# Note that the formatting of the position in this command differs from the target/speed/acceleration formatting in the
# other commands. Since there is no restriction on the high bit, the position is formatted as a standard little-endian two-
# byte unsigned integer. For example, a position of 2567 corresponds to a response 0x07, 0x0A.
# --
# Note that the position value returned by this command is equal to four times the number displayed in the Position box
# in the Status tab of the Maestro Control Center.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_position(self,servo):
if not self.isInitialized: log("Not initialized"); return None
self.write(0x90,servo)
data = self.ser.read(2)
if data:
return (ord(data[0])+(ord(data[1])<<8))/4
else:
return None
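    # Worked example, mirroring the manual's sample response 0x07, 0x0A:
    # ord(data[0]) + (ord(data[1]) << 8) = 0x07 + (0x0A << 8) = 2567 quarter-us,
    # and 2567 / 4 -> 641, i.e. this method returns the pulse width in whole
    # microseconds (integer division under Python 2).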
###########################################################################################################################
## Get Moving State
# Compact protocol: 0x93
# Response: 0x00 if no servos are moving, 0x01 if servos are moving
# --
# This command is used to determine whether the servo outputs have reached their targets or are still changing, limited
# by speed or acceleration settings. Using this command together with the Set Target command, you can initiate several
# servo movements and wait for all the movements to finish before moving on to the next step of your program.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_moving_state(self):
if not self.isInitialized: log("Not initialized"); return None
self.write(0x93)
data = self.ser.read(1)
if data:
return ord(data[0])
else:
return None
###########################################################################################################################
## Get Errors
# Compact protocol: 0xA1
# --
# Response: error bits 0-7, error bits 8-15
# --
# Use this command to examine the errors that the Maestro has detected.
# --
# The error register is sent as a two-byte response immediately after the command is received,
# then all the error bits are cleared. For most applications using serial control, it is a good idea to check errors continuously
# and take appropriate action if errors occur.
# --
# Source: http://www.pololu.com/docs/pdf/0J40/maestro.pdf
def get_errors(self):
if not self.isInitialized: log("Not initialized"); return None
self.write(0xA1)
data = self.ser.read(2)
if data:
return ord(data[0])+(ord(data[1])<<8)
else:
return None
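    # Usage sketch (`controller` is a hypothetical instance name): the return
    # value is a 16-bit error mask, so individual bits documented in the
    # Maestro manual can be tested with, e.g.,
    #   errors = controller.get_errors()
    #   if errors is not None and errors & (1 << n): ...
    # Note that get_errors() returns None when the controller is not
    # initialized, and reading the register clears it, so cache the value if
    # several bits need to be inspected.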
###########################################################################################################################
## a helper function for Set Target
def wait_until_at_target(self):
while (self.get_moving_state()):
time.sleep(0.1)
###########################################################################################################################
    ## Let's close and clean up when we are done
def __del__(self):
if (self.ser):
self.ser.close()
if (self.con):
self.con.close()
del(self.ser)
del(self.con)
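# Minimal illustrative helpers (a sketch, independent of the class above and
# of any serial I/O): they isolate the 7-bit split/join used by the compact
# protocol, assuming a value already expressed in the protocol's native units
# (e.g. quarter-microseconds for targets).
def _split_7bit(value):
    """Split a 14-bit value into the (low, high) data bytes of 0x84/0x87/0x89."""
    return value & 0x7F, (value >> 7) & 0x7F
def _join_little_endian(low, high):
    """Join a two-byte little-endian response (e.g. Get Position) into an int."""
    return low + (high << 8)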
| mit | -497,084,866,109,239,040 | 5,274,815,295,410,531,000 | 48.725926 | 165 | 0.562565 | false |
erickt/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/mongodb/fields/json.py | 44 | 2251 | """
JSONField automatically serializes most Python terms to JSON data.
Creates a TEXT field with a default value of "{}". See test_json.py for
more information.
from django.db import models
from django_extensions.db.fields import json
class LOL(models.Model):
extra = json.JSONField()
"""
import six
import datetime
from decimal import Decimal
from django.conf import settings
from django.utils import simplejson
from mongoengine.fields import StringField
class JSONEncoder(simplejson.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
elif isinstance(obj, datetime.datetime):
assert settings.TIME_ZONE == 'UTC'
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
return simplejson.JSONEncoder.default(self, obj)
def dumps(value):
assert isinstance(value, dict)
return JSONEncoder().encode(value)
def loads(txt):
value = simplejson.loads(txt, parse_float=Decimal, encoding=settings.DEFAULT_CHARSET)
assert isinstance(value, dict)
return value
class JSONDict(dict):
"""
Hack so repr() called by dumpdata will output JSON instead of
Python formatted data. This way fixtures will work!
"""
def __repr__(self):
return dumps(self)
class JSONField(StringField):
"""JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly. Main thingy must be a dict object."""
def __init__(self, *args, **kwargs):
if 'default' not in kwargs:
kwargs['default'] = '{}'
StringField.__init__(self, *args, **kwargs)
def to_python(self, value):
"""Convert our string value to JSON after we load it from the DB"""
if not value:
return {}
elif isinstance(value, six.string_types):
res = loads(value)
assert isinstance(res, dict)
return JSONDict(**res)
else:
return value
def get_db_prep_save(self, value):
"""Convert our JSON object to a string before we save"""
if not value:
return super(JSONField, self).get_db_prep_save("")
else:
return super(JSONField, self).get_db_prep_save(dumps(value))
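# Minimal usage sketch (assumes a configured Django settings module, since
# loads() reads settings.DEFAULT_CHARSET). Decimals are emitted as JSON strings
# by JSONEncoder.default, while plain JSON floats come back as Decimal because
# loads() passes parse_float=Decimal.
def _example_roundtrip():
    encoded = dumps({'qty': 3, 'price': Decimal('9.99')})
    # encoded == '{"price": "9.99", "qty": 3}' (key order may vary)
    return loads('{"ratio": 0.5}')  # {'ratio': Decimal('0.5')}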
| apache-2.0 | 7,700,107,882,909,776,000 | -6,771,391,982,384,350,000 | 28.233766 | 89 | 0.649489 | false |
yongshengwang/hue | desktop/core/src/desktop/api2.py | 21 | 4908 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import tempfile
import time
import StringIO
import zipfile
from django.core import management
from django.shortcuts import redirect
from django.utils import html
from desktop.lib.django_util import JsonResponse
from desktop.lib.export_csvxls import make_response
from desktop.lib.i18n import smart_str
from desktop.models import Document2, Document
from django.http import HttpResponse
LOG = logging.getLogger(__name__)
def get_document(request):
if request.GET.get('id'):
doc = Document2.objects.get(id=request.GET['id'])
else:
doc = Document2.objects.get(uuid=request.GET['uuid'])
response = _massage_doc_for_json(doc, request.user, with_data=request.GET.get('with_data'))
return JsonResponse(response)
def _massage_doc_for_json(document, user, with_data=False):
massaged_doc = {
'id': document.id,
'uuid': document.uuid,
'owner': document.owner.username,
'type': html.conditional_escape(document.type),
'name': html.conditional_escape(document.name),
'description': html.conditional_escape(document.description),
'isMine': document.owner == user,
'lastModified': document.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(document.last_modified.timetuple()),
'version': document.version,
'is_history': document.is_history,
# tags
# dependencies
}
if with_data:
massaged_doc['data'] = document.data_dict
return massaged_doc
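# For reference, a massaged document serializes to JSON roughly as follows
# (all field values below are hypothetical):
#   {"id": 42, "uuid": "...", "owner": "admin", "type": "notebook",
#    "name": "My query", "description": "", "isMine": true,
#    "lastModified": "03/17/16 13:31:00", "lastModifiedInMillis": 1458221460.0,
#    "version": 1, "is_history": false}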
def export_documents(request):
if request.GET.get('documents'):
selection = json.loads(request.GET.get('documents'))
else:
selection = json.loads(request.POST.get('documents'))
# If non admin, only export documents the user owns
docs = Document2.objects
if not request.user.is_superuser:
docs = docs.filter(owner=request.user)
docs = docs.filter(id__in=selection).order_by('-id')
doc_ids = docs.values_list('id', flat=True)
f = StringIO.StringIO()
if doc_ids:
doc_ids = ','.join(map(str, doc_ids))
management.call_command('dumpdata', 'desktop.Document2', primary_keys=doc_ids, indent=2, use_natural_keys=True, verbosity=2, stdout=f)
if request.GET.get('format') == 'json':
return JsonResponse(f.getvalue(), safe=False)
elif request.GET.get('format') == 'zip':
zfile = zipfile.ZipFile(f, 'w')
zfile.writestr("hue.json", f.getvalue())
for doc in docs:
if doc.type == 'notebook':
try:
from spark.models import Notebook
zfile.writestr("notebook-%s-%s.txt" % (doc.name, doc.id), smart_str(Notebook(document=doc).get_str()))
except Exception, e:
print e
LOG.exception(e)
zfile.close()
response = HttpResponse(content_type="application/zip")
response["Content-Length"] = len(f.getvalue())
response['Content-Disposition'] = 'attachment; filename="hue-documents.zip"'
response.write(f.getvalue())
return response
else:
return make_response(f.getvalue(), 'json', 'hue-documents')
def import_documents(request):
if request.FILES.get('documents'):
documents = request.FILES['documents'].read()
else:
documents = json.loads(request.POST.get('documents'))
documents = json.loads(documents)
docs = []
for doc in documents:
if not request.user.is_superuser:
doc['fields']['owner'] = [request.user.username]
owner = doc['fields']['owner'][0]
doc['fields']['tags'] = []
if Document2.objects.filter(uuid=doc['fields']['uuid'], owner__username=owner).exists():
doc['pk'] = Document2.objects.get(uuid=doc['fields']['uuid'], owner__username=owner).pk
else:
doc['pk'] = None
docs.append(doc)
f = tempfile.NamedTemporaryFile(mode='w+', suffix='.json')
f.write(json.dumps(docs))
f.flush()
stdout = StringIO.StringIO()
try:
management.call_command('loaddata', f.name, stdout=stdout)
except Exception, e:
return JsonResponse({'message': smart_str(e)})
Document.objects.sync()
if request.POST.get('redirect'):
return redirect(request.POST.get('redirect'))
else:
return JsonResponse({'message': stdout.getvalue()})
| apache-2.0 | -3,397,143,360,752,405,000 | -4,363,654,071,147,105,300 | 30.063291 | 138 | 0.694988 | false |
apanju/GMIO_Odoo | addons/stock_picking_wave/__init__.py | 374 | 1105 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_picking_wave
import wizard
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,866,679,268,933,171,000 | 9,095,572,735,517,836,000 | 41.5 | 78 | 0.624434 | false |
mimoralea/king-pong | multicnet.py | 1 | 6980 | import tensorflow as tf
import cv2
import numpy as np
class MultilayerConvolutionalNetwork:
"""
This class manages the deep neural network
that will be used by the agent to learn
and extrapolate the state space
"""
def __init__(self, input_width, input_height, nimages, nchannels):
self.session = tf.InteractiveSession()
self.input_width = input_width
self.input_height = input_height
self.nimages = nimages
self.nchannels = nchannels
self.a = tf.placeholder("float", [None, self.nchannels])
self.y = tf.placeholder("float", [None])
self.input_image, self.y_conv, self.h_fc1, self.train_step = self.build_network()
self.session.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
def weight_variable(self, shape, stddev = 0.01):
"""
Initialize weight with slight amount of noise to
break symmetry and prevent zero gradients
"""
initial = tf.truncated_normal(shape, stddev = stddev)
return tf.Variable(initial)
def bias_variable(self, shape, value = 0.01):
"""
        Initialize ReLU neurons with a slight positive initial
        bias to avoid dead neurons
"""
initial = tf.constant(value, shape=shape)
return tf.Variable(initial)
def conv2d(self, x, W, stride = 1):
"""
        We use zero-padded convolutions so that, at the default stride of 1,
        the output has the same spatial size as the input
"""
return tf.nn.conv2d(x, W, strides = [1, stride, stride, 1], padding = "SAME")
def max_pool_2x2(self, x):
"""
Our pooling is plain old max pooling over 2x2 blocks
"""
return tf.nn.max_pool(x, ksize = [1, 2, 2, 1],
strides = [1, 2, 2, 1], padding = "SAME")
def build_weights_biases(self, weights_shape):
"""
Build the weights and bias of a convolutional layer
"""
return self.weight_variable(weights_shape), \
self.bias_variable(weights_shape[-1:])
def convolve_relu_pool(self, nn_input, weights_shape, stride = 4, pool = True):
"""
Convolve the input to the network with the weight tensor,
add the bias, apply the ReLU function and finally max pool
"""
W_conv, b_conv = self.build_weights_biases(weights_shape)
h_conv = tf.nn.relu(self.conv2d(nn_input, W_conv, stride) + b_conv)
if not pool:
return h_conv
return self.max_pool_2x2(h_conv)
def build_network(self):
"""
Sets up the deep neural network
"""
        # the input is an 80x80 image whose channels are the
        # `nimages` stacked greyscale frames
input_image = tf.placeholder("float", [None, self.input_width,
self.input_height, self.nimages])
# create the first convolutional layers
h_pool1 = self.convolve_relu_pool(input_image, [8, 8, self.nimages, 32])
h_conv2 = self.convolve_relu_pool(h_pool1, [4, 4, 32, 64], 2, False)
h_conv3 = self.convolve_relu_pool(h_conv2, [3, 3, 64, 64], 1, False)
# create the densely connected layers
W_fc1, b_fc1 = self.build_weights_biases([5 * 5 * 64, 512])
h_conv3_flat = tf.reshape(h_conv3, [-1, 5 * 5 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
# finally add the readout layer
W_fc2, b_fc2 = self.build_weights_biases([512, self.nchannels])
readout = tf.matmul(h_fc1, W_fc2) + b_fc2
readout_action = tf.reduce_sum(tf.mul(readout, self.a), reduction_indices=1)
cost_function = tf.reduce_mean(tf.square(self.y - readout_action))
train_step = tf.train.AdamOptimizer(1e-8).minimize(cost_function)
return input_image, readout, h_fc1, train_step
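    # Shape walk-through for the defaults (80x80 input, SAME padding), which is
    # where the 5 * 5 * 64 flatten size above comes from:
    #   80x80xnimages -> conv 8x8 stride 4 -> 20x20x32 -> max pool 2x2 -> 10x10x32
    #   -> conv 4x4 stride 2 -> 5x5x64 -> conv 3x3 stride 1 -> 5x5x64
    #   -> flatten 1600 -> fully connected 512 -> readout of size nchannels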
def train(self, value_batch, action_batch, state_batch):
"""
Does the actual training step
"""
self.train_step.run(feed_dict = {
self.y : value_batch,
self.a : action_batch,
self.input_image : state_batch
})
def save_variables(self, a_file, h_file, stack):
"""
Saves neural network weight variables for
debugging purposes
"""
readout_t = self.readout_act(stack)
a_file.write(",".join([str(x) for x in readout_t]) + '\n')
h_file.write(",".join([str(x) for x in self.h_fc1.eval(
feed_dict={self.input_image:[stack]})[0]]) + '\n')
def save_percepts(self, path, x_t1):
"""
Saves an image array to visualize
how the image is compressed before saving
"""
cv2.imwrite(path, np.rot90(x_t1))
def save_network(self, directory, iteration):
"""
Saves the progress of the agent
for further use later on
"""
self.saver.save(self.session, directory + '/network', global_step = iteration)
def attempt_restore(self, directory):
"""
Restors the latest file saved if
available
"""
checkpoint = tf.train.get_checkpoint_state(directory)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.session, checkpoint.model_checkpoint_path)
return checkpoint.model_checkpoint_path
def preprocess_percepts(self, x_t1_colored, reshape = True):
"""
The raw image arrays get shrunk down and
remove any color whatsoever. Also gets it in
3 dimensions if needed
"""
x_t1_resized = cv2.resize(x_t1_colored, (self.input_width, self.input_height))
x_t1_greyscale = cv2.cvtColor(x_t1_resized, cv2.COLOR_BGR2GRAY)
ret, x_t1 = cv2.threshold(x_t1_greyscale, 1, 255, cv2.THRESH_BINARY)
"""
import time
timestamp = int(time.time())
cv2.imwrite("percepts/%d-color.png" % timestamp,
np.rot90(x_t1_colored))
cv2.imwrite("percepts/%d-resized.png" % timestamp,
np.rot90(x_t1_resized))
cv2.imwrite("percepts/%d-greyscale.png" % timestamp,
np.rot90(x_t1_greyscale))
cv2.imwrite("percepts/%d-bandw.png" % timestamp,
np.rot90(x_t1))
"""
if not reshape:
return x_t1
return np.reshape(x_t1, (80, 80, 1))
def readout_act(self, stack):
"""
Gets the best action
for a given stack of images
"""
stack = [stack] if hasattr(stack, 'shape') and len(stack.shape) == 3 else stack
return self.y_conv.eval(feed_dict = {self.input_image: stack})
def select_best_action(self, stack):
"""
Selects the action with the
highest value
"""
return np.argmax(self.readout_act(stack))
def main():
print('This module should be imported')
pass
if __name__ == "__main__":
main()
| mit | 4,029,948,714,981,120,500 | 5,213,098,401,491,745,000 | 35.544503 | 89 | 0.5851 | false |
scipy/scipy | scipy/signal/tests/test_max_len_seq.py | 12 | 3106 | import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from pytest import raises as assert_raises
from numpy.fft import fft, ifft
from scipy.signal import max_len_seq
class TestMLS:
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
assert_array_equal(max_len_seq(10, length=0)[0], [])
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
# First, make sure we got all 1's or -1
err_msg = "mls had non binary terms"
assert_array_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
# Test via circular cross-correlation, which is just mult.
# in the frequency domain with one signal conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
out_len = 2**nbits - 1
# impulse amplitude == test_len
err_msg = "mls impulse has incorrect value"
assert_allclose(tester[0], out_len, err_msg=err_msg)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
assert_allclose(tester[1:], np.full(out_len - 1, -1),
err_msg=err_msg)
# let's do the split thing using a couple options
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
assert_array_equal(orig_m, new_m)
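# Illustrative helper (a sketch, not used by the tests above): it reproduces
# the property the tests assert -- the +/-1 representation of an MLS has a
# circular autocorrelation of 2**nbits - 1 at lag 0 and -1 at every other lag.
def _mls_circular_autocorrelation(nbits=5):
    m = 2. * max_len_seq(nbits)[0] - 1.
    return np.real(ifft(fft(m) * np.conj(fft(m))))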
| bsd-3-clause | 4,883,090,276,360,078,000 | 3,844,537,491,520,498,700 | 46.784615 | 79 | 0.474887 | false |
wilsonkichoi/zipline | zipline/data/data_portal.py | 1 | 64491 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import mul
import bcolz
from logbook import Logger
import numpy as np
import pandas as pd
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import Asset, Future, Equity
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.data.us_equity_loader import (
USEquityDailyHistoryLoader,
USEquityMinuteHistoryLoader,
)
from zipline.utils import tradingcalendar
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price", "last_traded"
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during the course of the simulation day.
    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their respective
    methods.
"""
def __init__(self, market_opens, minute_reader):
self._market_opens = market_opens
self._minute_reader = minute_reader
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date is different from date the cache is
# flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
date = dt.date()
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != date:
market_open = self._market_opens.loc[date]
cache = self._caches[field] = (dt.date(), market_open, {})
_, market_open, entries = cache
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
        for the day. If there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = max(last_max, np.nanmax(window))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
window_min = np.nanmin(window)
if pd.isnull(window_min):
val = last_min
else:
val = min(last_min, window_min)
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
normalized_dt = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_dt, True):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes(
[asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = self.closes([asset],
pd.Timestamp(prev_dt, tz='UTC'))[0]
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
        between the market open and the `dt`.
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
normalized_date = normalize_date(dt)
for asset in assets:
if not asset._is_alive(normalized_date, True):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
env : TradingEnvironment
The trading environment for the simulation. This includes the trading
calendar and benchmark data.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentWriter, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
"""
def __init__(self,
env,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None):
self.env = env
self.views = {}
self._asset_finder = env.asset_finder
self._carrays = {
'open': {},
'high': {},
'low': {},
'close': {},
'volume': {},
'sid': {},
}
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._equity_daily_reader = equity_daily_reader
if self._equity_daily_reader is not None:
self._equity_history_loader = USEquityDailyHistoryLoader(
self.env,
self._equity_daily_reader,
self._adjustment_reader
)
self._equity_minute_reader = equity_minute_reader
self._future_daily_reader = future_daily_reader
self._future_minute_reader = future_minute_reader
self._first_trading_day = first_trading_day
if self._equity_minute_reader is not None:
self._equity_daily_aggregator = DailyHistoryAggregator(
self.env.open_and_closes.market_open,
self._equity_minute_reader)
self._equity_minute_history_loader = USEquityMinuteHistoryLoader(
self.env,
self._equity_minute_reader,
self._adjustment_reader
)
self.MINUTE_PRICE_ADJUSTMENT_FACTOR = \
self._equity_minute_reader._ohlc_inverse
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.env.days_in_range(
start=sim_params.period_start,
end=sim_params.period_end
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _open_minute_file(self, field, asset):
sid_str = str(int(asset))
try:
carray = self._carrays[field][sid_str]
except KeyError:
carray = self._carrays[field][sid_str] = \
self._get_ctable(asset)[field]
return carray
def _get_ctable(self, asset):
sid = int(asset)
if isinstance(asset, Future):
if self._future_minute_reader.sid_path_func is not None:
path = self._future_minute_reader.sid_path_func(
self._future_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._future_minute_reader.rootdir, sid)
elif isinstance(asset, Equity):
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
else:
# TODO: Figure out if assets should be allowed if neither, and
# why this code path is being hit.
if self._equity_minute_reader.sid_path_func is not None:
path = self._equity_minute_reader.sid_path_func(
self._equity_minute_reader.rootdir, sid
)
else:
path = "{0}/{1}.bcolz".format(
self._equity_minute_reader.rootdir, sid)
return bcolz.open(path, mode='r')
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is dt provided.
"""
if data_frequency == 'minute':
return self._equity_minute_reader.get_last_traded_dt(asset, dt)
elif data_frequency == 'daily':
return self._equity_daily_reader.get_last_traded_dt(asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and isinstance(asset, Asset))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
of the desired asset's field at either the given dt.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset`` The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be a int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
if self._is_extra_source(asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and dt > asset.end_date) or \
(data_frequency == "minute" and
normalize_date(dt) > asset.end_date):
if field == "volume":
return 0
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
day_to_use = dt
day_to_use = normalize_date(day_to_use)
return self._get_daily_data(asset, field, day_to_use)
else:
if isinstance(asset, Future):
return self._get_minute_spot_value_future(
asset, field, dt)
else:
if field == "last_traded":
return self._equity_minute_reader.get_last_traded_dt(
asset, dt
)
elif field == "price":
return self._get_minute_spot_value(asset, "close", dt,
True)
else:
return self._get_minute_spot_value(asset, field, dt)
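    # Usage sketch (names are hypothetical): a minute-mode algorithm would call
    # e.g. data_portal.get_spot_value(asset, "price", dt, "minute") to get a
    # forward-filled close, while "volume" yields 0 rather than NaN when the
    # asset is not alive at `dt`.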
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
split_adj_factor = lambda x: x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be a int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value_future(self, asset, column, dt):
# Futures bcolz files have 1440 bars per day (24 hours), 7 days a week.
# The file attributes contain the "start_dt" and "last_dt" fields,
# which represent the time period for this bcolz file.
# The start_dt is midnight of the first day that this future started
# trading.
# figure out the # of minutes between dt and this asset's start_dt
start_date = self._get_asset_start_date(asset)
minute_offset = int((dt - start_date).total_seconds() / 60)
if minute_offset < 0:
# asking for a date that is before the asset's start date, no dice
return 0.0
# then just index into the bcolz carray at that offset
carray = self._open_minute_file(column, asset)
result = carray[minute_offset]
# if there's missing data, go backwards until we run out of file
while result == 0 and minute_offset > 0:
minute_offset -= 1
result = carray[minute_offset]
if column != 'volume':
# FIXME switch to a futures reader
return result * 0.001
else:
return result
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
result = self._equity_minute_reader.get_value(
asset.sid, dt, column
)
if column == "volume":
if result == 0:
return 0
elif not ffill or not np.isnan(result):
# if we're not forward filling, or we found a result, return it
return result
# we are looking for price, and didn't find one. have to go hunting.
last_traded_dt = \
self._equity_minute_reader.get_last_traded_dt(asset, dt)
if last_traded_dt is pd.NaT:
# no last traded dt, bail
return np.nan
# get the value as of the last traded dt
result = self._equity_minute_reader.get_value(
asset.sid,
last_traded_dt,
column
)
if np.isnan(result):
return np.nan
if dt == last_traded_dt or dt.date() == last_traded_dt.date():
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, last_traded_dt,
dt, "minute", spot_value=result
)
def _get_daily_data(self, asset, column, dt):
if column == "last_traded":
last_traded_dt = \
self._equity_daily_reader.get_last_traded_dt(asset, dt)
if pd.isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
val = self._equity_daily_reader.spot_price(asset, dt, column)
if val == -1:
if column == "volume":
return 0
else:
return np.nan
else:
return val
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = self._equity_daily_reader.spot_price(
asset, found_dt, "close"
)
if value != -1:
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= tradingcalendar.trading_day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.env.trading_days
end_loc = self.env.trading_days.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[bar_count].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
days_for_window = self._get_days_for_window(end_dt.date(), bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
future_data = []
eq_assets = []
for asset in assets:
if isinstance(asset, Future):
future_data.append(self._get_history_daily_window_future(
asset, days_for_window, end_dt, field_to_use
))
else:
eq_assets.append(asset)
eq_data = self._get_history_daily_window_equities(
eq_assets, days_for_window, end_dt, field_to_use
)
if future_data:
# TODO: This case appears to be uncovered by testing.
            data = np.concatenate((eq_data, np.array(future_data).T), axis=1)
else:
data = eq_data
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_future(self, asset, days_for_window,
end_dt, column):
# Since we don't have daily bcolz files for futures (yet), use minute
# bars to calculate the daily values.
data = []
data_groups = []
# get all the minutes for the days NOT including today
for day in days_for_window[:-1]:
minutes = self.env.market_minutes_for_day(day)
values_for_day = np.zeros(len(minutes), dtype=np.float64)
for idx, minute in enumerate(minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_day[idx] = minute_val
data_groups.append(values_for_day)
# get the minutes for today
last_day_minutes = pd.date_range(
start=self.env.get_open_and_close(end_dt)[0],
end=end_dt,
freq="T"
)
values_for_last_day = np.zeros(len(last_day_minutes), dtype=np.float64)
for idx, minute in enumerate(last_day_minutes):
minute_val = self._get_minute_spot_value_future(
asset, column, minute
)
values_for_last_day[idx] = minute_val
data_groups.append(values_for_last_day)
for group in data_groups:
if len(group) == 0:
continue
if column == 'volume':
data.append(np.sum(group))
elif column == 'open':
data.append(group[0])
elif column == 'close':
data.append(group[-1])
elif column == 'high':
data.append(np.amax(group))
elif column == 'low':
data.append(np.amin(group))
return data
def _get_history_daily_window_equities(
self, assets, days_for_window, end_dt, field_to_use):
ends_at_midnight = end_dt.hour == 0 and end_dt.minute == 0
if ends_at_midnight:
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day, use daily data for the whole range.
return self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_for_sids(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._equity_daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._equity_daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._equity_daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._equity_daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._equity_daily_aggregator.volumes(
assets, end_dt)
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
mm = self.env.market_minutes
end_loc = mm.get_loc(end_dt)
start_loc = end_loc - bar_count + 1
if start_loc < 0:
suggested_start_day = (mm[bar_count] + self.env.trading_day).date()
raise HistoryWindowStartsBeforeData(
first_trading_day=self.env.first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day,
)
minutes_for_window = mm[start_loc:end_loc + 1]
asset_minute_data = self._get_minute_window_for_assets(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
            The assets whose data is desired.
        end_dt: pd.Timestamp
            The end of the desired window of data.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS:
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
dt_to_fill = df.index[0]
perspective_dt = df.index[-1]
assets_with_leading_nan = np.where(pd.isnull(df.iloc[0]))[0]
for missing_loc in assets_with_leading_nan:
asset = assets[missing_loc]
previous_dt = self.get_last_traded_dt(
asset, dt_to_fill, data_frequency)
if pd.isnull(previous_dt):
continue
previous_value = self.get_adjusted_value(
asset,
field,
previous_dt,
perspective_dt,
data_frequency,
)
df.iloc[0, missing_loc] = previous_value
df.fillna(method='ffill', inplace=True)
for asset in df.columns:
if df.index[-1] >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
series = df[asset]
series[series.index.normalize() > asset.end_date] = np.NaN
return df
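    # Usage sketch (names are hypothetical): to get 30 daily, fully adjusted
    # closing prices ending at `end_dt`, call
    #   data_portal.get_history_window([asset], end_dt, 30, "1d", "price")
    # which returns a DataFrame indexed by trading day with one column per
    # asset. Requesting "price" forward-fills; "close" leaves gaps as NaN.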
def _get_minute_window_for_assets(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
        assets : iterable of Asset
            The assets whose data is desired.
        field: string
            The specific field to return. "open", "high", "close", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
if isinstance(assets, Future):
return self._get_minute_window_for_future([assets], field,
minutes_for_window)
else:
# TODO: Make caller accept assets.
window = self._get_minute_window_for_equities(assets, field,
minutes_for_window)
return window
def _get_minute_window_for_future(self, asset, field, minutes_for_window):
# THIS IS TEMPORARY. For now, we are only exposing futures within
# equity trading hours (9:30 am to 4pm, Eastern). The easiest way to
# do this is to simply do a spot lookup for each desired minute.
return_data = np.zeros(len(minutes_for_window), dtype=np.float64)
for idx, minute in enumerate(minutes_for_window):
return_data[idx] = \
self._get_minute_spot_value_future(asset, field, minute)
# Note: an improvement could be to find the consecutive runs within
# minutes_for_window, and use them to read the underlying ctable
# more efficiently.
# Once futures are on 24-hour clock, then we can just grab all the
# requested minutes in one shot from the ctable.
# no adjustments for futures, yay.
return return_data
def _get_minute_window_for_equities(
self, assets, field, minutes_for_window):
return self._equity_minute_history_loader.history(assets,
minutes_for_window,
field)
def _apply_all_adjustments(self, data, asset, dts, field,
price_adj_factor=1.0):
"""
Internal method that applies all the necessary adjustments on the
given data array.
The adjustments are:
- splits
- if field != "volume":
- mergers
- dividends
- * 0.001
- any zero fields replaced with NaN
- all values rounded to 3 digits after the decimal point.
Parameters
----------
data : np.array
The data to be adjusted.
asset: Asset
The asset whose data is being adjusted.
dts: pd.DateTimeIndex
The list of minutes or days representing the desired window.
field: string
The field whose values are in the data array.
price_adj_factor: float
Factor with which to adjust OHLC values.
Returns
-------
None. The data array is modified in place.
"""
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
),
data,
dts,
field != 'volume'
)
if field != 'volume':
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
),
data,
dts,
True
)
self._apply_adjustments_to_window(
self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS"
),
data,
dts,
True
)
if price_adj_factor is not None:
data *= price_adj_factor
np.around(data, 3, out=data)
def _get_daily_window_for_sids(
self, assets, field, days_in_window, extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
        assets : iterable of Assets
            The assets whose data is desired.
        days_in_window: pd.DatetimeIndex
            The list of days representing the desired window.
field: string
The specific field to return. "open", "high", "close_price", etc.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)))
else:
return_array = np.zeros((bar_count, len(assets)))
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._equity_history_loader.history(assets,
days_in_window,
field)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
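    # Illustrative sketch (shapes assumed, not taken from a real data bundle):
    # for 3 assets and a 5-day window with extra_slot=True, the array built
    # above has shape (6, 3); the first 5 rows receive the daily bars and the
    # final row stays NaN (or 0 for volume) so the caller can later fill in
    # the current, partial day without reallocating the array.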
@staticmethod
def _apply_adjustments_to_window(adjustments_list, window_data,
dts_in_window, multiply):
if len(adjustments_list) == 0:
return
# advance idx to the correct spot in the adjustments list, based on
# when the window starts
idx = 0
while idx < len(adjustments_list) and dts_in_window[0] >\
adjustments_list[idx][0]:
idx += 1
# if we've advanced through all the adjustments, then there's nothing
# to do.
if idx == len(adjustments_list):
return
while idx < len(adjustments_list):
adjustment_to_apply = adjustments_list[idx]
if adjustment_to_apply[0] > dts_in_window[-1]:
break
range_end = dts_in_window.searchsorted(adjustment_to_apply[0])
if multiply:
window_data[0:range_end] *= adjustment_to_apply[1]
else:
window_data[0:range_end] /= adjustment_to_apply[1]
idx += 1
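    # Worked example (numbers invented, purely illustrative): suppose
    # adjustments_list holds a single 2:1 split, [(Timestamp('2015-06-15'), 0.5)],
    # and dts_in_window covers 2015-06-12, 2015-06-15 and 2015-06-16 with
    # window_data = [20.0, 10.0, 10.0]. searchsorted puts range_end at 1, so
    # with multiply=True only the pre-split bar is scaled, giving
    # [10.0, 10.0, 10.0]; for volume (multiply=False) that bar would instead
    # be divided by the ratio.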
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, sids, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
sids : container
Sids for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(int, float)]
List of splits, where each split is a (sid, ratio) tuple.
"""
if self._adjustment_reader is None or not sids:
return {}
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in sids]
return splits
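    # Hedged usage sketch (the portal name, sids and date are made up):
    #     portal.get_splits({24, 8554}, pd.Timestamp('2015-06-15', tz='UTC'))
    # converts the midnight-UTC timestamp to epoch seconds, queries the SPLITS
    # table for that effective_date, and keeps only rows whose sid is in the
    # requested set, e.g. returning [(24, 0.5)].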
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of objects with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
# bars is the number of days desired. we have to translate that
# into the number of minutes we want.
# we get all the minutes for the last (bars - 1) days, then add
# all the minutes so far today. the +2 is to account for ignoring
# today, and the previous day, in doing the math.
previous_day = self.env.previous_trading_day(ending_minute)
days = self.env.days_in_range(
self.env.add_trading_days(-days_count + 2, previous_day),
previous_day,
)
minutes_count = \
sum(210 if day in self.env.early_closes else 390 for day in days)
# add the minutes for today
today_open = self.env.get_open_and_close(ending_minute)[0]
minutes_count += \
((ending_minute - today_open).total_seconds() // 60) + 1
return minutes_count
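    # Rough arithmetic sketch (assuming no early closes): for days_count=3 the
    # range built above covers the previous two full sessions, contributing
    # 2 * 390 = 780 minutes; if ending_minute is the 60th minute of today's
    # session, the method returns 780 + 60 = 840.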
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset], dt, 2, "1d", "price", ffill=True
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = self._get_minute_count_for_transform(
dt, bars
)
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "price", ffill=True
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset], dt, calculated_bar_count, freq_str, "volume",
ffill=True
)[asset]
vol_sum = nansum(volume_arr)
try:
ret = nansum(price_arr * volume_arr) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
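    # For reference, the "vwap" branch above computes the usual
    # volume-weighted average price over the window,
    #     VWAP = sum(price_t * volume_t) / sum(volume_t),
    # with NaN bars ignored by the nansum calls.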
| apache-2.0 | -869,887,831,834,666,500 | 5,367,892,125,146,975,000 | 35.830954 | 79 | 0.516025 | false |
kingvuplus/italysat-enigma3 | tools/host_tools/FormatConverter/satxml.py | 112 | 2759 | import os
from datasource import datasource
from xml.dom import minidom
from xml.dom.minidom import Document
from input import inputText
class satxml(datasource):
def __init__(self, filename = "satellites.xml"):
self.filename = filename
datasource.__init__(self)
if not os.path.isfile(filename):
print "File %s doesn't exist. Creating it." % filename
def getStatus(self):
text = datasource.getStatus(self)
return text
def getCapabilities(self):
return [("set filename", self.setFilename), ("read file", self.read), ("write file", self.write), ("print all", self.printAll)]
def getName(self):
return "satellites.xml"
def setFilename(self):
print "Please give a filename <satellites.xml>:"
filename = inputText()
if filename == "":
self.filename = "satellites.xml"
else:
self.filename = filename
print "Filename set to %s" % self.filename
def read(self):
basicsatxml = minidom.parse(self.filename)
for sat in basicsatxml.firstChild.childNodes:
if sat.nodeType == sat.ELEMENT_NODE and sat.localName == "sat":
print sat.localName
satname = str(sat.getAttribute("name"))
satpos = str(sat.getAttribute("position"))
self.addSat(satname, satpos)
for transponder in sat.childNodes:
if transponder.nodeType == transponder.ELEMENT_NODE and transponder.localName == "transponder":
parameters = {}
paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"]
for param in paramlist:
entry = str(transponder.getAttribute(param))
if entry != "":
parameters[param] = entry
if len(parameters.keys()) > 1:
self.addTransponder(satpos, parameters)
print self.transponderlist
def write(self):
satxml = Document()
satellites = satxml.createElement("satellites")
satxml.appendChild(satellites)
satlist = self.transponderlist.keys()
print self.transponderlist
satlist.sort()
for sat in satlist:
xmlsat = satxml.createElement("sat")
xmlsat.setAttribute("name", self.satnames[sat])
xmlsat.setAttribute("flags", "1")
xmlsat.setAttribute("position", sat)
satellites.appendChild(xmlsat)
transponders = self.transponderlist[sat]
transponders.sort(key = lambda a: a["frequency"])
for transponder in transponders:
xmltransponder = satxml.createElement("transponder")
paramlist = ["frequency", "symbol_rate", "polarization", "fec", "system", "modulation", "tsid", "onid"]
for param in paramlist:
if transponder.has_key(param):
xmltransponder.setAttribute(param, transponder[param])
xmlsat.appendChild(xmltransponder)
prettyxml = satxml.toprettyxml()
print prettyxml
file = open(self.filename, "w")
file.write(prettyxml)
file.close()
| gpl-2.0 | 2,036,872,457,078,578,700 | 4,169,353,718,505,740,300 | 31.081395 | 129 | 0.701704 | false |
trinitysoulstars/astralturf | lyriscisssor.py | 1 | 2800 | import nltk
from nltk import FreqDist
import json
import csv
print "* Loading corpus"
#raw = gutenberg.raw('melville-moby_dick.txt')
#raw = gutenberg.raw('bible-kjv.txt')
#raw = gutenberg.raw('tss-lyrics.txt')
lines = []
with open("tss-lyrics.txt", 'r') as raw:
lines = raw.read()
print "* Tokenizing"
#tokens = nltk.word_tokenize(lines)
tokens = nltk.word_tokenize(lines)
print "* Tagging parts of speech"
# Save this to strip articles later
parts_of_speech = nltk.pos_tag(tokens)
print "* Converting POS list into a dict for lookup"
# TODO -- fix this. this is going to fuck up on homonyms
parts_of_speech = dict(parts_of_speech)
# You can ban other parts of speech by adding their tags to this list.
# You can find out what the part-of-speech tags mean by using code like
# this:
# >>> print nltk.help.upenn_tagset('DT')
# DT: determiner
# all an another any both del each either every half la many much nary
# neither no some such that the them these this those
banned_parts_of_speech = [
'DT',
'IN',
'CC',
'TO',
'PRP',
'PRP$',
]
banned_words = [
'Chorus',
'chorus',
'is',
'has',
'had',
'have',
'there',
'so',
'So',
'on',
'On',
'did',
'am',
    'are',
'Is',
'be',
'my',
'My',
'can',
'Can',
'was',
'of',
'Of',
'OF',
'OH',
'oh',
'Oh',
'the',
'THE',
'The',
'that',
'That',
'when',
'When',
'what',
'What',
'who',
'Who',
'how',
'How',
'his',
'His',
'were',
'Why',
'why',
'then',
'Then',
'Does',
'does',
'O',
'do',
'Do',
'Go',
'go',
]
print "* Stripping stuff we don't want"
# Strip punctuation and banned parts of speech
tokens = [
token for token in tokens if (
# Kill punctuation
token.isalpha() and
# Kill parts of speech that we don't want.
not parts_of_speech[token] in banned_parts_of_speech and
not token in banned_words #and
#len(token) > 4
)
]
print "* Building frequency distribution"
words = FreqDist(tokens)
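# Small illustrative example (made-up tokens, not from the lyrics corpus):
# FreqDist(['star', 'soul', 'star']) simply counts occurrences, e.g.
# star -> 2, soul -> 1; showWords() below walks these (word, count) pairs and
# writes them to corpus.txt together with each word's part-of-speech tag.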
N = 1000
def showWords(N=1000):
print "* Printing top %i words" % N
f = open('corpus.txt', 'wb')
writer = csv.writer(f)
for i, pair in enumerate(words.items()):
word, count = pair
row = word, count, parts_of_speech[word]
#row = "%r, %r, %r" % (word, count, parts_of_speech[word])
#row = json.dumps([word, count, parts_of_speech[word]], separators=(',',':'))
writer.writerow(row)
print "%r appeared %i times. Its part of speech is %r" % (
word, count, parts_of_speech[word],
)
if i > N:
break
f.close()
return (word, count, parts_of_speech)
showWords()
| agpl-3.0 | -752,739,453,245,501,400 | -5,152,086,950,067,638,000 | 19.588235 | 85 | 0.5575 | false |
stevekuznetsov/ansible | test/runner/lib/changes.py | 39 | 5484 | """Detect changes in Ansible code."""
from __future__ import absolute_import, print_function
import re
import os
from lib.util import (
ApplicationError,
SubprocessError,
MissingEnvironmentVariable,
CommonConfig,
display,
)
from lib.http import (
HttpClient,
urlencode,
)
from lib.git import (
Git,
)
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason):
"""
:type branch: str
:type reason: str
"""
message = 'Invalid branch: %s\n%s' % (branch, reason)
super(InvalidBranch, self).__init__(message)
self.branch = branch
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
def __init__(self, message):
"""
:type message: str
"""
super(ChangeDetectionNotSupported, self).__init__(message)
class ShippableChanges(object):
"""Change information for Shippable build."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
try:
self.branch = os.environ['BRANCH']
self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
self.commit = os.environ['COMMIT']
self.project_id = os.environ['PROJECT_ID']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.is_tag:
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
if self.is_pr:
self.paths = sorted(git.get_diff_names([self.branch]))
else:
merge_runs = self.get_merge_runs(self.project_id, self.branch)
last_successful_commit = self.get_last_successful_commit(merge_runs)
if last_successful_commit:
self.paths = sorted(git.get_diff_names([last_successful_commit, self.commit]))
else:
# tracked files (including unchanged)
self.paths = sorted(git.get_file_names(['--cached']))
def get_merge_runs(self, project_id, branch):
"""
:type project_id: str
:type branch: str
:rtype: list[dict]
"""
params = dict(
isPullRequest='false',
projectIds=project_id,
branch=branch,
)
client = HttpClient(self.args, always=True)
response = client.get('https://api.shippable.com/runs?%s' % urlencode(params))
return response.json()
@staticmethod
def get_last_successful_commit(merge_runs):
"""
:type merge_runs: dict | list[dict]
:rtype: str
"""
if 'id' in merge_runs and merge_runs['id'] == 4004:
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return None
merge_runs = sorted(merge_runs, key=lambda r: r['createdAt'])
known_commits = set()
last_successful_commit = None
for merge_run in merge_runs:
commit_sha = merge_run['commitSha']
if commit_sha not in known_commits:
known_commits.add(commit_sha)
if merge_run['statusCode'] == 30:
last_successful_commit = commit_sha
return last_successful_commit
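    # Hedged sketch of the input this expects (field values invented): each
    # merge run looks roughly like
    #     {'commitSha': 'abc123', 'statusCode': 30, 'createdAt': '2017-01-01T00:00:00.000Z'}
    # Runs are sorted oldest first, each commit SHA is only considered once,
    # and the commit of the most recent successful (statusCode 30) run wins.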
class LocalChanges(object):
"""Change information for local work."""
def __init__(self, args, git):
"""
:type args: CommonConfig
:type git: Git
"""
self.args = args
self.current_branch = git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(git.get_diff_names([]))
@staticmethod
def is_official_branch(name):
"""
:type name: str
:rtype: bool
"""
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
| gpl-3.0 | -847,270,216,702,590,600 | -1,171,816,972,973,485,800 | 30.337143 | 108 | 0.582604 | false |
MwanzanFelipe/rockletonfortune | lib/django/db/backends/postgresql/version.py | 632 | 1517 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL #.#.#
# EnterpriseDB #.#
# PostgreSQL #.# beta#
# PostgreSQL #.#beta#
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
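# A couple of worked examples of the formula above:
# _parse_version("PostgreSQL 9.3.4") -> 9 * 10000 + 3 * 100 + 4 = 90304
# _parse_version("PostgreSQL 9.4beta1") -> no patch number, so 90400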
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 90304 for 9.3.4. The last two digits will be 00 in the case of
releases (e.g., 90400 for 'PostgreSQL 9.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
with connection.cursor() as cursor:
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
| bsd-3-clause | -6,195,247,280,251,097,000 | -4,382,162,323,669,364,700 | 33.477273 | 79 | 0.661833 | false |
abtink/openthread | tests/scripts/thread-cert/border_router/test_multi_thread_networks.py | 1 | 5473 | #!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies bi-directional connectivity across multiple Thread networks.
#
# Topology:
# -------------(eth)----------------
# | |
# BR1 BR2
# | |
# ROUTER1 ROUTER2
#
# Thread Net1 Thread Net2
#
BR1 = 1
ROUTER1 = 2
BR2 = 3
ROUTER2 = 4
CHANNEL1 = 18
CHANNEL2 = 19
class MultiThreadNetworks(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR1: {
'name': 'BR_1',
'allowlist': [ROUTER1],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL1,
'router_selection_jitter': 1,
},
ROUTER1: {
'name': 'Router_1',
'allowlist': [BR1],
'version': '1.2',
'channel': CHANNEL1,
'router_selection_jitter': 1,
},
BR2: {
'name': 'BR_2',
'allowlist': [ROUTER2],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL2,
'router_selection_jitter': 1,
},
ROUTER2: {
'name': 'Router_2',
'allowlist': [BR2],
'version': '1.2',
'channel': CHANNEL2,
'router_selection_jitter': 1,
},
}
def test(self):
self.nodes[BR1].start()
self.simulator.go(5)
self.assertEqual('leader', self.nodes[BR1].get_state())
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[ROUTER1].get_state())
self.nodes[BR2].start()
self.simulator.go(5)
self.assertEqual('leader', self.nodes[BR2].get_state())
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual('router', self.nodes[ROUTER2].get_state())
self.collect_ipaddrs()
logging.info("BR1 addrs: %r", self.nodes[BR1].get_addrs())
logging.info("ROUTER1 addrs: %r", self.nodes[ROUTER1].get_addrs())
logging.info("BR2 addrs: %r", self.nodes[BR2].get_addrs())
logging.info("ROUTER2 addrs: %r", self.nodes[ROUTER2].get_addrs())
self.assertTrue(len(self.nodes[BR1].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[ROUTER1].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[BR2].get_prefixes()) == 1)
self.assertTrue(len(self.nodes[ROUTER2].get_prefixes()) == 1)
br1_omr_prefix = self.nodes[BR1].get_prefixes()[0]
br2_omr_prefix = self.nodes[BR2].get_prefixes()[0]
self.assertNotEqual(br1_omr_prefix, br2_omr_prefix)
# Each BR should independently register an external route for the on-link prefix
# and OMR prefix in another Thread Network.
self.assertTrue(len(self.nodes[BR1].get_routes()) == 2)
self.assertTrue(len(self.nodes[ROUTER1].get_routes()) == 2)
self.assertTrue(len(self.nodes[BR2].get_routes()) == 2)
self.assertTrue(len(self.nodes[ROUTER2].get_routes()) == 2)
br1_external_routes = self.nodes[BR1].get_routes()
br2_external_routes = self.nodes[BR2].get_routes()
br1_external_routes.sort()
br2_external_routes.sort()
self.assertNotEqual(br1_external_routes, br2_external_routes)
self.assertTrue(len(self.nodes[ROUTER1].get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(len(self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(self.nodes[ROUTER1].ping(self.nodes[ROUTER2].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
self.assertTrue(self.nodes[ROUTER2].ping(self.nodes[ROUTER1].get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 6,788,529,754,996,215,000 | -7,585,448,868,444,481,000 | 35.731544 | 114 | 0.625069 | false |
praekelt/django-football365 | football365/migrations/0001_initial.py | 1 | 1330 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Call'
db.create_table('football365_call', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
('call_type', self.gf('django.db.models.fields.CharField')(max_length=32)),
('football365_service_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
))
db.send_create_signal('football365', ['Call'])
def backwards(self, orm):
# Deleting model 'Call'
db.delete_table('football365_call')
models = {
'football365.call': {
'Meta': {'ordering': "('title',)", 'object_name': 'Call'},
'call_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'football365_service_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['football365'] | bsd-3-clause | 8,940,753,565,225,168,000 | 6,791,074,349,668,486,000 | 35.972222 | 98 | 0.588722 | false |
albatrossandco/brubeck_cms | brubeck/articles/views.py | 1 | 4971 | # Imports from standard libraries
from datetime import date, datetime, time, timedelta
# Imports from Django
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.contrib.sites.models import Site
from django.http import Http404, HttpResponsePermanentRedirect
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.views.decorators.cache import cache_page
# Imports from Brubeck
from brubeck.articles.models import Article, Correction
from brubeck.mapping.views import detail as map_detail
@cache_page(60 * 5)
def archive(request, year=None, month=None, day=None, page=1):
"""
Shows a paginated list of articles by year, month or day.
Arguments:
'year'
Optional.
'month'
Optional.
'day'
Optional.
'page'
Optional.
Context:
'archive_name'
A string with the name of the archive to display.
'archive_page'
A Paginator.page instance with object_list containing the articles
to display.
"""
try:
page = int(page)
if year:
year = int(year)
if month:
month = int(month)
if day:
day = int(day)
except ValueError:
raise Http404
site = Site.objects.get_current()
try:
articles = Article.get_published.filter(section__publication__site=site)
except:
raise Http404
if not year:
articles = articles
archive_name = "Article archive"
elif not month:
articles = articles.filter(pub_date__year=year)
archive_name = "Articles from %s" % year
elif not day:
articles = articles.filter(pub_date__year=year, pub_date__month=month)
archive_name = "Articles from %s" % date(year, month, 1).strftime("%B %Y")
else:
articles = articles.filter(pub_date=date(year, month, day))
archive_name = "Articles from %s" % date(year, month, day).strftime("%B %d, %Y")
paginator = Paginator(articles, 20)
try:
archive_page = paginator.page(page)
except (EmptyPage, InvalidPage):
raise Http404
url_base = '/stories/'
if year:
url_base += '%s/' % year
if month:
url_base += '%s/' % month
if day:
url_base += '%s/' % day
next_page_url = '%sp%s/' % (url_base, page + 1)
previous_page_url = '%sp%s/' % (url_base, page - 1)
page = {
'archive_name': archive_name,
'archive_page': archive_page,
'next_page_url': next_page_url,
'previous_page_url': previous_page_url
}
return render_to_response('articles/archive.html', page, context_instance=RequestContext(request))
@cache_page(60 * 5)
def detail(request, year=None, month=None, day=None, slug=None, mode=None):
"""
Shows a particular article or its associated photos and graphics.
"""
site = Site.objects.get_current()
try:
article = Article.get_published.filter(section__publication__site=site).filter(pub_date__year=int(year), pub_date__month=int(month), pub_date__day=int(day)).get(slug=slug)
except Article.DoesNotExist:
raise Http404
images = []
images.extend(article.photos.all())
images.extend(article.editorial_cartoons.all())
images.extend(article.graphics.all())
multimedia = []
multimedia.extend(article.videos.all())
multimedia.extend(article.slideshows.all())
multimedia.extend(article.audio_clips.all())
multimedia.extend(article.podcast_episodes.all())
if article.map:
map_data = map_detail(request, slug=article.map.slug, mode='context')
else:
map_data = None
if article.type == 'column':
try:
article.mugshot = article.byline[0].mugshot
except:
article.mugshot = None
else:
article.mugshot = None
article.attached_audio = False
for item in article.attached_files.all():
if item.get_file_extension() == 'mp3':
article.attached_audio = True
page = {
'article': article,
'images': images,
'map_data': map_data,
'multimedia': multimedia
}
if mode == 'images':
return render_to_response('articles/detail_images.html', page, context_instance=RequestContext(request))
else:
return render_to_response('articles/detail.html', page, context_instance=RequestContext(request))
def corrections(request):
"""
Shows corrections from the past two weeks.
"""
TWOWEEKSAGO = date.today() - timedelta(14)
corrections = Correction.objects.filter(date_corrected__gte=TWOWEEKSAGO)
page = {
'corrections': corrections
}
return render_to_response('articles/correction_list.html', page, context_instance=RequestContext(request))
| bsd-3-clause | 8,259,880,726,438,347,000 | -4,623,599,304,555,851,000 | 30.264151 | 179 | 0.62241 | false |
Kami/libcloud | libcloud/common/dimensiondata.py | 3 | 58274 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimension Data Common Components
"""
from base64 import b64encode
from time import sleep
# TODO: use distutils.version when Travis CI has fixed the pylint issue with version
# from distutils.version import LooseVersion
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, RawResponse
from libcloud.compute.base import Node
from libcloud.utils.py3 import basestring
from libcloud.utils.xml import findtext
from libcloud.compute.types import LibcloudError, InvalidCredsError
# Roadmap / TODO:
#
# 1.0 - Copied from OpSource API, named provider details.
# setup a few variables to represent all of the DimensionData cloud namespaces
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
GENERAL_NS = NAMESPACE_BASE + "/general"
BACKUP_NS = NAMESPACE_BASE + "/backup"
# API 2.0 Namespaces and URNs
TYPES_URN = "urn:didata.com:api:cloud:types"
# API end-points
API_ENDPOINTS = {
'dd-na': {
'name': 'North America (NA)',
'host': 'api-na.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au': {
'name': 'Australia (AU)',
'host': 'api-au.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-au-gov': {
'name': 'Australia Canberra ACT (AU)',
'host': 'api-canberra.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-af': {
'name': 'Africa (AF)',
'host': 'api-mea.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-latam': {
'name': 'South America (LATAM)',
'host': 'api-latam.dimensiondata.com',
'vendor': 'DimensionData'
},
'dd-canada': {
'name': 'Canada (CA)',
'host': 'api-canada.dimensiondata.com',
'vendor': 'DimensionData'
},
'is-na': {
'name': 'North America (NA)',
'host': 'usapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-eu': {
'name': 'Europe (EU)',
'host': 'euapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-au': {
'name': 'Australia (AU)',
'host': 'auapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-af': {
'name': 'Africa (AF)',
'host': 'meaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-ap': {
'name': 'Asia Pacific (AP)',
'host': 'apapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-latam': {
'name': 'South America (LATAM)',
'host': 'latamapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'is-canada': {
'name': 'Canada (CA)',
'host': 'canadaapi.cloud.is.co.za',
'vendor': 'InternetSolutions'
},
'ntta-na': {
'name': 'North America (NA)',
'host': 'cloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-eu': {
'name': 'Europe (EU)',
'host': 'eucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-au': {
'name': 'Australia (AU)',
'host': 'aucloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-af': {
'name': 'Africa (AF)',
'host': 'sacloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'ntta-ap': {
'name': 'Asia Pacific (AP)',
'host': 'hkcloudapi.nttamerica.com',
'vendor': 'NTTNorthAmerica'
},
'cisco-na': {
'name': 'North America (NA)',
'host': 'iaas-api-na.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-eu': {
'name': 'Europe (EU)',
'host': 'iaas-api-eu.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-au': {
'name': 'Australia (AU)',
'host': 'iaas-api-au.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-af': {
'name': 'Africa (AF)',
'host': 'iaas-api-mea.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-ap': {
'name': 'Asia Pacific (AP)',
'host': 'iaas-api-ap.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-latam': {
'name': 'South America (LATAM)',
'host': 'iaas-api-sa.cisco-ccs.com',
'vendor': 'Cisco'
},
'cisco-canada': {
'name': 'Canada (CA)',
'host': 'iaas-api-ca.cisco-ccs.com',
'vendor': 'Cisco'
},
'med1-il': {
'name': 'Israel (IL)',
'host': 'api.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-na': {
'name': 'North America (NA)',
'host': 'api-na.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-eu': {
'name': 'Europe (EU)',
'host': 'api-eu.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-au': {
'name': 'Australia (AU)',
'host': 'api-au.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-af': {
'name': 'Africa (AF)',
'host': 'api-af.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-ap': {
'name': 'Asia Pacific (AP)',
'host': 'api-ap.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-latam': {
'name': 'South America (LATAM)',
'host': 'api-sa.cloud.med-1.com',
'vendor': 'Med-1'
},
'med1-canada': {
'name': 'Canada (CA)',
'host': 'api-ca.cloud.med-1.com',
'vendor': 'Med-1'
},
'indosat-id': {
'name': 'Indonesia (ID)',
'host': 'iaas-api.indosat.com',
'vendor': 'Indosat'
},
'indosat-na': {
'name': 'North America (NA)',
'host': 'iaas-usapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-eu': {
'name': 'Europe (EU)',
'host': 'iaas-euapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-au': {
'name': 'Australia (AU)',
'host': 'iaas-auapi.indosat.com',
'vendor': 'Indosat'
},
'indosat-af': {
'name': 'Africa (AF)',
'host': 'iaas-afapi.indosat.com',
'vendor': 'Indosat'
},
'bsnl-in': {
'name': 'India (IN)',
'host': 'api.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-na': {
'name': 'North America (NA)',
'host': 'usapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-eu': {
'name': 'Europe (EU)',
'host': 'euapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-au': {
'name': 'Australia (AU)',
'host': 'auapi.bsnlcloud.com',
'vendor': 'BSNL'
},
'bsnl-af': {
'name': 'Africa (AF)',
'host': 'afapi.bsnlcloud.com',
'vendor': 'BSNL'
}
}
# Default API end-point for the base connection class.
DEFAULT_REGION = 'dd-na'
BAD_CODE_XML_ELEMENTS = (
('responseCode', SERVER_NS),
('responseCode', TYPES_URN),
('result', GENERAL_NS)
)
BAD_MESSAGE_XML_ELEMENTS = (
('message', SERVER_NS),
('message', TYPES_URN),
('resultDetail', GENERAL_NS)
)
def dd_object_to_id(obj, obj_type, id_value='id'):
"""
    Takes in a DD object or string and returns its id
    This is a helper method, as many of our functions can take either an object
    or a string, and we need an easy way of converting them
    :param obj: The object to get the id for, or the id itself as a string
    :type obj: ``object`` or ``str``
    :param obj_type: The expected class of ``obj``, used for type checking
    :type obj_type: ``type``
    :param id_value: The name of the attribute on ``obj`` that holds the id
    :type id_value: ``str``
    :rtype: ``str``
"""
if isinstance(obj, obj_type):
return getattr(obj, id_value)
elif isinstance(obj, (basestring)):
return obj
else:
raise TypeError(
"Invalid type %s looking for basestring or %s"
% (type(obj).__name__, obj_type.__name__)
)
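# Hedged usage sketch (the vlan variable and id string are hypothetical):
# dd_object_to_id(vlan, DimensionDataVlan) returns vlan.id, while
# dd_object_to_id('some-vlan-id', DimensionDataVlan) returns the string
# unchanged, so driver methods can accept either form.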
# TODO: use distutils.version when Travis CI has fixed the pylint issue with version
# This is a temporary workaround.
def LooseVersion(version):
return float(version)
class NetworkDomainServicePlan(object):
ESSENTIALS = "ESSENTIALS"
ADVANCED = "ADVANCED"
class DimensionDataRawResponse(RawResponse):
pass
class DimensionDataResponse(XmlResponse):
def parse_error(self):
if self.status == httplib.UNAUTHORIZED:
raise InvalidCredsError(self.body)
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
body = self.parse_body()
if self.status == httplib.BAD_REQUEST:
for response_code in BAD_CODE_XML_ELEMENTS:
code = findtext(body, response_code[0], response_code[1])
if code is not None:
break
for message in BAD_MESSAGE_XML_ELEMENTS:
message = findtext(body, message[0], message[1])
if message is not None:
break
raise DimensionDataAPIException(code=code,
msg=message,
driver=self.connection.driver)
if self.status is not httplib.OK:
raise DimensionDataAPIException(code=self.status,
msg=body,
driver=self.connection.driver)
return self.body
class DimensionDataAPIException(LibcloudError):
def __init__(self, code, msg, driver):
self.code = code
self.msg = msg
self.driver = driver
def __str__(self):
return "%s: %s" % (self.code, self.msg)
def __repr__(self):
return ("<DimensionDataAPIException: code='%s', msg='%s'>" %
(self.code, self.msg))
class DimensionDataConnection(ConnectionUserAndKey):
"""
Connection class for the DimensionData driver
"""
api_path_version_1 = '/oec'
api_path_version_2 = '/caas'
api_version_1 = 0.9
# Earliest version supported
oldest_api_version = '2.2'
# Latest version supported
latest_api_version = '2.4'
# Default api version
active_api_version = '2.4'
_orgId = None
responseCls = DimensionDataResponse
rawResponseCls = DimensionDataRawResponse
allow_insecure = False
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None,
api_version=None, **conn_kwargs):
super(DimensionDataConnection, self).__init__(
user_id=user_id,
key=key,
secure=secure,
host=host, port=port,
url=url, timeout=timeout,
proxy_url=proxy_url)
if conn_kwargs['region']:
self.host = conn_kwargs['region']['host']
if api_version:
if LooseVersion(api_version) < LooseVersion(
self.oldest_api_version):
msg = 'API Version specified is too old. No longer ' \
'supported. Please upgrade to the latest version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
elif LooseVersion(api_version) > LooseVersion(
self.latest_api_version):
msg = 'Unsupported API Version. The version specified is ' \
'not release yet. Please use the latest supported ' \
'version {}' \
.format(self.active_api_version)
raise DimensionDataAPIException(code=None,
msg=msg,
driver=self.driver)
else:
# Overwrite default version using the version user specified
self.active_api_version = api_version
def add_default_headers(self, headers):
headers['Authorization'] = \
('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
self.key))).decode('utf-8'))
headers['Content-Type'] = 'application/xml'
return headers
def request_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s" % (self.api_path_version_1,
self.api_version_1, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_api_2(self, path, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s/%s/%s" % (self.api_path_version_2,
self.active_api_version, path, action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def raw_request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers, raw=True)
def request_with_orgId_api_1(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_1(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET'):
action = "%s/%s" % (self.get_resource_path_api_2(), action)
return super(DimensionDataConnection, self).request(
action=action,
params=params, data=data,
method=method, headers=headers)
def paginated_request_with_orgId_api_2(self, action, params=None, data='',
headers=None, method='GET',
page_size=250):
"""
A paginated request to the MCP2.0 API
This essentially calls out to request_with_orgId_api_2 for each page
        and yields the response to make a generator.
This generator can be looped through to grab all the pages.
:param action: The resource to access (i.e. 'network/vlan')
:type action: ``str``
:param params: Parameters to give to the action
:type params: ``dict`` or ``None``
:param data: The data payload to be added to the request
:type data: ``str``
:param headers: Additional header to be added to the request
:type headers: ``str`` or ``dict`` or ``None``
:param method: HTTP Method for the request (i.e. 'GET', 'POST')
:type method: ``str``
:param page_size: The size of each page to be returned
Note: Max page size in MCP2.0 is currently 250
:type page_size: ``int``
"""
if params is None:
params = {}
params['pageSize'] = page_size
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
yield resp
if len(resp) <= 0:
return
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
while int(pcount) >= int(psize):
params['pageNumber'] = int(pnumber) + 1
resp = self.request_with_orgId_api_2(action, params,
data, headers,
method).object
pcount = resp.get('pageCount') # pylint: disable=no-member
psize = resp.get('pageSize') # pylint: disable=no-member
pnumber = resp.get('pageNumber') # pylint: disable=no-member
yield resp
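    # Hedged usage sketch (the conn name and action are illustrative): since
    # this is a generator, callers typically loop over it, e.g.
    #     for page in conn.paginated_request_with_orgId_api_2('network/vlan'):
    #         ...  # process the parsed XML for that page
    # and each iteration transparently requests the next pageNumber until no
    # full page remains.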
def get_resource_path_api_1(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_1, self.api_version_1,
self._get_orgId()))
def get_resource_path_api_2(self):
"""
This method returns a resource path which is necessary for referencing
resources that require a full path instead of just an ID, such as
networks, and customer snapshots.
"""
return ("%s/%s/%s" % (self.api_path_version_2, self.active_api_version,
self._get_orgId()))
def wait_for_state(self, state, func, poll_interval=2, timeout=60, *args,
**kwargs):
"""
        Wait for the function which returns an instance with field status/state
to match.
Keep polling func until one of the desired states is matched
:param state: Either the desired state (`str`) or a `list` of states
:type state: ``str`` or ``list``
:param func: The function to call, e.g. ex_get_vlan. Note: This
function needs to return an object which has ``status``
attribute.
:type func: ``function``
:param poll_interval: The number of seconds to wait between checks
:type poll_interval: `int`
:param timeout: The total number of seconds to wait to reach a state
:type timeout: `int`
:param args: The arguments for func
:type args: Positional arguments
:param kwargs: The arguments for func
:type kwargs: Keyword arguments
:return: Result from the calling function.
"""
cnt = 0
result = None
object_state = None
while cnt < timeout / poll_interval:
result = func(*args, **kwargs)
if isinstance(result, Node):
object_state = result.state
else:
object_state = result.status
if object_state is state or str(object_state) in state:
return result
sleep(poll_interval)
cnt += 1
msg = 'Status check for object %s timed out' % (result)
raise DimensionDataAPIException(code=object_state,
msg=msg,
driver=self.driver)
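    # Hedged usage sketch (the id value is invented): per the docstring above,
    #     conn.wait_for_state('NORMAL', driver.ex_get_vlan, 2, 60, 'my-vlan-id')
    # polls ex_get_vlan every 2 seconds for up to 60 seconds, returning the
    # VLAN once its status matches and raising DimensionDataAPIException on
    # timeout.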
def _get_orgId(self):
"""
Send the /myaccount API request to DimensionData cloud and parse the
'orgId' from the XML response object. We need the orgId to use most
of the other API functions
"""
if self._orgId is None:
body = self.request_api_1('myaccount').object
self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
return self._orgId
def get_account_details(self):
"""
Get the details of this account
:rtype: :class:`DimensionDataAccountDetails`
"""
body = self.request_api_1('myaccount').object
return DimensionDataAccountDetails(
user_name=findtext(body, 'userName', DIRECTORY_NS),
full_name=findtext(body, 'fullName', DIRECTORY_NS),
first_name=findtext(body, 'firstName', DIRECTORY_NS),
last_name=findtext(body, 'lastName', DIRECTORY_NS),
email=findtext(body, 'emailAddress', DIRECTORY_NS))
class DimensionDataAccountDetails(object):
"""
Dimension Data account class details
"""
def __init__(self, user_name, full_name, first_name, last_name, email):
self.user_name = user_name
self.full_name = full_name
self.first_name = first_name
self.last_name = last_name
self.email = email
class DimensionDataStatus(object):
"""
DimensionData API pending operation status class
action, request_time, user_name, number_of_steps, update_time,
step.name, step.number, step.percent_complete, failure_reason,
"""
def __init__(self, action=None, request_time=None, user_name=None,
number_of_steps=None, update_time=None, step_name=None,
step_number=None, step_percent_complete=None,
failure_reason=None):
self.action = action
self.request_time = request_time
self.user_name = user_name
self.number_of_steps = number_of_steps
self.update_time = update_time
self.step_name = step_name
self.step_number = step_number
self.step_percent_complete = step_percent_complete
self.failure_reason = failure_reason
def __repr__(self):
return (('<DimensionDataStatus: action=%s, request_time=%s, '
'user_name=%s, number_of_steps=%s, update_time=%s, '
'step_name=%s, step_number=%s, '
'step_percent_complete=%s, failure_reason=%s>')
% (self.action, self.request_time, self.user_name,
self.number_of_steps, self.update_time, self.step_name,
self.step_number, self.step_percent_complete,
self.failure_reason))
class DimensionDataNetwork(object):
"""
DimensionData network with location.
"""
def __init__(self, id, name, description, location, private_net,
multicast, status):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.private_net = private_net
self.multicast = multicast
self.status = status
def __repr__(self):
return (('<DimensionDataNetwork: id=%s, name=%s, description=%s, '
'location=%s, private_net=%s, multicast=%s>')
% (self.id, self.name, self.description, self.location,
self.private_net, self.multicast))
class DimensionDataNetworkDomain(object):
"""
DimensionData network domain with location.
"""
def __init__(self, id, name, description, location, status, plan):
self.id = str(id)
self.name = name
self.description = description
self.location = location
self.status = status
self.plan = plan
def __repr__(self):
return (('<DimensionDataNetworkDomain: id=%s, name=%s, '
'description=%s, location=%s, status=%s, plan=%s>')
% (self.id, self.name, self.description, self.location,
self.status, self.plan))
class DimensionDataPublicIpBlock(object):
"""
DimensionData Public IP Block with location.
"""
def __init__(self, id, base_ip, size, location, network_domain,
status):
self.id = str(id)
self.base_ip = base_ip
self.size = size
self.location = location
self.network_domain = network_domain
self.status = status
def __repr__(self):
return (('<DimensionDataNetworkDomain: id=%s, base_ip=%s, '
'size=%s, location=%s, status=%s>')
% (self.id, self.base_ip, self.size, self.location,
self.status))
class DimensionDataServerCpuSpecification(object):
"""
A class that represents the specification of the CPU(s) for a
node
"""
def __init__(self, cpu_count, cores_per_socket, performance):
"""
Instantiate a new :class:`DimensionDataServerCpuSpecification`
:param cpu_count: The number of CPUs
:type cpu_count: ``int``
:param cores_per_socket: The number of cores per socket, the
recommendation is 1
:type cores_per_socket: ``int``
:param performance: The performance type, e.g. HIGHPERFORMANCE
:type performance: ``str``
"""
self.cpu_count = cpu_count
self.cores_per_socket = cores_per_socket
self.performance = performance
def __repr__(self):
return (('<DimensionDataServerCpuSpecification: '
'cpu_count=%s, cores_per_socket=%s, '
'performance=%s>')
% (self.cpu_count, self.cores_per_socket, self.performance))
class DimensionDataServerDisk(object):
"""
A class that represents the disk on a server
"""
def __init__(self, id=None, scsi_id=None, size_gb=None, speed=None,
state=None):
"""
Instantiate a new :class:`DimensionDataServerDisk`
:param id: The id of the disk
:type id: ``str``
:param scsi_id: Representation for scsi
:type scsi_id: ``int``
:param size_gb: Size of the disk
:type size_gb: ``int``
:param speed: Speed of the disk (i.e. STANDARD)
:type speed: ``str``
:param state: State of the disk (i.e. PENDING)
:type state: ``str``
"""
self.id = id
self.scsi_id = scsi_id
self.size_gb = size_gb
self.speed = speed
self.state = state
def __repr__(self):
return (('<DimensionDataServerDisk: '
'id=%s, size_gb=%s')
% (self.id, self.size_gb))
class DimensionDataServerVMWareTools(object):
"""
A class that represents the VMWareTools for a node
"""
def __init__(self, status, version_status, api_version):
"""
Instantiate a new :class:`DimensionDataServerVMWareTools` object
:param status: The status of VMWare Tools
:type status: ``str``
:param version_status: The status for the version of VMWare Tools
(i.e NEEDS_UPGRADE)
:type version_status: ``str``
:param api_version: The API version of VMWare Tools
:type api_version: ``str``
"""
self.status = status
self.version_status = version_status
self.api_version = api_version
def __repr__(self):
return (('<DimensionDataServerVMWareTools '
'status=%s, version_status=%s, '
'api_version=%s>')
% (self.status, self.version_status, self.api_version))
class DimensionDataFirewallRule(object):
"""
DimensionData Firewall Rule for a network domain
"""
def __init__(self, id, name, action, location, network_domain,
status, ip_version, protocol, source, destination,
enabled):
self.id = str(id)
self.name = name
self.action = action
self.location = location
self.network_domain = network_domain
self.status = status
self.ip_version = ip_version
self.protocol = protocol
self.source = source
self.destination = destination
self.enabled = enabled
def __repr__(self):
return (('<DimensionDataFirewallRule: id=%s, name=%s, '
'action=%s, location=%s, network_domain=%s, '
'status=%s, ip_version=%s, protocol=%s, source=%s, '
'destination=%s, enabled=%s>')
% (self.id, self.name, self.action, self.location,
self.network_domain, self.status, self.ip_version,
self.protocol, self.source, self.destination,
self.enabled))
class DimensionDataFirewallAddress(object):
"""
The source or destination model in a firewall rule
"""
def __init__(self, any_ip, ip_address, ip_prefix_size,
port_begin, port_end, address_list_id,
port_list_id):
self.any_ip = any_ip
self.ip_address = ip_address
self.ip_prefix_size = ip_prefix_size
self.port_list_id = port_list_id
self.port_begin = port_begin
self.port_end = port_end
self.address_list_id = address_list_id
self.port_list_id = port_list_id
def __repr__(self):
return (
'<DimensionDataFirewallAddress: any_ip=%s, ip_address=%s, '
'ip_prefix_size=%s, port_begin=%s, port_end=%s, '
'address_list_id=%s, port_list_id=%s>'
% (self.any_ip, self.ip_address, self.ip_prefix_size,
self.port_begin, self.port_end, self.address_list_id,
self.port_list_id))
class DimensionDataNatRule(object):
"""
An IP NAT rule in a network domain
"""
def __init__(self, id, network_domain, internal_ip, external_ip, status):
self.id = id
self.network_domain = network_domain
self.internal_ip = internal_ip
self.external_ip = external_ip
self.status = status
def __repr__(self):
return (('<DimensionDataNatRule: id=%s, status=%s>')
% (self.id, self.status))
class DimensionDataAntiAffinityRule(object):
"""
Anti-Affinity rule for DimensionData
An Anti-Affinity rule ensures that servers in the rule will
not reside on the same VMware ESX host.
"""
def __init__(self, id, node_list):
"""
Instantiate a new :class:`DimensionDataAntiAffinityRule`
:param id: The ID of the Anti-Affinity rule
:type id: ``str``
:param node_list: List of node ids that belong in this rule
:type node_list: ``list`` of ``str``
"""
self.id = id
self.node_list = node_list
def __repr__(self):
return (('<DimensionDataAntiAffinityRule: id=%s>')
% (self.id))
class DimensionDataVlan(object):
"""
DimensionData VLAN.
"""
def __init__(self, id, name, description, location, network_domain,
status, private_ipv4_range_address, private_ipv4_range_size,
ipv6_range_address, ipv6_range_size, ipv4_gateway,
ipv6_gateway):
"""
Initialize an instance of ``DimensionDataVlan``
:param id: The ID of the VLAN
:type id: ``str``
:param name: The name of the VLAN
:type name: ``str``
        :param description: Plain text description of the VLAN
:type description: ``str``
:param location: The location (data center) of the VLAN
:type location: ``NodeLocation``
:param network_domain: The Network Domain that owns this VLAN
:type network_domain: :class:`DimensionDataNetworkDomain`
:param status: The status of the VLAN
:type status: :class:`DimensionDataStatus`
:param private_ipv4_range_address: The host address of the VLAN
IP space
:type private_ipv4_range_address: ``str``
:param private_ipv4_range_size: The size (e.g. '24') of the VLAN
as a CIDR range size
:type private_ipv4_range_size: ``int``
:param ipv6_range_address: The host address of the VLAN
IP space
:type ipv6_range_address: ``str``
:param ipv6_range_size: The size (e.g. '32') of the VLAN
as a CIDR range size
:type ipv6_range_size: ``int``
:param ipv4_gateway: The IPv4 default gateway address
:type ipv4_gateway: ``str``
:param ipv6_gateway: The IPv6 default gateway address
:type ipv6_gateway: ``str``
"""
self.id = str(id)
self.name = name
self.location = location
self.description = description
self.network_domain = network_domain
self.status = status
self.private_ipv4_range_address = private_ipv4_range_address
self.private_ipv4_range_size = private_ipv4_range_size
self.ipv6_range_address = ipv6_range_address
self.ipv6_range_size = ipv6_range_size
self.ipv4_gateway = ipv4_gateway
self.ipv6_gateway = ipv6_gateway
def __repr__(self):
return (('<DimensionDataVlan: id=%s, name=%s, '
'description=%s, location=%s, status=%s>')
% (self.id, self.name, self.description,
self.location, self.status))
class DimensionDataPool(object):
"""
DimensionData VIP Pool.
"""
def __init__(self, id, name, description, status, load_balance_method,
health_monitor_id, service_down_action, slow_ramp_time):
"""
Initialize an instance of ``DimensionDataPool``
:param id: The ID of the pool
:type id: ``str``
:param name: The name of the pool
:type name: ``str``
        :param description: Plain text description of the pool
:type description: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param load_balance_method: The load balancer method
:type load_balance_method: ``str``
:param health_monitor_id: The ID of the health monitor
:type health_monitor_id: ``str``
:param service_down_action: Action to take when pool is down
:type service_down_action: ``str``
:param slow_ramp_time: The ramp-up time for service recovery
:type slow_ramp_time: ``int``
"""
self.id = str(id)
self.name = name
self.description = description
self.status = status
self.load_balance_method = load_balance_method
self.health_monitor_id = health_monitor_id
self.service_down_action = service_down_action
self.slow_ramp_time = slow_ramp_time
def __repr__(self):
return (('<DimensionDataPool: id=%s, name=%s, '
'description=%s, status=%s>')
% (self.id, self.name, self.description,
self.status))
class DimensionDataPoolMember(object):
"""
DimensionData VIP Pool Member.
"""
def __init__(self, id, name, status, ip, port, node_id):
"""
Initialize an instance of ``DimensionDataPoolMember``
:param id: The ID of the pool member
:type id: ``str``
:param name: The name of the pool member
:type name: ``str``
:param status: The status of the pool
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the pool member
:type ip: ``str``
:param port: The port of the pool member
:type port: ``int``
:param node_id: The ID of the associated node
:type node_id: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.port = port
self.node_id = node_id
def __repr__(self):
return (('<DimensionDataPoolMember: id=%s, name=%s, '
'ip=%s, status=%s, port=%s, node_id=%s>')
% (self.id, self.name,
self.ip, self.status, self.port,
self.node_id))
class DimensionDataVIPNode(object):
def __init__(self, id, name, status, ip, connection_limit='10000',
connection_rate_limit='10000'):
"""
Initialize an instance of :class:`DimensionDataVIPNode`
:param id: The ID of the node
:type id: ``str``
:param name: The name of the node
:type name: ``str``
:param status: The status of the node
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the node
:type ip: ``str``
:param connection_limit: The total connection limit for the node
:type connection_limit: ``int``
:param connection_rate_limit: The rate limit for the node
:type connection_rate_limit: ``int``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
self.connection_limit = connection_limit
self.connection_rate_limit = connection_rate_limit
def __repr__(self):
return (('<DimensionDataVIPNode: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataVirtualListener(object):
"""
DimensionData Virtual Listener.
"""
def __init__(self, id, name, status, ip):
"""
Initialize an instance of :class:`DimensionDataVirtualListener`
:param id: The ID of the listener
:type id: ``str``
:param name: The name of the listener
:type name: ``str``
:param status: The status of the listener
:type status: :class:`DimensionDataStatus`
:param ip: The IP of the listener
:type ip: ``str``
"""
self.id = str(id)
self.name = name
self.status = status
self.ip = ip
def __repr__(self):
return (('<DimensionDataVirtualListener: id=%s, name=%s, '
'status=%s, ip=%s>')
% (self.id, self.name,
self.status, self.ip))
class DimensionDataDefaultHealthMonitor(object):
"""
A default health monitor for a VIP (node, pool or listener)
"""
def __init__(self, id, name, node_compatible, pool_compatible):
"""
Initialize an instance of :class:`DimensionDataDefaultHealthMonitor`
:param id: The ID of the monitor
:type id: ``str``
:param name: The name of the monitor
:type name: ``str``
        :param node_compatible: Whether the monitor can monitor nodes
        :type node_compatible: ``bool``
        :param pool_compatible: Whether the monitor can monitor pools
        :type pool_compatible: ``bool``
"""
self.id = id
self.name = name
self.node_compatible = node_compatible
self.pool_compatible = pool_compatible
def __repr__(self):
return (('<DimensionDataDefaultHealthMonitor: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataPersistenceProfile(object):
"""
Each Persistence Profile declares the combination of Virtual Listener
type and protocol with which it is
compatible and whether or not it is compatible as a
Fallback Persistence Profile.
"""
def __init__(self, id, name, compatible_listeners, fallback_compatible):
"""
Initialize an instance of :class:`DimensionDataPersistenceProfile`
:param id: The ID of the profile
:type id: ``str``
:param name: The name of the profile
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
        :param fallback_compatible: Whether usable as a fallback profile
:type fallback_compatible: ``bool``
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
self.fallback_compatible = fallback_compatible
def __repr__(self):
return (('<DimensionDataPersistenceProfile: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataDefaultiRule(object):
"""
A default iRule for a network domain, can be applied to a listener
"""
def __init__(self, id, name, compatible_listeners):
"""
Initialize an instance of :class:`DimensionDataDefaultiRule`
:param id: The ID of the iRule
:type id: ``str``
:param name: The name of the iRule
:type name: ``str``
:param compatible_listeners: List of compatible Virtual Listener types
:type compatible_listeners: ``list`` of
:class:`DimensionDataVirtualListenerCompatibility`
"""
self.id = id
self.name = name
self.compatible_listeners = compatible_listeners
def __repr__(self):
return (('<DimensionDataDefaultiRule: id=%s, name=%s>')
% (self.id, self.name))
class DimensionDataVirtualListenerCompatibility(object):
"""
A compatibility preference for a persistence profile or iRule
specifies which virtual listener types this profile or iRule can be
applied to.
"""
def __init__(self, type, protocol):
self.type = type
self.protocol = protocol
def __repr__(self):
return (('<DimensionDataVirtualListenerCompatibility: '
'type=%s, protocol=%s>')
% (self.type, self.protocol))
class DimensionDataBackupDetails(object):
"""
Dimension Data Backup Details represents information about
    a target's backup configuration
"""
def __init__(self, asset_id, service_plan, status, clients=None):
"""
Initialize an instance of :class:`DimensionDataBackupDetails`
:param asset_id: Asset identification for backups
:type asset_id: ``str``
        :param service_plan: The service plan for backups, e.g. 'Essentials'
:type service_plan: ``str``
        :param status: The overall status of this backup target,
                       e.g. 'unregistered'
:type status: ``str``
:param clients: Backup clients attached to this target
:type clients: ``list`` of :class:`DimensionDataBackupClient`
"""
self.asset_id = asset_id
self.service_plan = service_plan
self.status = status
self.clients = clients
def __repr__(self):
return (('<DimensionDataBackupDetails: id=%s>')
% (self.asset_id))
class DimensionDataBackupClient(object):
"""
An object that represents a backup client
"""
def __init__(self, id, type, status,
schedule_policy, storage_policy, download_url,
alert=None, running_job=None):
"""
Initialize an instance of :class:`DimensionDataBackupClient`
:param id: Unique ID for the client
:type id: ``str``
        :param type: The type of this backup client
        :type type: :class:`DimensionDataBackupClientType`
        :param status: The status of this particular backup client,
                       e.g. 'Unregistered'
:type status: ``str``
:param schedule_policy: The schedule policy for this client
NOTE: Dimension Data only sends back the name
of the schedule policy, no further details
:type schedule_policy: ``str``
:param storage_policy: The storage policy for this client
NOTE: Dimension Data only sends back the name
of the storage policy, no further details
:type storage_policy: ``str``
:param download_url: The download url for this client
:type download_url: ``str``
:param alert: The alert configured for this backup client (optional)
:type alert: :class:`DimensionDataBackupClientAlert`
        :param running_job: The running job for the client (optional)
        :type running_job: :class:`DimensionDataBackupClientRunningJob`
"""
self.id = id
self.type = type
self.status = status
self.schedule_policy = schedule_policy
self.storage_policy = storage_policy
self.download_url = download_url
self.alert = alert
self.running_job = running_job
def __repr__(self):
return (('<DimensionDataBackupClient: id=%s>')
% (self.id))
class DimensionDataBackupClientAlert(object):
"""
An alert for a backup client
"""
    def __init__(self, trigger, notify_list=None):
"""
Initialize an instance of :class:`DimensionDataBackupClientAlert`
        :param trigger: Trigger type for the client, e.g. 'ON_FAILURE'
:type trigger: ``str``
:param notify_list: List of email addresses that are notified
when the alert is fired
:type notify_list: ``list`` of ``str``
"""
self.trigger = trigger
        # Use None as the default to avoid sharing one mutable list between
        # instances; fall back to a fresh empty list here.
        self.notify_list = notify_list if notify_list is not None else []
def __repr__(self):
return (('<DimensionDataBackupClientAlert: trigger=%s>')
% (self.trigger))
class DimensionDataBackupClientRunningJob(object):
"""
A running job for a given backup client
"""
def __init__(self, id, status, percentage=0):
"""
Initialize an instance of :class:`DimensionDataBackupClientRunningJob`
        :param id: The unique ID of the job
        :type id: ``str``
        :param status: The status of the job, e.g. 'Waiting'
:type status: ``str``
:param percentage: The percentage completion of the job
:type percentage: ``int``
"""
self.id = id
self.percentage = percentage
self.status = status
def __repr__(self):
return (('<DimensionDataBackupClientRunningJob: id=%s>')
% (self.id))
class DimensionDataBackupClientType(object):
"""
A client type object for backups
"""
def __init__(self, type, is_file_system, description):
"""
Initialize an instance of :class:`DimensionDataBackupClientType`
        :param type: The type of client, e.g. FA.Linux, MySQL, etc.
        :type type: ``str``
        :param is_file_system: Whether this client type is a file system client
        :type is_file_system: ``bool``
:param description: Description of the client
:type description: ``str``
"""
self.type = type
self.is_file_system = is_file_system
self.description = description
def __repr__(self):
return (('<DimensionDataBackupClientType: type=%s>')
% (self.type))
class DimensionDataBackupStoragePolicy(object):
"""
A representation of a storage policy
"""
def __init__(self, name, retention_period, secondary_location):
"""
Initialize an instance of :class:`DimensionDataBackupStoragePolicy`
        :param name: The name of the storage policy,
                     e.g. '14 Day Storage Policy'
:type name: ``str``
:param retention_period: How long to keep the backup in days
:type retention_period: ``int``
        :param secondary_location: The secondary location, e.g. 'Primary'
:type secondary_location: ``str``
"""
self.name = name
self.retention_period = retention_period
self.secondary_location = secondary_location
def __repr__(self):
return (('<DimensionDataBackupStoragePolicy: name=%s>')
% (self.name))
class DimensionDataBackupSchedulePolicy(object):
"""
A representation of a schedule policy
"""
def __init__(self, name, description):
"""
Initialize an instance of :class:`DimensionDataBackupSchedulePolicy`
        :param name: The name of the policy, e.g. '12AM - 6AM'
:type name: ``str``
:param description: Short summary of the details of the policy
:type description: ``str``
"""
self.name = name
self.description = description
def __repr__(self):
return (('<DimensionDataBackupSchedulePolicy: name=%s>')
% (self.name))
class DimensionDataTag(object):
"""
A representation of a Tag in Dimension Data
    A Tag must first have a Tag Key; an asset is then tagged with
    a key and an optional value. Tags can be queried later to filter assets
    and can also appear on usage reports if desired.
"""
def __init__(self, asset_type, asset_id, asset_name,
datacenter, key, value):
"""
Initialize an instance of :class:`DimensionDataTag`
:param asset_type: The type of asset. Current asset types:
SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE,
PUBLIC_IP_BLOCK, ACCOUNT
:type asset_type: ``str``
:param asset_id: The GUID of the asset that is tagged
:type asset_id: ``str``
:param asset_name: The name of the asset that is tagged
:type asset_name: ``str``
:param datacenter: The short datacenter name of the tagged asset
:type datacenter: ``str``
:param key: The tagged key
:type key: :class:`DimensionDataTagKey`
:param value: The tagged value
:type value: ``None`` or ``str``
"""
self.asset_type = asset_type
self.asset_id = asset_id
self.asset_name = asset_name
self.datacenter = datacenter
self.key = key
self.value = value
def __repr__(self):
return (('<DimensionDataTag: asset_name=%s, tag_name=%s, value=%s>')
% (self.asset_name, self.key.name, self.value))
class DimensionDataTagKey(object):
"""
A representation of a Tag Key in Dimension Data
A tag key is required to tag an asset
"""
def __init__(self, id, name, description,
value_required, display_on_report):
"""
Initialize an instance of :class:`DimensionDataTagKey`
:param id: GUID of the tag key
:type id: ``str``
:param name: Name of the tag key
:type name: ``str``
:param description: Description of the tag key
:type description: ``str``
:param value_required: If a value is required for this tag key
:type value_required: ``bool``
:param display_on_report: If this tag key should be displayed on
usage reports
:type display_on_report: ``bool``
"""
self.id = id
self.name = name
self.description = description
self.value_required = value_required
self.display_on_report = display_on_report
def __repr__(self):
return (('<DimensionDataTagKey: name=%s>')
% (self.name))
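# Illustrative only (all literal values are invented): a tag key and a tag
# referencing it, in the shape the Dimension Data driver would return them.
#
#     key = DimensionDataTagKey(id='tag-key-guid', name='cost-centre',
#                               description='Billing cost centre',
#                               value_required=True, display_on_report=True)
#     tag = DimensionDataTag(asset_type='SERVER', asset_id='server-guid',
#                            asset_name='web-01', datacenter='NA9',
#                            key=key, value='engineering')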
class DimensionDataIpAddressList(object):
"""
DimensionData IP Address list
"""
def __init__(self, id, name, description, ip_version,
ip_address_collection,
                 state, create_time, child_ip_address_lists=None):
        """
Initialize an instance of :class:`DimensionDataIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
:param description: Description of the IP Address List
:type description: ``str``
:param ip_version: IP version. E.g. IPV4, IPV6
:type ip_version: ``str``
:param ip_address_collection: Collection of DimensionDataIpAddress
:type ip_address_collection: ``List``
:param state: IP Address list state
:type state: ``str``
:param create_time: IP Address List created time
        :type create_time: ``datetime``
        :param child_ip_address_lists: List of IP address lists to be included
        :type child_ip_address_lists: ``list``
            of :class:`DimensionDataIpAddressList`
"""
self.id = id
self.name = name
self.description = description
self.ip_version = ip_version
self.ip_address_collection = ip_address_collection
self.state = state
self.create_time = create_time
self.child_ip_address_lists = child_ip_address_lists
def __repr__(self):
return ('<DimensionDataIpAddressList: id=%s, name=%s, description=%s, '
'ip_version=%s, ip_address_collection=%s, state=%s, '
'create_time=%s, child_ip_address_lists=%s>'
% (self.id, self.name, self.description, self.ip_version,
self.ip_address_collection, self.state, self.create_time,
self.child_ip_address_lists))
class DimensionDataChildIpAddressList(object):
"""
DimensionData Child IP Address list
"""
    def __init__(self, id, name):
        """
Initialize an instance of :class:`DimensionDataChildIpAddressList`
:param id: GUID of the IP Address List key
:type id: ``str``
:param name: Name of the IP Address List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildIpAddressList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataIpAddress(object):
"""
A representation of IP Address in Dimension Data
"""
def __init__(self, begin, end=None, prefix_size=None):
"""
Initialize an instance of :class:`DimensionDataIpAddress`
:param begin: IP Address Begin
:type begin: ``str``
:param end: IP Address end
:type end: ``str``
        :param prefix_size: IP Address prefix size
        :type prefix_size: ``int``
"""
self.begin = begin
self.end = end
self.prefix_size = prefix_size
def __repr__(self):
return ('<DimensionDataIpAddress: begin=%s, end=%s, prefix_size=%s>'
% (self.begin, self.end, self.prefix_size))
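# Illustrative only (invented values): the three accepted shapes of an entry
# in an ip_address_collection, namely a single address, an explicit range,
# and a prefix-sized block.
#
#     single = DimensionDataIpAddress(begin='10.0.0.5')
#     ip_range = DimensionDataIpAddress(begin='10.0.0.10', end='10.0.0.20')
#     block = DimensionDataIpAddress(begin='192.168.0.0', prefix_size=24)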
class DimensionDataPortList(object):
"""
DimensionData Port list
"""
def __init__(self, id, name, description, port_collection,
child_portlist_list,
                 state, create_time):
        """
Initialize an instance of :class:`DimensionDataPortList`
:param id: GUID of the Port List key
:type id: ``str``
:param name: Name of the Port List
:type name: ``str``
:param description: Description of the Port List
:type description: ``str``
:param port_collection: Collection of DimensionDataPort
:type port_collection: ``List``
:param child_portlist_list: Collection of DimensionDataChildPort
:type child_portlist_list: ``List``
:param state: Port list state
:type state: ``str``
:param create_time: Port List created time
        :type create_time: ``datetime``
"""
self.id = id
self.name = name
self.description = description
self.port_collection = port_collection
self.child_portlist_list = child_portlist_list
self.state = state
self.create_time = create_time
def __repr__(self):
return (
"<DimensionDataPortList: id=%s, name=%s, description=%s, "
"port_collection=%s, child_portlist_list=%s, state=%s, "
"create_time=%s>"
% (self.id, self.name, self.description,
self.port_collection, self.child_portlist_list, self.state,
self.create_time))
class DimensionDataChildPortList(object):
"""
DimensionData Child Port list
"""
    def __init__(self, id, name):
        """
        Initialize an instance of :class:`DimensionDataChildPortList`
:param id: GUID of the child port list key
:type id: ``str``
:param name: Name of the child port List
:type name: ``str``
"""
self.id = id
self.name = name
def __repr__(self):
return ('<DimensionDataChildPortList: id=%s, name=%s>'
% (self.id, self.name))
class DimensionDataPort(object):
"""
A representation of Port in Dimension Data
"""
def __init__(self, begin, end=None):
"""
Initialize an instance of :class:`DimensionDataPort`
:param begin: Port Number Begin
:type begin: ``str``
:param end: Port Number end
:type end: ``str``
"""
self.begin = begin
self.end = end
def __repr__(self):
return ('<DimensionDataPort: begin=%s, end=%s>'
% (self.begin, self.end))
class DimensionDataNic(object):
"""
A representation of Network Adapter in Dimension Data
"""
def __init__(self, private_ip_v4=None, vlan=None,
network_adapter_name=None):
"""
Initialize an instance of :class:`DimensionDataNic`
        :param private_ip_v4: Private IPv4 address
        :type private_ip_v4: ``str``
        :param vlan: Network VLAN
        :type vlan: :class:`DimensionDataVlan` or ``str``
:param network_adapter_name: Network Adapter Name
:type network_adapter_name: ``str``
"""
self.private_ip_v4 = private_ip_v4
self.vlan = vlan
self.network_adapter_name = network_adapter_name
def __repr__(self):
        return ('<DimensionDataNic: private_ip_v4=%s, vlan=%s, '
                'network_adapter_name=%s>'
% (self.private_ip_v4, self.vlan, self.network_adapter_name))
| apache-2.0 | -951,642,090,750,823,200 | 4,642,739,897,594,740,000 | 31.39244 | 79 | 0.565038 | false |
Cuuuurzel/KiPyCalc | sympy/liealgebras/weyl_group.py | 17 | 14811 | # -*- coding: utf-8 -*-
from sympy.core import Basic, Rational
from sympy.core.numbers import igcd
from .cartan_type import CartanType
from sympy.mpmath import fac
from operator import itemgetter
from itertools import groupby
from sympy.matrices import Matrix, eye
class WeylGroup(Basic):
"""
For each semisimple Lie group, we have a Weyl group. It is a subgroup of
the isometry group of the root system. Specifically, it’s the subgroup
that is generated by reflections through the hyperplanes orthogonal to
the roots. Therefore, Weyl groups are reflection groups, and so a Weyl
group is a finite Coxeter group.
"""
def __new__(cls, cartantype):
obj = Basic.__new__(cls, cartantype)
obj.cartan_type = CartanType(cartantype)
return obj
def generators(self):
"""
This method creates the generating reflections of the Weyl group for
a given Lie algebra. For a Lie algebra of rank n, there are n
different generating reflections. This function returns them as
a list.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("F4")
>>> c.generators()
['r1', 'r2', 'r3', 'r4']
"""
n = self.cartan_type.rank()
generators = []
for i in range(1, n+1):
reflection = "r"+str(i)
generators.append(reflection)
return generators
def group_order(self):
"""
This method returns the order of the Weyl group.
For types A, B, C, D, and E the order depends on
the rank of the Lie algebra. For types F and G,
the order is fixed.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("D4")
>>> c.group_order()
192.0
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return fac(n+1)
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
return fac(n)*(2**n)
if self.cartan_type.series == "D":
return fac(n)*(2**(n-1))
if self.cartan_type.series == "E":
if n == 6:
return 51840
if n == 7:
return 2903040
if n == 8:
return 696729600
if self.cartan_type.series == "F":
return 1152
if self.cartan_type.series == "G":
return 12
def group_name(self):
"""
This method returns some general information about the Weyl group for
a given Lie algebra. It returns the name of the group and the elements
it acts on, if relevant.
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
return "S"+str(n+1) + ": the symmetric group acting on " + str(n+1) + " elements."
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
return "The hyperoctahedral group acting on " + str(2*n) + " elements."
if self.cartan_type.series == "D":
return "The symmetry group of the " + str(n) + "-dimensional demihypercube."
if self.cartan_type.series == "E":
if n == 6:
return "The symmetry group of the 6-polytope."
if n == 7:
return "The symmetry group of the 7-polytope."
if n == 8:
return "The symmetry group of the 8-polytope."
if self.cartan_type.series == "F":
return "The symmetry group of the 24-cell, or icositetrachoron."
if self.cartan_type.series == "G":
return "D6, the dihedral group of order 12, and symmetry group of the hexagon."
def element_order(self, weylelt):
"""
This method returns the order of a given Weyl group element, which should
be specified by the user in the form of products of the generating
reflections, i.e. of the form r1*r2 etc.
        For types A-F, this method currently works by taking the matrix form of
the specified element, and then finding what power of the matrix is the
identity. It then returns this power.
Example
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> b = WeylGroup("B4")
>>> b.element_order('r1*r4*r2')
4
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n+1):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "D":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "E":
a = self.matrix_form(weylelt)
order = 1
while a != eye(8):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "G":
elts = list(weylelt)
reflections = elts[1::3]
m = self.delete_doubles(reflections)
while self.delete_doubles(m) != m:
m = self.delete_doubles(m)
reflections = m
if len(reflections) % 2 == 1:
return 2
elif len(reflections) == 0:
return 1
else:
if len(reflections) == 1:
return 2
else:
m = len(reflections) / 2
lcm = (6 * m)/ igcd(m, 6)
order = lcm / m
return order
if self.cartan_type.series == 'F':
a = self.matrix_form(weylelt)
order = 1
while a != eye(4):
a *= self.matrix_form(weylelt)
order += 1
return order
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
a = self.matrix_form(weylelt)
order = 1
while a != eye(n):
a *= self.matrix_form(weylelt)
order += 1
return order
def delete_doubles(self, reflections):
"""
This is a helper method for determining the order of an element in the
        Weyl group of G2. It takes a Weyl group element and deletes any
        repeated simple reflections in it.
"""
counter = 0
copy = list(reflections)
for elt in copy:
if counter < len(copy)-1:
if copy[counter + 1] == elt:
del copy[counter]
del copy[counter]
counter += 1
return copy
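    # Worked example (an illustrative note, not part of the original module):
    # for the G2 word 'r1*r2*r2*r1' the reflection list is
    # ['1', '2', '2', '1']. One pass of delete_doubles removes the adjacent
    # pair of '2's, giving ['1', '1']; the next pass empties the list, so
    # element_order returns 1, i.e. the word reduces to the identity.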
def matrix_form(self, weylelt):
"""
This method takes input from the user in the form of products of the
generating reflections, and returns the matrix corresponding to the
element of the Weyl group. Since each element of the Weyl group is
a reflection of some type, there is a corresponding matrix representation.
This method uses the standard representation for all the generating
reflections.
Example
=======
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> f = WeylGroup("F4")
>>> f.matrix_form('r2*r3')
Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, -1],
[0, 0, 1, 0]])
"""
elts = list(weylelt)
reflections = elts[1::3]
n = self.cartan_type.rank()
if self.cartan_type.series == 'A':
matrixform = eye(n+1)
for elt in reflections:
a = int(elt)
mat = eye(n+1)
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'D':
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a < n:
mat[a-1, a-1] = 0
mat[a-1, a] = 1
mat[a, a-1] = 1
mat[a, a] = 0
matrixform *= mat
else:
mat[n-2, n-1] = -1
mat[n-2, n-2] = 0
mat[n-1, n-2] = -1
mat[n-1, n-1] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'G':
matrixform = eye(3)
for elt in reflections:
a = int(elt)
if a == 1:
gen1 = Matrix([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
matrixform *= gen1
else:
gen2 = Matrix([[Rational(2, 3), Rational(2, 3), -Rational(1, 3)],
[Rational(2, 3), Rational(-1, 3), Rational(2, 3)], [Rational(-1, 3),
Rational(2, 3), Rational(2, 3)]])
matrixform *= gen2
return matrixform
if self.cartan_type.series == 'F':
matrixform = eye(4)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
matrixform *= mat
elif a == 2:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrixform *= mat
elif a == 3:
mat = Matrix([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
matrixform *= mat
else:
mat = Matrix([[Rational(1, 2), Rational(1, 2), Rational(1, 2), Rational(1, 2)],
[Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)],
[Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]])
matrixform *= mat
return matrixform
if self.cartan_type.series == 'E':
matrixform = eye(8)
for elt in reflections:
a = int(elt)
if a == 1:
mat = Matrix([[Rational(3, 4), Rational(1, 4), Rational(1, 4), Rational(1, 4),
Rational(1, 4), Rational(1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(3, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(1, 4), Rational(-1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(3, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(3, 4), Rational(-1, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(3, 4), Rational(-1, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-3, 4), Rational(1, 4)],
[Rational(1, 4), Rational(-1, 4), Rational(-1, 4), Rational(-1, 4),
Rational(-1, 4), Rational(-1, 4), Rational(-1, 4), Rational(3, 4)]])
matrixform *= mat
elif a == 2:
mat = eye(8)
mat[0, 0] = 0
mat[0, 1] = -1
mat[1, 0] = -1
mat[1, 1] = 0
matrixform *= mat
else:
mat = eye(8)
mat[a-3, a-3] = 0
mat[a-3, a-2] = 1
mat[a-2, a-3] = 1
mat[a-2, a-2] = 0
matrixform *= mat
return matrixform
if self.cartan_type.series == 'B' or self.cartan_type.series == 'C':
matrixform = eye(n)
for elt in reflections:
a = int(elt)
mat = eye(n)
if a == 1:
mat[0, 0] = -1
matrixform *= mat
else:
mat[a - 2, a - 2] = 0
mat[a-2, a-1] = 1
mat[a - 1, a - 2] = 1
mat[a -1, a - 1] = 0
matrixform *= mat
return matrixform
def coxeter_diagram(self):
"""
This method returns the Coxeter diagram corresponding to a Weyl group.
The Coxeter diagram can be obtained from a Lie algebra's Dynkin diagram
by deleting all arrows; the Coxeter diagram is the undirected graph.
The vertices of the Coxeter diagram represent the generating reflections
        of the Weyl group, s_i. An edge is drawn between s_i and s_j if the order
m(i, j) of s_i*s_j is greater than two. If there is one edge, the order
m(i, j) is 3. If there are two edges, the order m(i, j) is 4, and if there
are three edges, the order m(i, j) is 6.
Example
========
>>> from sympy.liealgebras.weyl_group import WeylGroup
>>> c = WeylGroup("B3")
>>> print(c.coxeter_diagram())
0---0===0
1 2 3
"""
n = self.cartan_type.rank()
if self.cartan_type.series == "A" or self.cartan_type.series == "D" or self.cartan_type.series == "E":
return self.cartan_type.dynkin_diagram()
if self.cartan_type.series == "B" or self.cartan_type.series == "C":
diag = "---".join("0" for i in range(1, n)) + "===0\n"
diag += " ".join(str(i) for i in range(1, n+1))
return diag
if self.cartan_type.series == "F":
diag = "0---0===0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
if self.cartan_type.series == "G":
diag = "0≡≡≡0\n1 2"
return diag
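# Quick doctest-style sanity checks (illustrative, consistent with the
# docstring examples above):
#
#     >>> W = WeylGroup("G2")
#     >>> W.group_order()
#     12
#     >>> W.element_order('r1*r2*r2*r1')
#     1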
| mit | -7,159,790,580,341,221,000 | 7,304,516,556,306,381,000 | 35.915212 | 110 | 0.469364 | false |
Baz2013/blog_demo | leetcode/easy/pascal_triangle.py | 1 | 1246 | # -*- coding:utf-8 -*-
# 118. Pascal's Triangle
# Difficulty: Easy
# Contributors: Admin
# Given numRows, generate the first numRows of Pascal's triangle.
#
# For example, given numRows = 5,
# Return
#
# [
# [1],
# [1,1],
# [1,2,1],
# [1,3,3,1],
# [1,4,6,4,1]
# ]
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = []
if numRows == 0:
return res
elif numRows == 1:
res.append([1])
return res
else:
res.append([1])
res.append([1, 1])
i = 2
while i < numRows:
tmp = list()
tmp.append(1)
pre_lst = res[i - 1]
                # Each interior element is the sum of two adjacent elements
                # in the previous row; the row is then closed with a 1.
                for n in range(len(pre_lst)):
if n + 1 <= len(pre_lst) - 1:
tmp.append(pre_lst[n] + pre_lst[n + 1])
else:
tmp.append(1)
res.append(tmp)
i += 1
return res
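# Illustrative alternative (not used by the checks below): builds the same
# triangle more compactly, summing adjacent pairs of the previous row.
def generate_compact(num_rows):
    rows = []
    for i in range(num_rows):
        if i == 0:
            rows.append([1])
        else:
            prev = rows[-1]
            rows.append([1] + [prev[j] + prev[j + 1]
                               for j in range(len(prev) - 1)] + [1])
    return rows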
if __name__ == '__main__':
s = Solution()
print s.generate(5)
print s.generate(3)
print s.generate(2)
print s.generate(1)
print s.generate(0)
| gpl-3.0 | 750,756,787,384,961,400 | 2,328,373,374,085,672,000 | 21.25 | 69 | 0.460674 | false |
gojira/tensorflow | tensorflow/contrib/coder/python/layers/entropybottleneck.py | 9 | 30102 | # -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Entropy bottleneck layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.coder.python.ops import coder_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
class EntropyBottleneck(base_layer.Layer):
"""Entropy bottleneck layer.
This layer can be used to model the entropy (the amount of information
conveyed) of the tensor passing through it. During training, this can be used
to impose a (soft) entropy constraint on its activations, limiting the amount
of information flowing through the layer. Note that this is distinct from
other types of bottlenecks, which reduce the dimensionality of the space, for
example. Dimensionality reduction does not limit the amount of information,
and does not enable efficient data compression per se.
After training, this layer can be used to compress any input tensor to a
string, which may be written to a file, and to decompress a file which it
previously generated back to a reconstructed tensor (possibly on a different
machine having access to the same model checkpoint). The entropies estimated
during training or evaluation are approximately equal to the average length of
the strings in bits.
The layer implements a flexible probability density model to estimate entropy,
which is described in the appendix of the paper (please cite the paper if you
use this code for scientific work):
"Variational image compression with a scale hyperprior"
Johannes Ballé, David Minnen, Saurabh Singh, Sung Jin Hwang, Nick Johnston
https://arxiv.org/abs/1802.01436
The layer assumes that the input tensor is at least 2D, with a batch dimension
at the beginning and a channel dimension as specified by `data_format`. The
layer trains an independent probability density model for each channel, but
assumes that across all other dimensions, the inputs are i.i.d. (independent
and identically distributed). Because the entropy (and hence, average
codelength) is a function of the densities, this assumption may have a direct
effect on the compression performance.
Because data compression always involves discretization, the outputs of the
layer are generally only approximations of its inputs. During training,
discretization is modeled using additive uniform noise to ensure
differentiability. The entropies computed during training are differential
entropies. During evaluation, the data is actually quantized, and the
entropies are discrete (Shannon entropies). To make sure the approximated
tensor values are good enough for practical purposes, the training phase must
be used to balance the quality of the approximation with the entropy, by
adding an entropy term to the training loss, as in the following example.
Here, we use the entropy bottleneck to compress the latent representation of
an autoencoder. The data vectors `x` in this case are 4D tensors in
`'channels_last'` format (for example, 16x16 pixel grayscale images).
The layer always produces exactly one auxiliary loss and one update op which
are only significant for compression and decompression. To use the compression
feature, the auxiliary loss must be minimized during or after training. After
that, the update op must be executed at least once. Here, we simply attach
them to the main training step.
Training:
```
# Build autoencoder.
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
entropy_bottleneck = EntropyBottleneck()
y_, likelihoods = entropy_bottleneck(y, training=True)
x_ = backward_transform(y_)
# Information content (= predicted codelength) in bits of each batch element
# (note that taking the natural logarithm and dividing by `log(2)` is
# equivalent to taking base-2 logarithms):
bits = tf.reduce_sum(tf.log(likelihoods), axis=(1, 2, 3)) / -np.log(2)
# Squared difference of each batch element:
squared_error = tf.reduce_sum(tf.squared_difference(x, x_), axis=(1, 2, 3))
# The loss is a weighted sum of mean squared error and entropy (average
# information content), where the weight controls the trade-off between
# approximation error and entropy.
main_loss = 0.5 * tf.reduce_mean(squared_error) + tf.reduce_mean(bits)
# Minimize loss and auxiliary loss, and execute update op.
main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
main_step = optimizer.minimize(main_loss)
# 1e-2 is a good starting point for the learning rate of the auxiliary loss,
# assuming Adam is used.
aux_optimizer = tf.train.AdamOptimizer(learning_rate=1e-2)
aux_step = optimizer.minimize(entropy_bottleneck.losses[0])
step = tf.group(main_step, aux_step, entropy_bottleneck.updates[0])
```
Evaluation:
```
# Build autoencoder.
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
y_, likelihoods = EntropyBottleneck()(y, training=False)
x_ = backward_transform(y_)
# Information content (= predicted codelength) in bits of each batch element:
bits = tf.reduce_sum(tf.log(likelihoods), axis=(1, 2, 3)) / -np.log(2)
# Squared difference of each batch element:
squared_error = tf.reduce_sum(tf.squared_difference(x, x_), axis=(1, 2, 3))
# The loss is a weighted sum of mean squared error and entropy (average
# information content), where the weight controls the trade-off between
# approximation error and entropy.
loss = 0.5 * tf.reduce_mean(squared_error) + tf.reduce_mean(bits)
```
To be able to compress the bottleneck tensor and decompress it in a different
session, or on a different machine, you need three items:
- The compressed representations stored as strings.
- The shape of the bottleneck for these string representations as a `Tensor`,
as well as the number of channels of the bottleneck at graph construction
time.
- The checkpoint of the trained model that was used for compression. Note:
It is crucial that the auxiliary loss produced by this layer is minimized
during or after training, and that the update op is run after training and
minimization of the auxiliary loss, but *before* the checkpoint is saved.
Compression:
```
x = tf.placeholder(tf.float32, shape=[None, 16, 16, 1])
y = forward_transform(x)
strings = EntropyBottleneck().compress(y)
shape = tf.shape(y)[1:]
```
Decompression:
```
strings = tf.placeholder(tf.string, shape=[None])
shape = tf.placeholder(tf.int32, shape=[3])
entropy_bottleneck = EntropyBottleneck(dtype=tf.float32)
y_ = entropy_bottleneck.decompress(strings, shape, channels=5)
x_ = backward_transform(y_)
```
Here, we assumed that the tensor produced by the forward transform has 5
channels.
The above four use cases can also be implemented within the same session (i.e.
on the same `EntropyBottleneck` instance), for testing purposes, etc., by
calling the object more than once.
Arguments:
init_scale: Float. A scaling factor determining the initial width of the
probability densities. This should be chosen big enough so that the
range of values of the layer inputs roughly falls within the interval
[`-init_scale`, `init_scale`] at the beginning of training.
filters: An iterable of ints, giving the number of filters at each layer of
the density model. Generally, the more filters and layers, the more
expressive is the density model in terms of modeling more complicated
distributions of the layer inputs. For details, refer to the paper
referenced above. The default is `[3, 3, 3]`, which should be sufficient
for most practical purposes.
tail_mass: Float, between 0 and 1. The bottleneck layer automatically
determines the range of input values that should be represented based on
their frequency of occurrence. Values occurring in the tails of the
distributions will be clipped to that range during compression.
`tail_mass` determines the amount of probability mass in the tails which
is cut off in the worst case. For example, the default value of `1e-9`
means that at most 1 in a billion input samples will be clipped to the
range.
optimize_integer_offset: Boolean. Typically, the input values of this layer
are floats, which means that quantization during evaluation can be
performed with an arbitrary offset. By default, the layer determines that
offset automatically. In special situations, such as when it is known that
the layer will receive only full integer values during evaluation, it can
be desirable to set this argument to `False` instead, in order to always
quantize to full integer values.
likelihood_bound: Float. If positive, the returned likelihood values are
ensured to be greater than or equal to this value. This prevents very
large gradients with a typical entropy loss (defaults to 1e-9).
range_coder_precision: Integer, between 1 and 16. The precision of the range
coder used for compression and decompression. This trades off computation
speed with compression efficiency, where 16 is the slowest but most
efficient setting. Choosing lower values may increase the average
codelength slightly compared to the estimated entropies.
data_format: Either `'channels_first'` or `'channels_last'` (default).
trainable: Boolean. Whether the layer should be trained.
name: String. The name of the layer.
dtype: Default dtype of the layer's parameters (default of `None` means use
the type of the first input).
Read-only properties:
init_scale: See above.
filters: See above.
tail_mass: See above.
optimize_integer_offset: See above.
likelihood_bound: See above.
range_coder_precision: See above.
data_format: See above.
name: String. See above.
dtype: See above.
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer. Always contains exactly one
update op, which must be run once after the last training step, before
`compress` or `decompress` is used.
losses: List of losses added by this layer. Always contains exactly one
auxiliary loss, which must be added to the training loss.
Mutable properties:
trainable: Boolean. Whether the layer should be trained.
input_spec: Optional `InputSpec` object specifying the constraints on inputs
that can be accepted by the layer.
"""
def __init__(self, init_scale=10, filters=(3, 3, 3), tail_mass=1e-9,
optimize_integer_offset=True, likelihood_bound=1e-9,
range_coder_precision=16, data_format="channels_last", **kwargs):
super(EntropyBottleneck, self).__init__(**kwargs)
self._init_scale = float(init_scale)
self._filters = tuple(int(f) for f in filters)
self._tail_mass = float(tail_mass)
if not 0 < self.tail_mass < 1:
raise ValueError(
"`tail_mass` must be between 0 and 1, got {}.".format(self.tail_mass))
self._optimize_integer_offset = bool(optimize_integer_offset)
self._likelihood_bound = float(likelihood_bound)
self._range_coder_precision = int(range_coder_precision)
self._data_format = data_format
self._channel_axis(2) # trigger ValueError early
self.input_spec = base_layer.InputSpec(min_ndim=2)
@property
def init_scale(self):
return self._init_scale
@property
def filters(self):
return self._filters
@property
def tail_mass(self):
return self._tail_mass
@property
def optimize_integer_offset(self):
return self._optimize_integer_offset
@property
def likelihood_bound(self):
return self._likelihood_bound
@property
def range_coder_precision(self):
return self._range_coder_precision
@property
def data_format(self):
return self._data_format
def _channel_axis(self, ndim):
try:
return {"channels_first": 1, "channels_last": ndim - 1}[self.data_format]
except KeyError:
raise ValueError("Unsupported `data_format` for {} layer: {}.".format(
self.__class__.__name__, self.data_format))
def _logits_cumulative(self, inputs, stop_gradient):
"""Evaluate logits of the cumulative densities.
Args:
inputs: The values at which to evaluate the cumulative densities, expected
to be a `Tensor` of shape `(channels, 1, batch)`.
stop_gradient: Boolean. Whether to add `array_ops.stop_gradient` calls so
that the gradient of the output with respect to the density model
parameters is disconnected (the gradient with respect to `inputs` is
left untouched).
Returns:
A `Tensor` of the same shape as `inputs`, containing the logits of the
cumulative densities evaluated at the given inputs.
"""
logits = inputs
for i in range(len(self.filters) + 1):
matrix = self._matrices[i]
if stop_gradient:
matrix = array_ops.stop_gradient(matrix)
logits = math_ops.matmul(matrix, logits)
bias = self._biases[i]
if stop_gradient:
bias = array_ops.stop_gradient(bias)
logits += bias
if i < len(self._factors):
factor = self._factors[i]
if stop_gradient:
factor = array_ops.stop_gradient(factor)
logits += factor * math_ops.tanh(logits)
return logits
def build(self, input_shape):
"""Builds the layer.
Creates the variables for the network modeling the densities, creates the
auxiliary loss estimating the median and tail quantiles of the densities,
and then uses that to create the probability mass functions and the update
op that produces the discrete cumulative density functions used by the range
coder.
Args:
input_shape: Shape of the input tensor, used to get the number of
channels.
Raises:
ValueError: if `input_shape` doesn't specify the length of the channel
dimension.
"""
input_shape = tensor_shape.TensorShape(input_shape)
channel_axis = self._channel_axis(input_shape.ndims)
channels = input_shape[channel_axis].value
if channels is None:
raise ValueError("The channel dimension of the inputs must be defined.")
self.input_spec = base_layer.InputSpec(
ndim=input_shape.ndims, axes={channel_axis: channels})
filters = (1,) + self.filters + (1,)
scale = self.init_scale ** (1 / (len(self.filters) + 1))
# Create variables.
self._matrices = []
self._biases = []
self._factors = []
for i in range(len(self.filters) + 1):
init = np.log(np.expm1(1 / scale / filters[i + 1]))
matrix = self.add_variable(
"matrix_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], filters[i]),
initializer=init_ops.Constant(init))
matrix = nn.softplus(matrix)
self._matrices.append(matrix)
bias = self.add_variable(
"bias_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], 1),
initializer=init_ops.RandomUniform(-.5, .5))
self._biases.append(bias)
if i < len(self.filters):
factor = self.add_variable(
"factor_{}".format(i), dtype=self.dtype,
shape=(channels, filters[i + 1], 1),
initializer=init_ops.Zeros())
factor = math_ops.tanh(factor)
self._factors.append(factor)
# To figure out what range of the densities to sample, we need to compute
# the quantiles given by `tail_mass / 2` and `1 - tail_mass / 2`. Since we
# can't take inverses of the cumulative directly, we make it an optimization
# problem:
# `quantiles = argmin(|logit(cumulative) - target|)`
# where `target` is `logit(tail_mass / 2)` or `logit(1 - tail_mass / 2)`.
# Taking the logit (inverse of sigmoid) of the cumulative makes the
# representation of the right target more numerically stable.
# Numerically stable way of computing logits of `tail_mass / 2`
# and `1 - tail_mass / 2`.
target = np.log(2 / self.tail_mass - 1)
# Compute lower and upper tail quantile as well as median.
target = constant_op.constant([-target, 0, target], dtype=self.dtype)
def quantiles_initializer(shape, dtype=None, partition_info=None):
del partition_info # unused
assert tuple(shape[1:]) == (1, 3)
init = constant_op.constant(
[[[-self.init_scale, 0, self.init_scale]]], dtype=dtype)
return array_ops.tile(init, (shape[0], 1, 1))
quantiles = self.add_variable(
"quantiles", shape=(channels, 1, 3), dtype=self.dtype,
initializer=quantiles_initializer)
logits = self._logits_cumulative(quantiles, stop_gradient=True)
loss = math_ops.reduce_sum(abs(logits - target))
self.add_loss(loss, inputs=None)
# Save medians for `call`, `compress`, and `decompress`.
self._medians = quantiles[:, :, 1:2]
if not self.optimize_integer_offset:
self._medians = math_ops.round(self._medians)
# Largest distance observed between lower tail quantile and median,
# or between median and upper tail quantile.
minima = math_ops.reduce_max(self._medians - quantiles[:, :, 0:1])
maxima = math_ops.reduce_max(quantiles[:, :, 2:3] - self._medians)
minmax = math_ops.maximum(minima, maxima)
minmax = math_ops.ceil(minmax)
minmax = math_ops.maximum(minmax, 1)
# Sample the density up to `minmax` around the median.
samples = math_ops.range(-minmax, minmax + 1, dtype=self.dtype)
samples += self._medians
half = constant_op.constant(.5, dtype=self.dtype)
# We strip the sigmoid from the end here, so we can use the special rule
# below to only compute differences in the left tail of the sigmoid.
# This increases numerical stability (see explanation in `call`).
lower = self._logits_cumulative(samples - half, stop_gradient=True)
upper = self._logits_cumulative(samples + half, stop_gradient=True)
# Flip signs if we can move more towards the left tail of the sigmoid.
sign = -math_ops.sign(math_ops.add_n([lower, upper]))
pmf = abs(math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
# Add tail masses to first and last bin of pmf, as we clip values for
# compression, meaning that out-of-range values get mapped to these bins.
pmf = array_ops.concat([
math_ops.add_n([pmf[:, 0, :1], math_ops.sigmoid(lower[:, 0, :1])]),
pmf[:, 0, 1:-1],
math_ops.add_n([pmf[:, 0, -1:], math_ops.sigmoid(-upper[:, 0, -1:])]),
], axis=-1)
self._pmf = pmf
cdf = coder_ops.pmf_to_quantized_cdf(
pmf, precision=self.range_coder_precision)
def cdf_getter(*args, **kwargs):
del args, kwargs # ignored
return variable_scope.get_variable(
"quantized_cdf", dtype=dtypes.int32, initializer=cdf,
trainable=False, validate_shape=False, collections=())
# Need to provide a fake shape here since add_variable insists on it.
self._quantized_cdf = self.add_variable(
"quantized_cdf", shape=(channels, 1), dtype=dtypes.int32,
getter=cdf_getter, trainable=False)
update_op = state_ops.assign(
self._quantized_cdf, cdf, validate_shape=False)
self.add_update(update_op, inputs=None)
super(EntropyBottleneck, self).build(input_shape)
def call(self, inputs, training):
"""Pass a tensor through the bottleneck.
Args:
inputs: The tensor to be passed through the bottleneck.
training: Boolean. If `True`, returns a differentiable approximation of
the inputs, and their likelihoods under the modeled probability
densities. If `False`, returns the quantized inputs and their
likelihoods under the corresponding probability mass function. These
quantities can't be used for training, as they are not differentiable,
but represent actual compression more closely.
Returns:
values: `Tensor` with the same shape as `inputs` containing the perturbed
or quantized input values.
likelihood: `Tensor` with the same shape as `inputs` containing the
likelihood of `values` under the modeled probability distributions.
Raises:
ValueError: if `inputs` has different `dtype` or number of channels than
a previous set of inputs the model was invoked with earlier.
"""
inputs = ops.convert_to_tensor(inputs)
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
half = constant_op.constant(.5, dtype=self.dtype)
# Convert to (channels, 1, batch) format by commuting channels to front
# and then collapsing.
order = list(range(ndim))
order.pop(channel_axis)
order.insert(0, channel_axis)
values = array_ops.transpose(inputs, order)
shape = array_ops.shape(values)
values = array_ops.reshape(values, (shape[0], 1, -1))
# Add noise or quantize.
if training:
noise = random_ops.random_uniform(array_ops.shape(values), -half, half)
values = math_ops.add_n([values, noise])
elif self.optimize_integer_offset:
values = math_ops.round(values - self._medians) + self._medians
else:
values = math_ops.round(values)
# Evaluate densities.
# We can use the special rule below to only compute differences in the left
# tail of the sigmoid. This increases numerical stability: sigmoid(x) is 1
# for large x, 0 for small x. Subtracting two numbers close to 0 can be done
# with much higher precision than subtracting two numbers close to 1.
lower = self._logits_cumulative(values - half, stop_gradient=False)
upper = self._logits_cumulative(values + half, stop_gradient=False)
# Flip signs if we can move more towards the left tail of the sigmoid.
sign = -math_ops.sign(math_ops.add_n([lower, upper]))
sign = array_ops.stop_gradient(sign)
likelihood = abs(
math_ops.sigmoid(sign * upper) - math_ops.sigmoid(sign * lower))
if self.likelihood_bound > 0:
likelihood_bound = constant_op.constant(
self.likelihood_bound, dtype=self.dtype)
# TODO(jballe): Override gradients.
likelihood = math_ops.maximum(likelihood, likelihood_bound)
# Convert back to input tensor shape.
order = list(range(1, ndim))
order.insert(channel_axis, 0)
values = array_ops.reshape(values, shape)
values = array_ops.transpose(values, order)
likelihood = array_ops.reshape(likelihood, shape)
likelihood = array_ops.transpose(likelihood, order)
if not context.executing_eagerly():
values_shape, likelihood_shape = self.compute_output_shape(inputs.shape)
values.set_shape(values_shape)
likelihood.set_shape(likelihood_shape)
return values, likelihood
def compress(self, inputs):
"""Compress inputs and store their binary representations into strings.
Args:
inputs: `Tensor` with values to be compressed.
Returns:
String `Tensor` vector containing the compressed representation of each
batch element of `inputs`.
"""
with ops.name_scope(self._name_scope()):
inputs = ops.convert_to_tensor(inputs)
if not self.built:
# Check input assumptions set before layer building, e.g. input rank.
self._assert_input_compatibility(inputs)
if self.dtype is None:
self._dtype = inputs.dtype.base_dtype.name
self.build(inputs.shape)
# Check input assumptions set after layer building, e.g. input shape.
if not context.executing_eagerly():
self._assert_input_compatibility(inputs)
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
# Tuple of slices for expanding dimensions of tensors below.
slices = ndim * [None] + [slice(None)]
slices[channel_axis] = slice(None)
slices = tuple(slices)
# Expand dimensions of CDF to input dimensions, keeping the channels along
# the right dimension.
cdf = self._quantized_cdf[slices[1:]]
num_levels = array_ops.shape(cdf)[-1] - 1
# Bring inputs to the right range by centering the range on the medians.
half = constant_op.constant(.5, dtype=self.dtype)
medians = array_ops.squeeze(self._medians, [1, 2])
offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
# Expand offsets to input dimensions and add to inputs.
values = inputs + offsets[slices[:-1]]
# Clip to range and cast to integers. Because we have added .5 above, and
# all values are positive, the cast effectively implements rounding.
values = math_ops.maximum(values, half)
values = math_ops.minimum(
values, math_ops.cast(num_levels, self.dtype) - half)
values = math_ops.cast(values, dtypes.int16)
def loop_body(tensor):
return coder_ops.range_encode(
tensor, cdf, precision=self.range_coder_precision)
strings = functional_ops.map_fn(
loop_body, values, dtype=dtypes.string, back_prop=False)
if not context.executing_eagerly():
strings.set_shape(inputs.shape[:1])
return strings
def decompress(self, strings, shape, channels=None):
"""Decompress values from their compressed string representations.
Args:
strings: A string `Tensor` vector containing the compressed data.
shape: A `Tensor` vector of int32 type. Contains the shape of the tensor
to be decompressed, excluding the batch dimension.
channels: Integer. Specifies the number of channels statically. Needs only
be set if the layer hasn't been built yet (i.e., this is the first input
it receives).
Returns:
The decompressed `Tensor`. Its shape will be equal to `shape` prepended
with the batch dimension from `strings`.
Raises:
ValueError: If the length of `shape` isn't available at graph construction
time.
"""
with ops.name_scope(self._name_scope()):
strings = ops.convert_to_tensor(strings)
shape = ops.convert_to_tensor(shape)
if self.built:
ndim = self.input_spec.ndim
channel_axis = self._channel_axis(ndim)
if channels is None:
channels = self.input_spec.axes[channel_axis]
else:
if not (shape.shape.is_fully_defined() and shape.shape.ndims == 1):
raise ValueError("`shape` must be a vector with known length.")
ndim = shape.shape[0].value + 1
channel_axis = self._channel_axis(ndim)
input_shape = ndim * [None]
input_shape[channel_axis] = channels
self.build(input_shape)
# Tuple of slices for expanding dimensions of tensors below.
slices = ndim * [None] + [slice(None)]
slices[channel_axis] = slice(None)
slices = tuple(slices)
# Expand dimensions of CDF to input dimensions, keeping the channels along
# the right dimension.
cdf = self._quantized_cdf[slices[1:]]
num_levels = array_ops.shape(cdf)[-1] - 1
def loop_body(string):
return coder_ops.range_decode(
string, shape, cdf, precision=self.range_coder_precision)
outputs = functional_ops.map_fn(
loop_body, strings, dtype=dtypes.int16, back_prop=False)
outputs = math_ops.cast(outputs, self.dtype)
medians = array_ops.squeeze(self._medians, [1, 2])
offsets = math_ops.cast(num_levels // 2, self.dtype) - medians
outputs -= offsets[slices[:-1]]
if not context.executing_eagerly():
outputs_shape = ndim * [None]
outputs_shape[0] = strings.shape[0]
outputs_shape[channel_axis] = channels
outputs.set_shape(outputs_shape)
return outputs
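# Minimal round-trip sketch for the two methods above (not part of the
# original file; `entropy_layer` and `latents` are placeholder names for an
# already-built instance of this class and a batched input tensor):
#
#   strings = entropy_layer.compress(latents)          # one string per batch element
#   decoded = entropy_layer.decompress(
#       strings,
#       shape=tf.shape(latents)[1:],                   # shape without the batch dim
#       channels=int(latents.shape[-1]))               # optional once the layer is built
#   # `decoded` has the same shape as `latents`, with values quantized to bin centers.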
def visualize(self):
"""Multi-channel visualization of densities as images.
Creates and returns an image summary visualizing the current probability
density estimates. The image contains one row for each channel. Within each
row, the pixel intensities are proportional to probability values, and each
row is centered on the median of the corresponding distribution.
Returns:
The created image summary.
"""
with ops.name_scope(self._name_scope()):
image = self._pmf
image *= 255 / math_ops.reduce_max(image, axis=1, keepdims=True)
image = math_ops.cast(image + .5, dtypes.uint8)
image = image[None, :, :, None]
return summary.image("pmf", image, max_outputs=1)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
return input_shape, input_shape
| apache-2.0 | 2,976,749,708,829,741,600 | -5,564,747,008,262,269,000 | 42.186514 | 80 | 0.694628 | false |
nevillegrech/stdl | src/STDL/Tests/DependsOnTest.py | 1 | 4539 | import unittest
import sys
sys.path=['..'] + sys.path
from Parser import Parser # code from module you're testing
from DependsOn import *
from Exceptions import *
class SimpleTests(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.parser = Parser()
self.dependsOn = DependsOn()
def testPopulate1(self):
cut=self.dependsOn
cut.tokens=[]
cut.populate()
assert len(cut)==0, len(cut)
assert not cut
def testPopulate2(self):
cut=self.dependsOn
test="""\
dependsOn x1,x2,y1,y2:
<%
c1=new ArrayList();
c1.add(new Point(%x1%,%y1%));
c1.add(new Point(%x2%,%y2%));
%>
<% %>
out:
returns >= (x1+y1) - (x2-y2)
error:
throws > 3"""
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
assert len(cut.subPartitions)==2
assert len(cut)==4,len(cut)
assert len(cut.outPartitionCheck)==1
assert len(cut.errorPartitionCheck)==1
def testPopulateErr(self):
cut=self.dependsOn
test="""\
dependsOn x1,x2,y1,y2:
<%
c1=new ArrayList();
c1.add(new Point(%x1%,%y1%));
c1.add(new Point(%x2%,%y2%));
%>
error:
throws > 3
returns < 2"""
cut.tokens=self.parser.dependsOn.parseString(test)[0]
self.assertRaises(SemanticException,cut.populate)
def getTestSuite1(self):
#Mock Objects
out1=PartitionCheck()
out1.notEmpty, out1.throws, out1.valid, out1.out=True, False, True, True
check1=PartitionCheckItem()
check1.addReturns, check1.code, check1.comparator = True, 3, '>'
check2=PartitionCheckItem()
check2.addReturns, check2.code, check2.comparator = True, 4, '>'
out1.checkItems=[check1,check2]
out2=PartitionCheck()
out2.notEmpty, out2.throws, out2.valid, out2.out=True, False, True, True
out2.checkItems=[check2]
check1=PartitionCheckItem()
check1.addReturns, check1.code, check1.comparator = True, 2, '<'
check2=PartitionCheckItem()
check2.addReturns, check2.code, check2.comparator = True, 4, '<'
dic1=[out1,TestCaseValue(5,0,True,'a'),TestCaseValue(3,0,True,'b'),TestCaseValue(4,0,True,'c')]
dic2=[out2,TestCaseValue(5,0,True,'a'),TestCaseValue(4,0,True,'b'),TestCaseValue(2,1,True,'c')]
return [dic1,dic2]
def testPutValues1(self):
testSuite=self.getTestSuite1()
#DependsOn object
cut=self.dependsOn
test='''\
dependsOn a,b:
d==34
d<a + b
out:
returns > d
'''
cut.inputName='d'
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
cut.valueStart=2
testSuite[0].append(TestCaseValue(index=3))
testSuite[1].append(TestCaseValue(index=3))
cut.putValues(testSuite[0],4)
cut.putValues(testSuite[1],4)
assert testSuite[0][4].value==7,testSuite[0][4]
assert testSuite[1][4].value==8,testSuite[1][4]
assert len(testSuite[0][0].checkItems)==3,len(testSuite[0][0].checkItems)
assert len(testSuite[1][0].checkItems)==2,len(testSuite[1][0].checkItems)
def testPutValues2(self):
testSuite=self.getTestSuite1()
#DependsOn object
cut=self.dependsOn
test='''\
dependsOn a,b:
d<a + b
d==a+2
d==b-2
out:
returns > d
error:
returns < 0
returns < b - a
'''
cut.inputName='d'
cut.tokens=self.parser.dependsOn.parseString(test)[0]
cut.populate()
cut.valueStart=2
testSuite[0].append(TestCaseValue(index=3))
testSuite[1].append(TestCaseValue(index=4))
cut.putValues(testSuite[0],4)
cut.putValues(testSuite[1],4)
assert testSuite[0][4].value==7,testSuite[0][4]
assert testSuite[1][4].value==2,testSuite[1][4]
#Check out partition check here
assert len(testSuite[0][0].checkItems)==3
assert len(testSuite[1][0].checkItems)==2
if __name__ == "__main__":
unittest.main() # run all tests
| epl-1.0 | -2,026,263,126,849,085,400 | 1,393,511,257,657,356,800 | 32.664122 | 103 | 0.54924 | false |
SukkoPera/Arduino-Sensoria | python/server2.py | 1 | 1152 | #!/usr/bin/env python
import time
import server
import argparse
DEFAULT_NAME = "Server2"
class OutdoorSensor (server.TemperatureSensor):
def __init__ (self):
super (OutdoorSensor, self).__init__ ("OS", "Outdoor Sensor", "20171127")
class OutdoorLight (server.ControlledRelayActuator):
def __init__ (self):
super (OutdoorLight, self).__init__ ("KF", "Outdoor Light", "20171127")
parser = argparse.ArgumentParser ( description = 'Simulate some Sensoria transducers')
parser.add_argument ('-n', '--name', help = 'Server name')
parser.add_argument ('-p', '--port', type = int, default = None, help = 'UDP port to listen on')
parser.add_argument ('-a', '--advertise', type = int, default = None, help = 'Interval between server advertisement messages', metavar = "SECONDS")
args = parser.parse_args ()
os = OutdoorSensor ()
ol = OutdoorLight ()
if args.port:
listener = server.CommandListener (args.name, args.port)
else:
listener = server.CommandListener (args.name)
listener.register_sensor (os)
listener.register_sensor (ol)
if args.advertise:
listener.setAdvertiseInterval (args.advertise)
listener.start ()
while True:
time.sleep (1)
| gpl-3.0 | 9,145,369,708,512,062,000 | 2,457,035,238,313,013,000 | 30.135135 | 147 | 0.717014 | false |
ytc301/ec2-auto-scaler | providers/aws.py | 3 | 7385 | import logging
from datetime import timedelta
from datetime import datetime
import time
from boto import ec2
from boto.ec2 import cloudwatch
from errors import ScaleError, InstanceLaunchTimeOut, SpotRequestTimeOut
from providers import Providers
class AWS(Providers):
def __init__(self, access_key_id, secret_access_key, region_name):
self._ec2_conn = ec2.connect_to_region(region_name,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
logging.info('Initialized aws connection to %s' % region_name)
self._cloudwatch_conn = cloudwatch.connect_to_region(region_name,
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key)
logging.info('Initialized cloud watch connection to %s' % region_name)
self._region_name = region_name
def get_connection(self):
return self._ec2_conn
def get_cloudwatch_connection(self):
return self._cloudwatch_conn
def get_instances(self, security_group=None):
conn = self.get_connection()
reservations = conn.get_all_instances()
instances = [inst for resv in reservations
for inst in resv.instances
if inst.state == 'running']
logging.info('Found %s running instances' % len(instances))
if security_group is not None:
logging.info('looking for instances in sg:%s...' % security_group)
instances_in_security_group = []
for inst in instances:
groups = []
for group in inst.groups:
groups.append(group.name)
if security_group in groups:
instances_in_security_group.append(inst)
logging.info('Found %s instances' % len(instances_in_security_group))
return instances_in_security_group
def get_instance_by_id(self, id):
conn = self.get_connection()
reservations = conn.get_all_instances([id])
for resv in reservations:
for instance in resv.instances:
return instance
def wait_for_run(self, instance, timeout=60, interval=5):
trial = timeout / interval
logging.info('Waiting for instance to launch...')
for _ in xrange(trial):
instance.update()
logging.info('Checking... Current State: %s', instance.state)
if instance.state == 'running':
logging.info('Instance running')
break
time.sleep(interval)
else:
logging.error('Cancelling launch due to time out.')
instance.terminate()
raise InstanceLaunchTimeOut()
return instance
def launch_instance(self, instance_properties):
conn = self.get_connection()
resv = conn.run_instances(
instance_properties.ami,
instance_type=instance_properties.type,
security_groups=[instance_properties.security_group],
placement=instance_properties.availability_zone,
key_name=instance_properties.key_pair_name)
for instance in resv.instances:
self.wait_for_run(instance)
conn.create_tags([instance.id],
{'Name': 'auto-' + str(datetime.now())})
instance.update()
return instance
def get_spot_request_by_id(self, id):
conn = self.get_connection()
requests = conn.get_all_spot_instance_requests([id])
for request in requests:
return request
def spot_price(self, instance_properties, hours=6):
conn = self.get_connection()
prices = conn.get_spot_price_history(
start_time=(datetime.utcnow() -
timedelta(hours=hours)).isoformat(),
end_time=datetime.utcnow().isoformat(),
instance_type=instance_properties.type,
product_description='Linux/UNIX',
availability_zone=instance_properties.availability_zone)
spot_price = sum(price.price for price in prices) / len(prices)
logging.info('Spot price seems to be: %s' % spot_price)
return spot_price
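# Illustrative note (assumed numbers): spot_price() averages the price points
# returned for the trailing window, and launch_spot_instance() below bids
# three times that average. E.g. a history of [0.031, 0.029, 0.030] averages
# to 0.030, so the submitted bid would be 0.090. The helper below restates
# that arithmetic in plain Python; it is not used by this class.
def _example_bid(prices, multiplier=3):
    return multiplier * (sum(prices) / len(prices))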
def wait_for_fulfill(self, request, timeout=3000, interval=15):
trial = timeout / interval
logging.info('Waiting for request to complete...')
for _ in xrange(trial):
request = self.get_spot_request_by_id(request.id)
logging.info('Checking... Current State: %s', request.state)
if request.state == 'active':
logging.info('Spot request active')
break
time.sleep(interval)
else:
logging.error('Cancelling spot request due to time out.')
request.cancel()
raise SpotRequestTimeOut()
return request
def launch_spot_instance(self, instance_properties):
conn = self.get_connection()
price = self.spot_price(instance_properties) * 3
logging.info('Requesting spot instance with bid %s ' % price)
requests = conn.request_spot_instances(
price=price,
image_id=instance_properties.ami,
count=1,
instance_type=instance_properties.type,
security_groups=[instance_properties.security_group],
placement=instance_properties.availability_zone,
key_name=instance_properties.key_pair_name)
for request in requests:
request = self.wait_for_fulfill(request)
instance = self.get_instance_by_id(request.instance_id)
self.wait_for_run(instance)
conn.create_tags([instance.id],
{'Name': 'auto-' + str(datetime.now())})
instance.update()
return instance
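# Usage sketch (not part of the original module; keys, region and property
# values are placeholders). The properties object only needs the attributes
# read above -- ami, type, security_group, availability_zone, key_pair_name --
# so a namedtuple can stand in for whatever the surrounding project passes:
#
#   from collections import namedtuple
#   Props = namedtuple('Props', 'ami type security_group availability_zone key_pair_name')
#   provider = AWS('ACCESS_KEY_ID', 'SECRET_ACCESS_KEY', 'us-east-1')
#   instance = provider.launch_spot_instance(
#       Props('ami-12345678', 'm1.small', 'workers', 'us-east-1a', 'deploy-key'))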
def cpu_utilization(self, instances, minutes=10):
logging.info('In cpu_utilization()')
logging.info('Getting cloudwatch connection')
conn = self.get_cloudwatch_connection()
stat_sum = 0.0
logging.info('Getting CPU Utilization for instances in list')
for instance in instances:
stats = conn.get_metric_statistics(
period=60,
start_time=datetime.utcnow() -
timedelta(minutes=minutes + 5),
end_time=datetime.utcnow(),
metric_name='CPUUtilization',
namespace='AWS/EC2',
statistics=['Average'],
dimensions={'InstanceId': instance.id})
if stats:
stat_sum += sum(stat['Average'] for stat in stats) / len(stats)
else:
raise ScaleError('Stat seems empty.')
try:
avg_cluster_utilization = stat_sum / len(instances)
except ZeroDivisionError:
raise ScaleError('Cluster has no nodes')
logging.info('Avg cluster utilization is %s' % avg_cluster_utilization)
return avg_cluster_utilization
| mit | 2,179,798,599,308,600,600 | -179,956,070,464,975,840 | 42.704142 | 79 | 0.568314 | false |
sitigrema/sitigrema.github.io | build.py | 1 | 10927 | #!/usr/bin/env python3
import os
import shutil
import json
import yaml
from PIL import Image
from nxtools import *
class GremaProduct():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def data_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".txt")
@property
def image_path(self):
return os.path.join(self.data_dir, self.parent.parent.slug, self.parent.slug, self.slug + ".jpg")
@property
def has_image(self):
return os.path.exists(self.image_path)
@property
def meta(self):
group_slug = self.parent.slug
cat_slug = self.parent.parent.slug
return {
"slug" : self.slug,
"title" : self.title,
"group_slug" : group_slug,
"group_title" : self.parent.title,
"cat_slug" : cat_slug,
"cat_title" : self.parent.parent.title,
"has_image" : self.has_image,
"image" : os.path.join("/products", cat_slug, group_slug, "{}.jpg".format(self.slug)) if self.has_image else "false"
}
def build(self, root_dir):
#output_dir = os.path.join(self.site_dir, "products", self.meta["cat_slug"], self.meta["group_slug"])
if not os.path.exists(self.data_path):
logging.warning("{} data file does not exist".format(self.data_path))
return
# read description and pricelist
description = ""
description_done = False
product_text = ""
pricelist = []
for pline in open(self.data_path).readlines():
r = pline.split(":")
if len(r) == 2 and r[1].strip().isdigit():
pricelist.append(r)
continue
if pline.strip() == "":
description_done = True
if not description_done:
description += pline
product_text += pline
description = description.replace("\n", "")
# write file
with open(os.path.join(root_dir, self.meta["slug"] + ".md"), "w") as f:
f.write("---\nlayout: product\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("description : {}\n".format(description))
if pricelist:
f.write("pricing:\n")
for v, c in pricelist:
f.write(" - variant : {}\n".format(v.strip()))
f.write(" price : {}\n".format(c.strip()))
f.write("---\n")
f.write("\n{}\n\n".format(product_text.strip()))
# create images
if self.has_image:
original_image = Image.open(self.image_path)
image_full_path = os.path.join(root_dir, "{}.jpg".format(self.slug))
image_thumb_path = os.path.join(root_dir, "{}_tn.jpg".format(self.slug))
if os.path.exists(image_full_path):
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
if not os.path.exists(image_thumb_path):
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
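# For orientation: build() above writes a Jekyll-style markdown file per
# product. With invented values, the output looks roughly like this
# (field order follows self.meta and the parsed price list):
#
#   ---
#   layout: product
#   slug : garden-hose
#   title : Garden Hose
#   group_slug : hoses
#   group_title : Hoses
#   cat_slug : garden
#   cat_title : Garden
#   has_image : True
#   image : /products/garden/hoses/garden-hose.jpg
#   description : Flexible 25 m hose.
#   pricing:
#     - variant : 25 m
#       price : 450
#   ---
#
#   Flexible 25 m hose.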
class GremaProductGroup():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.products = []
def get_product(self, query):
for product in self.products:
if product.title == query or product.slug == query:
return product
@property
def description(self):
return "TODO: group description"
@property
def data_dir(self):
return self.parent.data_dir
@property
def group_dir(self):
return os.path.join(self.data_dir, self.parent.slug, self.slug)
@property
def site_dir(self):
return self.parent.site_dir
@property
def meta(self):
return {
"title" : self.title,
"slug" : self.slug,
"group_slug" : self.slug, # kvuli zvyraznovani v sidebaru
"cat_slug" : self.parent.slug,
"has_index" : os.path.exists(os.path.join(self.group_dir, "index.txt")),
"has_image" : os.path.exists(os.path.join(self.group_dir, "index.jpg"))
}
@property
def map(self):
result = {key : self.meta[key] for key in self.meta}
result["products"] = [product.meta for product in self.products]
return result
def build(self, root_dir):
group_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(group_dir):
os.makedirs(group_dir)
# Create group index page
with open(os.path.join(group_dir, "index.md"), "w") as f:
f.write("---\nlayout: product_group\n")
for key in self.meta:
f.write("{} : {}\n".format(key, self.meta[key]))
f.write("products:\n")
for product in self.products:
f.write(" - slug: {}\n".format(product.slug))
f.write(" title: {}\n".format(product.title))
f.write(" has_image: {}\n".format(product.has_image))
f.write("---\n\n")
index_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.txt")
if os.path.exists(index_path):
f.write(open(index_path).read())
# Convert index image
index_image_path = os.path.join(self.data_dir, self.parent.slug, self.slug, "index.jpg")
if os.path.exists(index_image_path):
original_image = Image.open(index_image_path)
image_full_path = os.path.join(group_dir, "index.jpg")
image_thumb_path = os.path.join(group_dir, "index_tn.jpg")
image_full = original_image.resize((800, 500), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
# Build products
for product in self.products:
product.build(group_dir)
class GremaCategory():
def __init__(self, parent, title):
self.parent = parent
self.title = title
self.slug = slugify(title)
self.load_groups()
def get_product(self, query):
for group in self.groups:
product = group.get_product(query)
if product:
return product
@property
def data_dir(self):
return self.parent.data_dir
@property
def site_dir(self):
return self.parent.site_dir
@property
def map(self):
return {
"title" : self.title,
"slug" : self.slug,
"groups" : [group.map for group in self.groups if (group.products or group.meta["has_index"])]
}
def load_groups(self):
self.groups = []
index_path = os.path.join(self.data_dir, "index-{}.yml".format(self.slug))
if not os.path.exists(index_path):
logging.error("{} does not exist".format(index_path))
return
data = yaml.safe_load(open(index_path))
if not data:
logging.error("No data in {}".format(index_path))
return
for group_title in data.keys():
logging.debug("Creating category {}".format(group_title))
group = GremaProductGroup(self, group_title)
if data[group_title]:
for product_title in data[group_title]:
product = GremaProduct(group, product_title)
group.products.append(product)
self.groups.append(group)
def build(self, root_dir):
category_dir = os.path.join(root_dir, self.slug)
if not os.path.exists(category_dir):
os.makedirs(category_dir)
for group in self.groups:
group.build(category_dir)
class GremaSite():
def __init__(self):
self.data_dir = "_source"
self.site_dir = "."
self.load_categories()
def get_product(self, query):
for category in self.categories:
product = category.get_product(query)
if product:
return product
def load_categories(self):
self.categories = []
index_path = os.path.join(self.data_dir, "index.yml")
if not os.path.exists(index_path):
return
for category_title in yaml.safe_load(open(index_path))["categories"]:
category_title = to_unicode(category_title)
self.categories.append(GremaCategory(self, category_title))
def build(self):
product_map = []
root_dir = os.path.join(self.site_dir, "products")
for category in self.categories:
logging.info("Creating category {}".format(category.title))
category.build(root_dir)
cmap = category.map
if cmap["groups"]:
product_map.append(cmap)
product_map_path = os.path.join(self.site_dir, "_data", "products.yml")
with open(product_map_path, 'w') as outfile:
outfile.write(
yaml.dump(product_map)
)
with open("data.json","w") as f:
json.dump(product_map, f)
# Default thumbnail
original_image = Image.open(os.path.join(self.data_dir, "default.png"))
image_full_path = os.path.join(self.site_dir, "static", "default.jpg")
image_thumb_path = os.path.join(self.site_dir, "static", "default_tn.jpg")
image_full = original_image.resize((640, 400), Image.ANTIALIAS)
image_full.save(image_full_path)
image_thumb = original_image.resize((261, 163), Image.ANTIALIAS)
image_thumb.save(image_thumb_path)
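# For orientation: the products.yml written above is a list of category maps
# (see GremaCategory.map / GremaProductGroup.map). With invented names the
# YAML comes out roughly as follows (key order and flow style depend on the
# yaml.dump defaults):
#
#   - title: Garden
#     slug: garden
#     groups:
#       - title: Hoses
#         slug: hoses
#         group_slug: hoses
#         cat_slug: garden
#         has_index: true
#         has_image: true
#         products:
#           - slug: garden-hose
#             title: Garden Hose
#             group_slug: hoses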
def import_products(site, data_dir):
for fname in os.listdir(data_dir):
if os.path.splitext(fname)[1] != ".txt":
continue
product_source_path = os.path.join(data_dir, fname)
base_name = get_base_name(fname)
image_source_path = os.path.join(data_dir, base_name + ".jpg")
product = site.get_product(base_name)
if not product:
continue
product_dir = os.path.dirname(product.data_path)
if not os.path.exists(product_dir):
os.makedirs(product_dir)
shutil.copy2(product_source_path, product.data_path)
if os.path.exists(image_source_path):
shutil.copy2(image_source_path, product.image_path)
if __name__ == "__main__":
grema = GremaSite()
grema.build()
| mit | 5,600,786,904,910,301,000 | -5,197,698,745,767,174,000 | 32.314024 | 129 | 0.560996 | false |
wrouesnel/ansible | lib/ansible/module_utils/cloudstack.py | 10 | 23642 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, René Moser <[email protected]>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import sys
import time
from ansible.module_utils._text import to_text, to_native
try:
from cs import CloudStack, CloudStackException, read_config
HAS_LIB_CS = True
except ImportError:
HAS_LIB_CS = False
CS_HYPERVISORS = [
'KVM', 'kvm',
'VMware', 'vmware',
'BareMetal', 'baremetal',
'XenServer', 'xenserver',
'LXC', 'lxc',
'HyperV', 'hyperv',
'UCS', 'ucs',
'OVM', 'ovm',
'Simulator', 'simulator',
]
if sys.version_info > (3,):
long = int
def cs_argument_spec():
return dict(
api_key=dict(default=os.environ.get('CLOUDSTACK_KEY')),
api_secret=dict(default=os.environ.get('CLOUDSTACK_SECRET'), no_log=True),
api_url=dict(default=os.environ.get('CLOUDSTACK_ENDPOINT')),
api_http_method=dict(choices=['get', 'post'], default=os.environ.get('CLOUDSTACK_METHOD')),
api_timeout=dict(type='int', default=os.environ.get('CLOUDSTACK_TIMEOUT')),
api_region=dict(default=os.environ.get('CLOUDSTACK_REGION') or 'cloudstack'),
)
def cs_required_together():
return [['api_key', 'api_secret']]
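# Sketch of how a module typically consumes the two helpers above (the extra
# "name"/"state" options are invented for illustration; AnsibleModule comes
# from ansible.module_utils.basic):
#
#   argument_spec = cs_argument_spec()
#   argument_spec.update(dict(
#       name=dict(required=True),
#       state=dict(choices=['present', 'absent'], default='present'),
#   ))
#   module = AnsibleModule(
#       argument_spec=argument_spec,
#       required_together=cs_required_together(),
#       supports_check_mode=True,
#   )
#   acs = AnsibleCloudStack(module)
#   acs.get_vpc(key='id')  # any of the lookup helpers below can now be used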
class AnsibleCloudStack:
def __init__(self, module):
if not HAS_LIB_CS:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
'diff': {
'before': dict(),
'after': dict()
}
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be cast to int
self.returns_to_int = {}
# these keys will be compared case-sensitively in self.has_changed()
self.case_sensitive_keys = [
'id',
'displaytext',
'displayname',
'description',
]
self.module = module
self._cs = None
# Helper for VPCs
self._vpc_networks_ids = None
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.network = None
self.vpc = None
self.zone = None
self.vm = None
self.vm_default_nic = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
self.network_acl = None
@property
def cs(self):
if self._cs is None:
api_config = self.get_api_config()
self._cs = CloudStack(**api_config)
return self._cs
def get_api_config(self):
api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
try:
config = read_config(api_region)
except KeyError:
config = {}
api_config = {
'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
'key': self.module.params.get('api_key') or config.get('key'),
'secret': self.module.params.get('api_secret') or config.get('secret'),
'timeout': self.module.params.get('api_timeout') or config.get('timeout') or 10,
'method': self.module.params.get('api_http_method') or config.get('method') or 'get',
}
self.result.update({
'api_region': api_region,
'api_url': api_config['endpoint'],
'api_key': api_config['key'],
'api_timeout': api_config['timeout'],
'api_http_method': api_config['method'],
})
if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
self.fail_json(msg="Missing api credentials: can not authenticate")
return api_config
def fail_json(self, **kwargs):
self.result.update(kwargs)
self.module.fail_json(**self.result)
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
def has_changed(self, want_dict, current_dict, only_keys=None, skip_diff_for_keys=None):
result = False
for key, value in want_dict.items():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue
# Skip None values
if value is None:
continue
if key in current_dict:
if isinstance(value, (int, float, long, complex)):
# ensure we compare the same type
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, float):
current_dict[key] = float(current_dict[key])
elif isinstance(value, long):
current_dict[key] = long(current_dict[key])
elif isinstance(value, complex):
current_dict[key] = complex(current_dict[key])
if value != current_dict[key]:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = current_dict[key]
self.result['diff']['after'][key] = value
result = True
else:
before_value = to_text(current_dict[key])
after_value = to_text(value)
if self.case_sensitive_keys and key in self.case_sensitive_keys:
if before_value != after_value:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
# Test for diff in case insensitive way
elif before_value.lower() != after_value.lower():
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = before_value
self.result['diff']['after'][key] = after_value
result = True
else:
if skip_diff_for_keys and key not in skip_diff_for_keys:
self.result['diff']['before'][key] = None
self.result['diff']['after'][key] = to_text(value)
result = True
return result
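# Example of the comparison semantics above (values invented): with
#   want    = {'displaytext': 'Web', 'cpunumber': 2, 'account': None}
#   current = {'displaytext': 'web', 'cpunumber': 2}
# has_changed() returns True only because 'displaytext' is listed in
# self.case_sensitive_keys ('Web' != 'web'); 'cpunumber' compares equal after
# the int cast, the None value is skipped entirely, and self.result['diff']
# records the before/after pair for the key that differed.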
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def query_api(self, command, **args):
try:
res = getattr(self.cs, command)(**args)
if 'errortext' in res:
self.fail_json(msg="Failed: '%s'" % res['errortext'])
except CloudStackException as e:
self.fail_json(msg='CloudStackException: %s' % to_native(e))
except Exception as e:
self.fail_json(msg=to_native(e))
return res
def get_network_acl(self, key=None):
if self.network_acl is None:
args = {
'name': self.module.params.get('network_acl'),
'vpcid': self.get_vpc(key='id'),
}
network_acls = self.query_api('listNetworkACLLists', **args)
if network_acls:
self.network_acl = network_acls['networkacllist'][0]
self.result['network_acl'] = self.network_acl['name']
if self.network_acl:
return self._get_by_key(key, self.network_acl)
else:
self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
def get_vpc(self, key=None):
"""Return a VPC dictionary or the value of given key of."""
if self.vpc:
return self._get_by_key(key, self.vpc)
vpc = self.module.params.get('vpc')
if not vpc:
vpc = os.environ.get('CLOUDSTACK_VPC')
if not vpc:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
if not vpcs:
self.fail_json(msg="No VPCs available.")
for v in vpcs['vpc']:
if vpc in [v['name'], v['displaytext'], v['id']]:
# Fail if the identifier matches more than one VPC
if self.vpc:
self.fail_json(msg="More than one VPC found with the provided identifyer '%s'" % vpc)
else:
self.vpc = v
self.result['vpc'] = v['name']
if self.vpc:
return self._get_by_key(key, self.vpc)
self.fail_json(msg="VPC '%s' not found" % vpc)
def is_vpc_network(self, network_id):
"""Returns True if network is in VPC."""
# This is an efficient way to query a lot of networks at a time
if self._vpc_networks_ids is None:
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
}
vpcs = self.query_api('listVPCs', **args)
self._vpc_networks_ids = []
if vpcs:
for vpc in vpcs['vpc']:
for n in vpc.get('network', []):
self._vpc_networks_ids.append(n['id'])
return network_id in self._vpc_networks_ids
def get_network(self, key=None):
"""Return a network dictionary or the value of given key of."""
if self.network:
return self._get_by_key(key, self.network)
network = self.module.params.get('network')
if not network:
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.fail_json(msg="Could not find network for VPC '%s' due missing argument: network" % vpc_name)
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id'),
'vpcid': self.get_vpc(key='id')
}
networks = self.query_api('listNetworks', **args)
if not networks:
self.fail_json(msg="No networks available.")
for n in networks['network']:
# ignore any VPC network if vpc param is not given
if 'vpcid' in n and not self.get_vpc(key='id'):
continue
if network in [n['displaytext'], n['name'], n['id']]:
self.result['network'] = n['name']
self.network = n
return self._get_by_key(key, self.network)
self.fail_json(msg="Network '%s' not found" % network)
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
project = os.environ.get('CLOUDSTACK_PROJECT')
if not project:
return None
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id')
}
projects = self.query_api('listProjects', **args)
if projects:
for p in projects['project']:
if project.lower() in [p['name'].lower(), p['id']]:
self.result['project'] = p['name']
self.project = p
return self._get_by_key(key, self.project)
self.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.fail_json(msg="IP address param 'ip_address' is required")
args = {
'ipaddress': ip_address,
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'vpcid': self.get_vpc(key='id'),
}
ip_addresses = self.query_api('listPublicIpAddresses', **args)
if not ip_addresses:
self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
def get_vm(self, key=None, filter_zone=True):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.fail_json(msg="Virtual machine param 'vm' is required")
args = {
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
'zoneid': self.get_zone(key='id') if filter_zone else None,
}
vms = self.query_api('listVirtualMachines', **args)
if vms:
for v in vms['virtualmachine']:
if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
self.vm = v
return self._get_by_key(key, self.vm)
self.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.query_api('listDiskOfferings')
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
if not zone:
zone = os.environ.get('CLOUDSTACK_ZONE')
zones = self.query_api('listZones')
if not zones:
self.fail_json(msg="No zones available. Please create a zone first")
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
self.result['zone'] = self.zone['name']
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone.lower() in [z['name'].lower(), z['id']]:
self.result['zone'] = z['name']
self.zone = z
return self._get_by_key(key, self.zone)
self.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.query_api('listOsTypes')
if os_types:
for o in os_types['ostype']:
if os_type in [o['description'], o['id']]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.query_api('listHypervisors')
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
account = os.environ.get('CLOUDSTACK_ACCOUNT')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.fail_json(msg="Account must be specified with Domain")
args = {
'name': account,
'domainid': self.get_domain(key='id'),
'listall': True
}
accounts = self.query_api('listAccounts', **args)
if accounts:
self.account = accounts['account'][0]
self.result['account'] = self.account['name']
return self._get_by_key(key, self.account)
self.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
domain = os.environ.get('CLOUDSTACK_DOMAIN')
if not domain:
return None
args = {
'listall': True,
}
domains = self.query_api('listDomains', **args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
self.domain = d
self.result['domain'] = d['path']
return self._get_by_key(key, self.domain)
self.fail_json(msg="Domain '%s' not found" % domain)
def query_tags(self, resource, resource_type):
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
}
tags = self.query_api('listTags', **args)
return self.get_tags(resource=tags, key='tag')
def get_tags(self, resource=None, key='tags'):
existing_tags = []
for tag in resource.get(key) or []:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
'tags': tags,
}
if operation == "create":
response = self.query_api('createTags', **args)
else:
response = self.query_api('deleteTags', **args)
self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.query_api('listCapabilities')
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
def poll_job(self, job=None, key=None):
if 'jobid' in job:
while True:
res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.items():
if search_key in resource:
self.result[return_key] = resource[search_key]
# Bad bad API does not always return int when it should.
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
self.result[return_key] = int(resource[search_key])
if 'tags' in resource:
self.result['tags'] = resource['tags']
return self.result
| gpl-3.0 | 4,579,783,234,843,156,500 | -1,397,502,155,809,509,600 | 36.347551 | 129 | 0.534199 | false |
andrew-aladev/samba-talloc-debug | buildtools/wafadmin/Tools/qt4.py | 16 | 14672 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006 (ita)
"""
Qt4 support
If QT4_ROOT is given (absolute path), the configuration will look in it first
This module also demonstrates how to add tasks dynamically (when the build has started)
"""
try:
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
except ImportError:
has_xml = False
ContentHandler = object
else:
has_xml = True
import os, sys
import ccroot, cxx
import TaskGen, Task, Utils, Runner, Options, Node, Configure
from TaskGen import taskgen, feature, after, extension
from Logs import error
from Constants import *
MOC_H = ['.h', '.hpp', '.hxx', '.hh']
EXT_RCC = ['.qrc']
EXT_UI = ['.ui']
EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C']
class qxx_task(Task.Task):
"A cpp task that may create a moc task dynamically"
before = ['cxx_link', 'static_link']
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.moc_done = 0
def scan(self):
(nodes, names) = ccroot.scan(self)
# for some reasons (variants) the moc node may end in the list of node deps
for x in nodes:
if x.name.endswith('.moc'):
nodes.remove(x)
names.append(x.relpath_gen(self.inputs[0].parent))
return (nodes, names)
def runnable_status(self):
if self.moc_done:
# if there is a moc task, delay the computation of the file signature
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
# the moc file enters in the dependency calculation
# so we need to recompute the signature when the moc file is present
self.signature()
return Task.Task.runnable_status(self)
else:
# yes, really, there are people who generate cxx files
for t in self.run_after:
if not t.hasrun:
return ASK_LATER
self.add_moc_tasks()
return ASK_LATER
def add_moc_tasks(self):
node = self.inputs[0]
tree = node.__class__.bld
try:
# compute the signature once to know if there is a moc file to create
self.signature()
except KeyError:
# the moc file may be referenced somewhere else
pass
else:
# remove the signature, it must be recomputed with the moc task
delattr(self, 'cache_sig')
moctasks=[]
mocfiles=[]
variant = node.variant(self.env)
try:
tmp_lst = tree.raw_deps[self.unique_id()]
tree.raw_deps[self.unique_id()] = []
except KeyError:
tmp_lst = []
for d in tmp_lst:
if not d.endswith('.moc'): continue
# paranoid check
if d in mocfiles:
error("paranoia owns")
continue
# process that base.moc only once
mocfiles.append(d)
# find the extension (performed only when the .cpp has changed)
base2 = d[:-4]
for path in [node.parent] + self.generator.env['INC_PATHS']:
tree.rescan(path)
vals = getattr(Options.options, 'qt_header_ext', '') or MOC_H
for ex in vals:
h_node = path.find_resource(base2 + ex)
if h_node:
break
else:
continue
break
else:
raise Utils.WafError("no header found for %s which is a moc file" % str(d))
m_node = h_node.change_ext('.moc')
tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), m_node.name)] = h_node
# create the task
task = Task.TaskBase.classes['moc'](self.env, normal=0)
task.set_inputs(h_node)
task.set_outputs(m_node)
generator = tree.generator
generator.outstanding.insert(0, task)
generator.total += 1
moctasks.append(task)
# remove raw deps except the moc files to save space (optimization)
tmp_lst = tree.raw_deps[self.unique_id()] = mocfiles
# look at the file inputs, it is set right above
lst = tree.node_deps.get(self.unique_id(), ())
for d in lst:
name = d.name
if name.endswith('.moc'):
task = Task.TaskBase.classes['moc'](self.env, normal=0)
task.set_inputs(tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), name)]) # 1st element in a tuple
task.set_outputs(d)
generator = tree.generator
generator.outstanding.insert(0, task)
generator.total += 1
moctasks.append(task)
# simple scheduler dependency: run the moc task before others
self.run_after = moctasks
self.moc_done = 1
run = Task.TaskBase.classes['cxx'].__dict__['run']
def translation_update(task):
outs = [a.abspath(task.env) for a in task.outputs]
outs = " ".join(outs)
lupdate = task.env['QT_LUPDATE']
for x in task.inputs:
file = x.abspath(task.env)
cmd = "%s %s -ts %s" % (lupdate, file, outs)
Utils.pprint('BLUE', cmd)
task.generator.bld.exec_command(cmd)
class XMLHandler(ContentHandler):
def __init__(self):
self.buf = []
self.files = []
def startElement(self, name, attrs):
if name == 'file':
self.buf = []
def endElement(self, name):
if name == 'file':
self.files.append(''.join(self.buf))
def characters(self, cars):
self.buf.append(cars)
def scan(self):
"add the dependency on the files referenced in the qrc"
node = self.inputs[0]
parser = make_parser()
curHandler = XMLHandler()
parser.setContentHandler(curHandler)
fi = open(self.inputs[0].abspath(self.env))
parser.parse(fi)
fi.close()
nodes = []
names = []
root = self.inputs[0].parent
for x in curHandler.files:
nd = root.find_resource(x)
if nd: nodes.append(nd)
else: names.append(x)
return (nodes, names)
@extension(EXT_RCC)
def create_rcc_task(self, node):
"hook for rcc files"
rcnode = node.change_ext('_rc.cpp')
rcctask = self.create_task('rcc', node, rcnode)
cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
self.compiled_tasks.append(cpptask)
return cpptask
@extension(EXT_UI)
def create_uic_task(self, node):
"hook for uic tasks"
uictask = self.create_task('ui4', node)
uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])]
return uictask
class qt4_taskgen(cxx.cxx_taskgen):
def __init__(self, *k, **kw):
cxx.cxx_taskgen.__init__(self, *k, **kw)
self.features.append('qt4')
@extension('.ts')
def add_lang(self, node):
"""add all the .ts file into self.lang"""
self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
@feature('qt4')
@after('apply_link')
def apply_qt4(self):
if getattr(self, 'lang', None):
update = getattr(self, 'update', None)
lst=[]
trans=[]
for l in self.to_list(self.lang):
if not isinstance(l, Node.Node):
l = self.path.find_resource(l+'.ts')
t = self.create_task('ts2qm', l, l.change_ext('.qm'))
lst.append(t.outputs[0])
if update:
trans.append(t.inputs[0])
trans_qt4 = getattr(Options.options, 'trans_qt4', False)
if update and trans_qt4:
# we need the cpp files given, except the rcc task we create after
# FIXME may be broken
u = Task.TaskCmd(translation_update, self.env, 2)
u.inputs = [a.inputs[0] for a in self.compiled_tasks]
u.outputs = trans
if getattr(self, 'langname', None):
t = Task.TaskBase.classes['qm2rcc'](self.env)
t.set_inputs(lst)
t.set_outputs(self.path.find_or_declare(self.langname+'.qrc'))
t.path = self.path
k = create_rcc_task(self, t.outputs[0])
self.link_task.inputs.append(k.outputs[0])
self.env.append_value('MOC_FLAGS', self.env._CXXDEFFLAGS)
self.env.append_value('MOC_FLAGS', self.env._CXXINCFLAGS)
@extension(EXT_QT4)
def cxx_hook(self, node):
# create the compilation task: cpp or cc
try: obj_ext = self.obj_ext
except AttributeError: obj_ext = '_%d.o' % self.idx
task = self.create_task('qxx', node, node.change_ext(obj_ext))
self.compiled_tasks.append(task)
return task
def process_qm2rcc(task):
outfile = task.outputs[0].abspath(task.env)
f = open(outfile, 'w')
f.write('<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n')
for k in task.inputs:
f.write(' <file>')
#f.write(k.name)
f.write(k.path_to_parent(task.path))
f.write('</file>\n')
f.write('</qresource>\n</RCC>')
f.close()
b = Task.simple_task_type
b('moc', '${QT_MOC} ${MOC_FLAGS} ${SRC} ${MOC_ST} ${TGT}', color='BLUE', vars=['QT_MOC', 'MOC_FLAGS'], shell=False)
cls = b('rcc', '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath(env)} ${RCC_ST} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', after="qm2rcc", shell=False)
cls.scan = scan
b('ui4', '${QT_UIC} ${SRC} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', shell=False)
b('ts2qm', '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}', color='BLUE', before='qm2rcc', shell=False)
Task.task_type_from_func('qm2rcc', vars=[], func=process_qm2rcc, color='BLUE', before='rcc', after='ts2qm')
def detect_qt4(conf):
env = conf.env
opt = Options.options
qtdir = getattr(opt, 'qtdir', '')
qtbin = getattr(opt, 'qtbin', '')
qtlibs = getattr(opt, 'qtlibs', '')
useframework = getattr(opt, 'use_qt4_osxframework', True)
paths = []
# the path to qmake has been given explicitly
if qtbin:
paths = [qtbin]
# the qt directory has been given - we deduce the qt binary path
if not qtdir:
qtdir = conf.environ.get('QT4_ROOT', '')
qtbin = os.path.join(qtdir, 'bin')
paths = [qtbin]
# no qtdir, look in the path and in /usr/local/Trolltech
if not qtdir:
paths = os.environ.get('PATH', '').split(os.pathsep)
paths.append('/usr/share/qt4/bin/')
try:
lst = os.listdir('/usr/local/Trolltech/')
except OSError:
pass
else:
if lst:
lst.sort()
lst.reverse()
# keep the highest version
qtdir = '/usr/local/Trolltech/%s/' % lst[0]
qtbin = os.path.join(qtdir, 'bin')
paths.append(qtbin)
# at the end, try to find qmake in the paths given
# keep the one with the highest version
cand = None
prev_ver = ['4', '0', '0']
for qmk in ['qmake-qt4', 'qmake4', 'qmake']:
qmake = conf.find_program(qmk, path_list=paths)
if qmake:
try:
version = Utils.cmd_output([qmake, '-query', 'QT_VERSION']).strip()
except ValueError:
pass
else:
if version:
new_ver = version.split('.')
if new_ver > prev_ver:
cand = qmake
prev_ver = new_ver
if cand:
qmake = cand
else:
conf.fatal('could not find qmake for qt4')
conf.env.QMAKE = qmake
qtincludes = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_HEADERS']).strip()
qtdir = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
qtbin = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep
if not qtlibs:
try:
qtlibs = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_LIBS']).strip() + os.sep
except ValueError:
qtlibs = os.path.join(qtdir, 'lib')
def find_bin(lst, var):
for f in lst:
ret = conf.find_program(f, path_list=paths)
if ret:
env[var]=ret
break
vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split()
find_bin(['uic-qt3', 'uic3'], 'QT_UIC3')
find_bin(['uic-qt4', 'uic'], 'QT_UIC')
if not env['QT_UIC']:
conf.fatal('cannot find the uic compiler for qt4')
try:
version = Utils.cmd_output(env['QT_UIC'] + " -version 2>&1").strip()
except ValueError:
conf.fatal('your uic compiler is for qt3, add uic for qt4 to your path')
version = version.replace('Qt User Interface Compiler ','')
version = version.replace('User Interface Compiler for Qt', '')
if version.find(" 3.") != -1:
conf.check_message('uic version', '(too old)', 0, option='(%s)'%version)
sys.exit(1)
conf.check_message('uic version', '', 1, option='(%s)'%version)
find_bin(['moc-qt4', 'moc'], 'QT_MOC')
find_bin(['rcc'], 'QT_RCC')
find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE')
find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE')
env['UIC3_ST']= '%s -o %s'
env['UIC_ST'] = '%s -o %s'
env['MOC_ST'] = '-o'
env['ui_PATTERN'] = 'ui_%s.h'
env['QT_LRELEASE_FLAGS'] = ['-silent']
vars_debug = [a+'_debug' for a in vars]
try:
conf.find_program('pkg-config', var='pkgconfig', path_list=paths, mandatory=True)
except Configure.ConfigurationError:
for lib in vars_debug+vars:
uselib = lib.upper()
d = (lib.find('_debug') > 0) and 'd' or ''
# original author seems to prefer static to shared libraries
for (pat, kind) in ((conf.env.staticlib_PATTERN, 'STATIC'), (conf.env.shlib_PATTERN, '')):
conf.check_message_1('Checking for %s %s' % (lib, kind))
for ext in ['', '4']:
path = os.path.join(qtlibs, pat % (lib + d + ext))
if os.path.exists(path):
env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
conf.check_message_2('ok ' + path, 'GREEN')
break
path = os.path.join(qtbin, pat % (lib + d + ext))
if os.path.exists(path):
env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
conf.check_message_2('ok ' + path, 'GREEN')
break
else:
conf.check_message_2('not found', 'YELLOW')
continue
break
env.append_unique('LIBPATH_' + uselib, qtlibs)
env.append_unique('CPPPATH_' + uselib, qtincludes)
env.append_unique('CPPPATH_' + uselib, qtincludes + os.sep + lib)
else:
for i in vars_debug+vars:
try:
conf.check_cfg(package=i, args='--cflags --libs --silence-errors', path=conf.env.pkgconfig)
except ValueError:
pass
# the libpaths are set nicely, unfortunately they make really long command-lines
# remove the qtcore ones from qtgui, etc
def process_lib(vars_, coreval):
for d in vars_:
var = d.upper()
if var == 'QTCORE': continue
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if lib in core: continue
accu.append(lib)
env['LIBPATH_'+var] = accu
process_lib(vars, 'LIBPATH_QTCORE')
process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG')
# rpath if wanted
want_rpath = getattr(Options.options, 'want_rpath', 1)
if want_rpath:
def process_rpath(vars_, coreval):
for d in vars_:
var = d.upper()
value = env['LIBPATH_'+var]
if value:
core = env[coreval]
accu = []
for lib in value:
if var != 'QTCORE':
if lib in core:
continue
accu.append('-Wl,--rpath='+lib)
env['RPATH_'+var] = accu
process_rpath(vars, 'LIBPATH_QTCORE')
process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG')
env['QTLOCALE'] = str(env['PREFIX'])+'/share/locale'
def detect(conf):
detect_qt4(conf)
def set_options(opt):
opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
opt.add_option('--header-ext',
type='string',
default='',
help='header extension for moc files',
dest='qt_header_ext')
for i in 'qtdir qtbin qtlibs'.split():
opt.add_option('--'+i, type='string', default='', dest=i)
if sys.platform == "darwin":
opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True)
opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
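# Illustrative project wscript for this tool (file names and uselib values are
# invented; the exact task-generator API depends on the wafadmin revision that
# ships this module):
#
#   def set_options(opt):
#       opt.tool_options('qt4')
#
#   def configure(conf):
#       conf.check_tool('qt4')
#
#   def build(bld):
#       bld.new_task_gen(features='qt4 cxx cprogram',
#                        source='main.cpp mainwindow.ui resources.qrc',
#                        target='app',
#                        uselib='QTCORE QTGUI')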
| gpl-3.0 | 2,062,673,074,691,619,800 | -5,267,732,050,348,155,000 | 28.053465 | 161 | 0.649264 | false |
OpenCode/l10n-italy | __unported__/l10n_it_prima_nota_cassa/__openerp__.py | 3 | 1526 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011-2013 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italian Localisation - Prima Nota Cassa',
'version': '0.1',
'category': 'Localisation/Italy',
'description': """Accounting reports - Prima Nota Cassa - Webkit""",
'author': 'OpenERP Italian Community',
'website': 'http://www.openerp-italia.org',
'license': 'AGPL-3',
"depends" : ['account', 'report_webkit'],
"init_xml" : [
],
"update_xml" : [
'reports.xml',
'wizard/wizard_print_prima_nota_cassa.xml',
],
"demo_xml" : [],
"active": False,
'installable': False
}
| agpl-3.0 | -3,197,210,811,612,411,400 | 3,826,335,954,064,127,500 | 38.128205 | 80 | 0.579292 | false |
ethanhlc/streamlink | examples/gst-player.py | 3 | 3875 | #!/usr/bin/env python
from __future__ import print_function
import sys
import gi
from gi.repository import GObject as gobject, Gst as gst
from streamlink import Streamlink, StreamError, PluginError, NoPluginError
def exit(msg):
print(msg, file=sys.stderr)
sys.exit()
class StreamlinkPlayer(object):
def __init__(self):
self.fd = None
self.mainloop = gobject.MainLoop()
# This creates a playbin pipeline and using the appsrc source
# we can feed it our stream data
self.pipeline = gst.ElementFactory.make("playbin", None)
self.pipeline.set_property("uri", "appsrc://")
# When the playbin creates the appsrc source it will call
# this callback and allow us to configure it
self.pipeline.connect("source-setup", self.on_source_setup)
# Creates a bus and set callbacks to receive errors
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_eos)
self.bus.connect("message::error", self.on_error)
def exit(self, msg):
self.stop()
exit(msg)
def stop(self):
# Stop playback and exit mainloop
self.pipeline.set_state(gst.State.NULL)
self.mainloop.quit()
# Close the stream
if self.fd:
self.fd.close()
def play(self, stream):
# Attempt to open the stream
try:
self.fd = stream.open()
except StreamError as err:
self.exit("Failed to open stream: {0}".format(err))
# Start playback
self.pipeline.set_state(gst.State.PLAYING)
self.mainloop.run()
def on_source_setup(self, element, source):
# When this callback is called the appsrc expects
# us to feed it more data
source.connect("need-data", self.on_source_need_data)
def on_source_need_data(self, source, length):
# Attempt to read data from the stream
try:
data = self.fd.read(length)
except IOError as err:
self.exit("Failed to read data from stream: {0}".format(err))
# If data is empty it's the end of stream
if not data:
source.emit("end-of-stream")
return
# Convert the Python bytes into a GStreamer Buffer
# and then push it to the appsrc
buf = gst.Buffer.new_wrapped(data)
source.emit("push-buffer", buf)
def on_eos(self, bus, msg):
# Stop playback on end of stream
self.stop()
def on_error(self, bus, msg):
# Print error message and exit on error
error = msg.parse_error()[1]
self.exit(error)
def main():
if len(sys.argv) < 3:
exit("Usage: {0} <url> <quality>".format(sys.argv[0]))
# Initialize and check GStreamer version
gi.require_version("Gst", "1.0")
gobject.threads_init()
gst.init(None)
# Collect arguments
url = sys.argv[1]
quality = sys.argv[2]
# Create the Streamlink session
streamlink = Streamlink()
# Enable logging
streamlink.set_loglevel("info")
streamlink.set_logoutput(sys.stdout)
# Attempt to fetch streams
try:
streams = streamlink.streams(url)
except NoPluginError:
exit("Streamlink is unable to handle the URL '{0}'".format(url))
except PluginError as err:
exit("Plugin error: {0}".format(err))
if not streams:
exit("No streams found on URL '{0}'".format(url))
# Look for specified stream
if quality not in streams:
exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))
# We found the stream
stream = streams[quality]
# Create the player and start playback
player = StreamlinkPlayer()
# Blocks until playback is done
player.play(stream)
if __name__ == "__main__":
main()
| bsd-2-clause | -4,993,037,271,012,724,000 | 8,851,484,318,746,507,000 | 26.877698 | 77 | 0.613677 | false |
bukalov/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/workspace_mock.py | 191 | 1871 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class MockWorkspace(object):
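    """Test double for Workspace: fabricates an unused filename deterministically
    and records the arguments passed to create_zip()."""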
def find_unused_filename(self, directory, name, extension, search_limit=10):
return "%s/%s.%s" % (directory, name, extension)
def create_zip(self, zip_path, source_path):
self.zip_path = zip_path
self.source_path = source_path
return object() # Something that is not None
| bsd-3-clause | 1,222,879,853,506,155,000 | -6,154,463,032,776,520,000 | 49.567568 | 80 | 0.759487 | false |
fujunwei/chromium-crosswalk | ppapi/generators/idl_node.py | 103 | 13144 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST"""
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of the IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_propertynode import IDLPropertyNode
from idl_release import IDLRelease, IDLReleaseMap
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
def __init__(self, name, value):
self.cls = 'ExtAttribute'
self.name = name
self.value = value
def __str__(self):
return '%s=%s' % (self.name, self.value)
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a namespace and propertynode to
# allow for look-ups. IDLNode is derived from IDLRelease, so it is
# version aware.
#
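# For example (the identifiers and values below are purely illustrative), the
# parser might build an Interface node carrying a NAME extended attribute:
#   name_attr = IDLAttribute('NAME', 'PPB_Example')
#   node = IDLNode('Interface', 'example.idl', 10, 0, [name_attr])
#   node.GetProperty('NAME')   # -> 'PPB_Example'
#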
class IDLNode(IDLRelease):
# Set of object IDLNode types which have a name and belong in the namespace.
NamedSet = set(['Enum', 'EnumItem', 'File', 'Function', 'Interface',
'Member', 'Param', 'Struct', 'Type', 'Typedef'])
def __init__(self, cls, filename, lineno, pos, children=None):
# Initialize with no starting or ending Version
IDLRelease.__init__(self, None, None)
self.cls = cls
self.lineno = lineno
self.pos = pos
self._filename = filename
self._deps = {}
self.errors = 0
self.namespace = None
self.typelist = None
self.parent = None
self._property_node = IDLPropertyNode()
self._unique_releases = None
# A list of unique releases for this node
self.releases = None
# A map from any release, to the first unique release
self.first_release = None
# self._children is a list of children ordered as defined
self._children = []
# Process the passed in list of children, placing ExtAttributes into the
# property dictionary, and nodes into the local child list in order. In
# addition, add nodes to the namespace if the class is in the NamedSet.
if children:
for child in children:
if child.cls == 'ExtAttribute':
self.SetProperty(child.name, child.value)
else:
self.AddChild(child)
def __str__(self):
name = self.GetName()
if name is None:
name = ''
return '%s(%s)' % (self.cls, name)
def Location(self):
"""Return a file and line number for where this node was defined."""
return '%s(%d)' % (self._filename, self.lineno)
def Error(self, msg):
"""Log an error for this object."""
self.errors += 1
ErrOut.LogLine(self._filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
filenode = self.GetProperty('FILE')
if filenode:
errcnt = filenode.GetProperty('ERRORS')
if not errcnt:
errcnt = 0
filenode.SetProperty('ERRORS', errcnt + 1)
def Warning(self, msg):
"""Log a warning for this object."""
WarnOut.LogLine(self._filename, self.lineno, 0, ' %s %s' %
(str(self), msg))
def GetName(self):
return self.GetProperty('NAME')
def Dump(self, depth=0, comments=False, out=sys.stdout):
"""Dump this object and its children"""
if self.cls in ['Comment', 'Copyright']:
is_comment = True
else:
is_comment = False
# Skip this node if it's a comment, and we are not printing comments
if not comments and is_comment:
return
tab = ''.rjust(depth * 2)
if is_comment:
out.write('%sComment\n' % tab)
for line in self.GetName().split('\n'):
out.write('%s "%s"\n' % (tab, line))
else:
ver = IDLRelease.__str__(self)
if self.releases:
release_list = ': ' + ' '.join(self.releases)
else:
release_list = ': undefined'
out.write('%s%s%s%s\n' % (tab, self, ver, release_list))
if self.typelist:
out.write('%s Typelist: %s\n' % (tab, self.typelist.GetReleases()[0]))
properties = self._property_node.GetPropertyList()
if properties:
out.write('%s Properties\n' % tab)
for p in properties:
if is_comment and p == 'NAME':
# Skip printing the name for comments, since we printed above already
continue
out.write('%s %s : %s\n' % (tab, p, self.GetProperty(p)))
for child in self._children:
child.Dump(depth+1, comments=comments, out=out)
def IsA(self, *typelist):
"""Check if node is of a given type."""
return self.cls in typelist
def GetListOf(self, *keys):
"""Get a list of objects for the given key(s)."""
out = []
for child in self._children:
if child.cls in keys:
out.append(child)
return out
def GetOneOf(self, *keys):
"""Get an object for the given key(s)."""
out = self.GetListOf(*keys)
if out:
return out[0]
return None
def SetParent(self, parent):
self._property_node.AddParent(parent)
self.parent = parent
def AddChild(self, node):
node.SetParent(self)
self._children.append(node)
# Get a list of all children
def GetChildren(self):
return self._children
def GetType(self, release):
if not self.typelist:
return None
return self.typelist.FindRelease(release)
def GetDeps(self, release, visited=None):
visited = visited or set()
# If this release is not valid for this object, then done.
if not self.IsRelease(release) or self.IsA('Comment', 'Copyright'):
return set([])
# If we have cached the info for this release, return the cached value
deps = self._deps.get(release, None)
if deps is not None:
return deps
# If we are already visited, then return
if self in visited:
return set([self])
# Otherwise, build the dependency list
visited |= set([self])
deps = set([self])
# Get child deps
for child in self.GetChildren():
deps |= child.GetDeps(release, visited)
visited |= set(deps)
# Get type deps
typeref = self.GetType(release)
if typeref:
deps |= typeref.GetDeps(release, visited)
self._deps[release] = deps
return deps
def GetVersion(self, release):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetVersion(release)
def GetUniqueReleases(self, releases):
"""Return the unique set of first releases corresponding to input
Since we are returning the corresponding 'first' version for a
release, we may return a release version prior to the one in the list."""
my_min, my_max = self.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
return []
out = set()
for rel in releases:
remapped = self.first_release[rel]
if not remapped:
continue
out |= set([remapped])
# Cache the most recent set of unique_releases
self._unique_releases = sorted(out)
return self._unique_releases
def LastRelease(self, release):
# Get the most recent release from the most recently generated set of
# cached unique releases.
if self._unique_releases and self._unique_releases[-1] > release:
return False
return True
def GetRelease(self, version):
filenode = self.GetProperty('FILE')
if not filenode:
return None
return filenode.release_map.GetRelease(version)
def _GetReleaseList(self, releases, visited=None):
visited = visited or set()
if not self.releases:
# If we are unversionable, then return first available release
if self.IsA('Comment', 'Copyright', 'Label'):
self.releases = []
return self.releases
      # Generate the first release for this node and, if it is deprecated
      # within this subset, also the last release.
my_min, my_max = self.GetMinMax(releases)
if my_max != releases[-1]:
my_releases = set([my_min, my_max])
else:
my_releases = set([my_min])
r = self.GetRelease(self.GetProperty('version'))
if not r in my_releases:
my_releases |= set([r])
# Break cycle if we reference ourselves
if self in visited:
return [my_min]
visited |= set([self])
# Files inherit all their releases from items in the file
if self.IsA('AST', 'File'):
my_releases = set()
# Visit all children
child_releases = set()
# Exclude sibling results from parent visited set
cur_visits = visited
for child in self._children:
child_releases |= set(child._GetReleaseList(releases, cur_visits))
visited |= set(child_releases)
# Visit my type
type_releases = set()
if self.typelist:
type_list = self.typelist.GetReleases()
for typenode in type_list:
type_releases |= set(typenode._GetReleaseList(releases, cur_visits))
type_release_list = sorted(type_releases)
if my_min < type_release_list[0]:
type_node = type_list[0]
self.Error('requires %s in %s which is undefined at %s.' % (
type_node, type_node._filename, my_min))
for rel in child_releases | type_releases:
if rel >= my_min and rel <= my_max:
my_releases |= set([rel])
self.releases = sorted(my_releases)
return self.releases
def BuildReleaseMap(self, releases):
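    # Populate self.first_release, mapping every release in 'releases' to the
    # most recent release (at or before it) in which this node changed, or
    # None where the node is not defined.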
unique_list = self._GetReleaseList(releases)
_, my_max = self.GetMinMax(releases)
self.first_release = {}
last_rel = None
for rel in releases:
if rel in unique_list:
last_rel = rel
self.first_release[rel] = last_rel
if rel == my_max:
last_rel = None
def SetProperty(self, name, val):
self._property_node.SetProperty(name, val)
def GetProperty(self, name):
return self._property_node.GetProperty(name)
def GetPropertyLocal(self, name):
return self._property_node.GetPropertyLocal(name)
def NodeIsDevOnly(self):
"""Returns true iff a node is only in dev channel."""
return self.GetProperty('dev_version') and not self.GetProperty('version')
def DevInterfaceMatchesStable(self, release):
"""Returns true if an interface has an equivalent stable version."""
assert(self.IsA('Interface'))
for child in self.GetListOf('Member'):
unique = child.GetUniqueReleases([release])
if not unique or not child.InReleases([release]):
continue
if child.NodeIsDevOnly():
return False
return True
#
# IDLFile
#
# A specialized version of IDLNode which tracks errors and warnings.
#
class IDLFile(IDLNode):
def __init__(self, name, children, errors=0):
attrs = [IDLAttribute('NAME', name),
IDLAttribute('ERRORS', errors)]
if not children:
children = []
IDLNode.__init__(self, 'File', name, 1, 0, attrs + children)
# TODO(teravest): Why do we set release map like this here? This looks
# suspicious...
self.release_map = IDLReleaseMap([('M13', 1.0, 'stable')])
#
# Tests
#
def StringTest():
errors = 0
name_str = 'MyName'
text_str = 'MyNode(%s)' % name_str
name_node = IDLAttribute('NAME', name_str)
node = IDLNode('MyNode', 'no file', 1, 0, [name_node])
if node.GetName() != name_str:
ErrOut.Log('GetName returned >%s< not >%s<' % (node.GetName(), name_str))
errors += 1
if node.GetProperty('NAME') != name_str:
ErrOut.Log('Failed to get name property.')
errors += 1
if str(node) != text_str:
ErrOut.Log('str() returned >%s< not >%s<' % (str(node), text_str))
errors += 1
if not errors:
InfoOut.Log('Passed StringTest')
return errors
def ChildTest():
errors = 0
child = IDLNode('child', 'no file', 1, 0)
parent = IDLNode('parent', 'no file', 1, 0, [child])
if child.parent != parent:
ErrOut.Log('Failed to connect parent.')
errors += 1
if [child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren.')
errors += 1
if child != parent.GetOneOf('child'):
ErrOut.Log('Failed GetOneOf(child)')
errors += 1
if parent.GetOneOf('bogus'):
ErrOut.Log('Failed GetOneOf(bogus)')
errors += 1
if not parent.IsA('parent'):
ErrOut.Log('Expecting parent type')
errors += 1
parent = IDLNode('parent', 'no file', 1, 0, [child, child])
if [child, child] != parent.GetChildren():
ErrOut.Log('Failed GetChildren2.')
errors += 1
if not errors:
InfoOut.Log('Passed ChildTest')
return errors
def Main():
errors = StringTest()
errors += ChildTest()
if errors:
ErrOut.Log('IDLNode failed with %d errors.' % errors)
return -1
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause | 9,019,801,712,235,172,000 | -80,964,758,040,899,840 | 28.404922 | 79 | 0.638238 | false |
yaroslavprogrammer/django | tests/forms_tests/tests/test_media.py | 131 | 45513 | # -*- coding: utf-8 -*-
from django.forms import TextInput, Media, CharField, Form, MultiWidget
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import override_settings
@override_settings(
STATIC_URL=None,
MEDIA_URL='http://media.example.com/media/',
)
class FormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
@override_settings(
STATIC_URL='http://media.example.com/static/',
MEDIA_URL='http://media.example.com/media/',
)
class StaticFormsMediaTestCase(TestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1','/path/to/css2')}, js=('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js = ('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js = ('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1','/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1','/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1','/path/to/css2')
}
js = ('/path/to/js1','http://media.other.com/path/to/js2','https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2','/path/to/css3')
}
js = ('/path/to/js1','/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3','path/to/css1')
}
js = ('/path/to/js1','/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
| bsd-3-clause | -1,117,556,937,123,396,400 | 103,474,482,826,455,300 | 49.179713 | 173 | 0.5692 | false |
UK992/servo | tests/wpt/web-platform-tests/webdriver/tests/take_screenshot/user_prompts.py | 13 | 3735 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_png, assert_success
from tests.support.inline import inline
def take_screenshot(session):
return session.transport.send(
"GET", "session/{session_id}/screenshot".format(**vars(session)))
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
value = assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert_png(value)
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
session.url = inline("<input/>")
create_dialog(dialog_type, text=dialog_type)
response = take_screenshot(session)
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
return check_user_prompt_not_closed_but_exception
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| mpl-2.0 | 5,337,812,689,792,135,000 | -4,177,002,843,532,936,000 | 31.763158 | 97 | 0.709772 | false |
Spiderlover/Toontown | otp/launcher/DummyLauncherBase.py | 5 | 4454 | from pandac.PandaModules import *
import string
from direct.showbase.MessengerGlobal import *
from direct.showbase.DirectObject import DirectObject
from direct.showbase.EventManagerGlobal import *
from direct.task.TaskManagerGlobal import *
from direct.task.Task import Task
class DummyLauncherBase:
def __init__(self):
self.logPrefix = ''
self._downloadComplete = False
self.phaseComplete = {}
for phase in self.LauncherPhases:
self.phaseComplete[phase] = 0
self.firstPhase = self.LauncherPhases[0]
self.finalPhase = self.LauncherPhases[-1]
self.launcherFileDbHash = HashVal()
self.serverDbFileHash = HashVal()
self.setPandaErrorCode(0)
self.setServerVersion('dev')
def isDummy(self):
return 1
def startFakeDownload(self):
if ConfigVariableBool('fake-downloads', 0).getValue():
duration = ConfigVariableDouble('fake-download-duration', 60).getValue()
self.fakeDownload(duration)
else:
for phase in self.LauncherPhases:
self.phaseComplete[phase] = 100
self.downloadDoneTask(None)
return
def isTestServer(self):
return base.config.GetBool('is-test-server', 0)
def setPhaseCompleteArray(self, newPhaseComplete):
self.phaseComplete = newPhaseComplete
def setPhaseComplete(self, phase, percent):
self.phaseComplete[phase] = percent
def getPhaseComplete(self, phase):
return self.phaseComplete[phase] >= 100
def setPandaWindowOpen(self):
self.windowOpen = 1
def setPandaErrorCode(self, code):
self.pandaErrorCode = code
def getPandaErrorCode(self):
return self.pandaErrorCode
def setDisconnectDetailsNormal(self):
self.disconnectCode = 0
self.disconnectMsg = 'normal'
def setDisconnectDetails(self, newCode, newMsg):
self.disconnectCode = newCode
self.disconnectMsg = newMsg
def setServerVersion(self, version):
self.ServerVersion = version
def getServerVersion(self):
return self.ServerVersion
def getIsNewInstallation(self):
return base.config.GetBool('new-installation', 0)
def setIsNotNewInstallation(self):
pass
def getLastLogin(self):
if hasattr(self, 'lastLogin'):
return self.lastLogin
return ''
def setLastLogin(self, login):
self.lastLogin = login
def setUserLoggedIn(self):
self.userLoggedIn = 1
def setPaidUserLoggedIn(self):
self.paidUserLoggedIn = 1
def getGameServer(self):
return '206.16.11.19'
def getAccountServer(self):
return ''
def getDeployment(self):
return 'US'
def getBlue(self):
return None
def getPlayToken(self):
return None
def getDISLToken(self):
return None
def fakeDownloadPhaseTask(self, task):
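        # Descriptive comment (added): advance this phase's fake download progress in proportion to elapsed task time, capped at 100%.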
percentComplete = min(100, int(round(task.time / float(task.timePerPhase) * 100)))
self.setPhaseComplete(task.phase, percentComplete)
messenger.send('launcherPercentPhaseComplete', [task.phase,
percentComplete,
0,
0])
if percentComplete >= 100.0:
messenger.send('phaseComplete-' + `(task.phase)`)
return Task.done
else:
return Task.cont
def downloadDoneTask(self, task):
self._downloadComplete = True
messenger.send('launcherAllPhasesComplete')
return Task.done
def fakeDownload(self, timePerPhase):
self.phaseComplete = {1: 100,
2: 100,
3: 0,
3.5: 0,
4: 0,
5: 0,
5.5: 0,
6: 0,
7: 0,
8: 0,
9: 0,
10: 0,
11: 0,
12: 0,
13: 0}
phaseTaskList = []
firstPhaseIndex = self.LauncherPhases.index(self.firstPhase)
for phase in self.LauncherPhases[firstPhaseIndex:]:
phaseTask = Task(self.fakeDownloadPhaseTask, 'phaseDownload' + str(phase))
phaseTask.timePerPhase = timePerPhase
phaseTask.phase = phase
phaseTaskList.append(phaseTask)
phaseTaskList.append(Task(self.downloadDoneTask))
downloadSequence = Task.sequence(*phaseTaskList)
taskMgr.remove('downloadSequence')
taskMgr.add(downloadSequence, 'downloadSequence')
| mit | 6,148,428,271,806,977,000 | 7,864,431,018,695,991,000 | 27.369427 | 90 | 0.629771 | false |
schets/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause | -4,804,671,353,337,185,000 | -5,757,911,618,088,750,000 | 32.5 | 77 | 0.621769 | false |
randynobx/ansible | lib/ansible/modules/network/panos/panos_lic.py | 78 | 4915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
xapi.op(cmd="show system info", cmd_xml=True)
r = xapi.element_root
serial = r.find('.//serial')
if serial is None:
module.fail_json(msg="No <serial> tag in show system info")
serial = serial.text
return serial
def apply_authcode(xapi, module, auth_code):
try:
xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
cmd_xml=True)
except pan.xapi.PanXapiError:
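        # Descriptive comment (added): the op call can raise even when licensing succeeded; inspect the raw XML response to tell success from an invalid auth code.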
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def fetch_authcode(xapi, module):
try:
xapi.op(cmd='request license fetch', cmd_xml=True)
except pan.xapi.PanXapiError:
if hasattr(xapi, 'xml_document'):
if 'Successfully' in xapi.xml_document:
return
if 'Invalid Auth Code' in xapi.xml_document:
module.fail_json(msg="Invalid Auth Code")
raise
return
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
auth_code=dict(),
username=dict(default='admin'),
force=dict(type='bool', default=False)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_LIB:
module.fail_json(msg='pan-python is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
auth_code = module.params["auth_code"]
force = module.params['force']
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
if not force:
serialnumber = get_serial(xapi, module)
if serialnumber != 'unknown':
return module.exit_json(changed=False, serialnumber=serialnumber)
if auth_code:
apply_authcode(xapi, module, auth_code)
else:
fetch_authcode(xapi, module)
module.exit_json(changed=True, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 | 2,138,590,849,227,305,500 | 2,739,737,009,640,587,300 | 26.61236 | 99 | 0.633571 | false |
miconof/headphones | lib/unidecode/x06a.py | 252 | 4674 | data = (
'Di ', # 0x00
'Zhuang ', # 0x01
'Le ', # 0x02
'Lang ', # 0x03
'Chen ', # 0x04
'Cong ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Qing ', # 0x08
'Shuang ', # 0x09
'Fan ', # 0x0a
'Tong ', # 0x0b
'Guan ', # 0x0c
'Ji ', # 0x0d
'Suo ', # 0x0e
'Lei ', # 0x0f
'Lu ', # 0x10
'Liang ', # 0x11
'Mi ', # 0x12
'Lou ', # 0x13
'Chao ', # 0x14
'Su ', # 0x15
'Ke ', # 0x16
'Shu ', # 0x17
'Tang ', # 0x18
'Biao ', # 0x19
'Lu ', # 0x1a
'Jiu ', # 0x1b
'Shu ', # 0x1c
'Zha ', # 0x1d
'Shu ', # 0x1e
'Zhang ', # 0x1f
'Men ', # 0x20
'Mo ', # 0x21
'Niao ', # 0x22
'Yang ', # 0x23
'Tiao ', # 0x24
'Peng ', # 0x25
'Zhu ', # 0x26
'Sha ', # 0x27
'Xi ', # 0x28
'Quan ', # 0x29
'Heng ', # 0x2a
'Jian ', # 0x2b
'Cong ', # 0x2c
'[?] ', # 0x2d
'Hokuso ', # 0x2e
'Qiang ', # 0x2f
'Tara ', # 0x30
'Ying ', # 0x31
'Er ', # 0x32
'Xin ', # 0x33
'Zhi ', # 0x34
'Qiao ', # 0x35
'Zui ', # 0x36
'Cong ', # 0x37
'Pu ', # 0x38
'Shu ', # 0x39
'Hua ', # 0x3a
'Kui ', # 0x3b
'Zhen ', # 0x3c
'Zun ', # 0x3d
'Yue ', # 0x3e
'Zhan ', # 0x3f
'Xi ', # 0x40
'Xun ', # 0x41
'Dian ', # 0x42
'Fa ', # 0x43
'Gan ', # 0x44
'Mo ', # 0x45
'Wu ', # 0x46
'Qiao ', # 0x47
'Nao ', # 0x48
'Lin ', # 0x49
'Liu ', # 0x4a
'Qiao ', # 0x4b
'Xian ', # 0x4c
'Run ', # 0x4d
'Fan ', # 0x4e
'Zhan ', # 0x4f
'Tuo ', # 0x50
'Lao ', # 0x51
'Yun ', # 0x52
'Shun ', # 0x53
'Tui ', # 0x54
'Cheng ', # 0x55
'Tang ', # 0x56
'Meng ', # 0x57
'Ju ', # 0x58
'Cheng ', # 0x59
'Su ', # 0x5a
'Jue ', # 0x5b
'Jue ', # 0x5c
'Tan ', # 0x5d
'Hui ', # 0x5e
'Ji ', # 0x5f
'Nuo ', # 0x60
'Xiang ', # 0x61
'Tuo ', # 0x62
'Ning ', # 0x63
'Rui ', # 0x64
'Zhu ', # 0x65
'Chuang ', # 0x66
'Zeng ', # 0x67
'Fen ', # 0x68
'Qiong ', # 0x69
'Ran ', # 0x6a
'Heng ', # 0x6b
'Cen ', # 0x6c
'Gu ', # 0x6d
'Liu ', # 0x6e
'Lao ', # 0x6f
'Gao ', # 0x70
'Chu ', # 0x71
'Zusa ', # 0x72
'Nude ', # 0x73
'Ca ', # 0x74
'San ', # 0x75
'Ji ', # 0x76
'Dou ', # 0x77
'Shou ', # 0x78
'Lu ', # 0x79
'[?] ', # 0x7a
'[?] ', # 0x7b
'Yuan ', # 0x7c
'Ta ', # 0x7d
'Shu ', # 0x7e
'Jiang ', # 0x7f
'Tan ', # 0x80
'Lin ', # 0x81
'Nong ', # 0x82
'Yin ', # 0x83
'Xi ', # 0x84
'Sui ', # 0x85
'Shan ', # 0x86
'Zui ', # 0x87
'Xuan ', # 0x88
'Cheng ', # 0x89
'Gan ', # 0x8a
'Ju ', # 0x8b
'Zui ', # 0x8c
'Yi ', # 0x8d
'Qin ', # 0x8e
'Pu ', # 0x8f
'Yan ', # 0x90
'Lei ', # 0x91
'Feng ', # 0x92
'Hui ', # 0x93
'Dang ', # 0x94
'Ji ', # 0x95
'Sui ', # 0x96
'Bo ', # 0x97
'Bi ', # 0x98
'Ding ', # 0x99
'Chu ', # 0x9a
'Zhua ', # 0x9b
'Kuai ', # 0x9c
'Ji ', # 0x9d
'Jie ', # 0x9e
'Jia ', # 0x9f
'Qing ', # 0xa0
'Zhe ', # 0xa1
'Jian ', # 0xa2
'Qiang ', # 0xa3
'Dao ', # 0xa4
'Yi ', # 0xa5
'Biao ', # 0xa6
'Song ', # 0xa7
'She ', # 0xa8
'Lin ', # 0xa9
'Kunugi ', # 0xaa
'Cha ', # 0xab
'Meng ', # 0xac
'Yin ', # 0xad
'Tao ', # 0xae
'Tai ', # 0xaf
'Mian ', # 0xb0
'Qi ', # 0xb1
'Toan ', # 0xb2
'Bin ', # 0xb3
'Huo ', # 0xb4
'Ji ', # 0xb5
'Qian ', # 0xb6
'Mi ', # 0xb7
'Ning ', # 0xb8
'Yi ', # 0xb9
'Gao ', # 0xba
'Jian ', # 0xbb
'Yin ', # 0xbc
'Er ', # 0xbd
'Qing ', # 0xbe
'Yan ', # 0xbf
'Qi ', # 0xc0
'Mi ', # 0xc1
'Zhao ', # 0xc2
'Gui ', # 0xc3
'Chun ', # 0xc4
'Ji ', # 0xc5
'Kui ', # 0xc6
'Po ', # 0xc7
'Deng ', # 0xc8
'Chu ', # 0xc9
'[?] ', # 0xca
'Mian ', # 0xcb
'You ', # 0xcc
'Zhi ', # 0xcd
'Guang ', # 0xce
'Qian ', # 0xcf
'Lei ', # 0xd0
'Lei ', # 0xd1
'Sa ', # 0xd2
'Lu ', # 0xd3
'Li ', # 0xd4
'Cuan ', # 0xd5
'Lu ', # 0xd6
'Mie ', # 0xd7
'Hui ', # 0xd8
'Ou ', # 0xd9
'Lu ', # 0xda
'Jie ', # 0xdb
'Gao ', # 0xdc
'Du ', # 0xdd
'Yuan ', # 0xde
'Li ', # 0xdf
'Fei ', # 0xe0
'Zhuo ', # 0xe1
'Sou ', # 0xe2
'Lian ', # 0xe3
'Tamo ', # 0xe4
'Chu ', # 0xe5
'[?] ', # 0xe6
'Zhu ', # 0xe7
'Lu ', # 0xe8
'Yan ', # 0xe9
'Li ', # 0xea
'Zhu ', # 0xeb
'Chen ', # 0xec
'Jie ', # 0xed
'E ', # 0xee
'Su ', # 0xef
'Huai ', # 0xf0
'Nie ', # 0xf1
'Yu ', # 0xf2
'Long ', # 0xf3
'Lai ', # 0xf4
'[?] ', # 0xf5
'Xian ', # 0xf6
'Kwi ', # 0xf7
'Ju ', # 0xf8
'Xiao ', # 0xf9
'Ling ', # 0xfa
'Ying ', # 0xfb
'Jian ', # 0xfc
'Yin ', # 0xfd
'You ', # 0xfe
'Ying ', # 0xff
)
| gpl-3.0 | -7,884,080,281,745,453,000 | 4,162,070,313,561,708,000 | 17.116279 | 20 | 0.392169 | false |
hj3938/zulip | zerver/management/commands/email-mirror.py | 114 | 6746 | #!/usr/bin/python
"""
Forward messages sent to the configured email gateway to Zulip.
For zulip.com, messages to that address go to the Inbox of [email protected].
Zulip voyager configurations will differ.
Messages meant for Zulip have a special recipient form of
<stream name>+<regenerable stream token>@streams.zulip.com
This pattern is configurable via the EMAIL_GATEWAY_PATTERN settings.py
variable.
This script can be used via two mechanisms:
1) Run this in a cronjob every N minutes if you have configured Zulip to poll
an external IMAP mailbox for messages. The script will then connect to
your IMAP server and batch-process all messages.
We extract and validate the target stream from information in the
recipient address and retrieve, forward, and archive the message.
2) Alternatively, configure your MTA to execute this script on message
receipt with the contents of the message piped to standard input. The
script will queue the message for processing. In this mode of invocation,
you should pass the destination email address in the ORIGINAL_RECIPIENT
environment variable.
In Postfix, you can express that via an /etc/aliases entry like this:
|/usr/bin/python /home/zulip/deployments/current/manage.py email-mirror
"""
from __future__ import absolute_import
import email
import os
from email.header import decode_header
import logging
import re
import sys
import posix
from django.conf import settings
from django.core.management.base import BaseCommand
from zerver.lib.actions import decode_email_address
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.upload import upload_message_image
from zerver.lib.queue import queue_json_publish
from zerver.models import Stream, get_user_profile_by_email, UserProfile
from zerver.lib.email_mirror import logger, process_message, \
extract_and_validate, ZulipEmailForwardError, \
mark_missed_message_address_as_used, is_missed_message_address
from twisted.internet import protocol, reactor, ssl
from twisted.mail import imap4
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../api"))
import zulip
## Setup ##
log_format = "%(asctime)s: %(message)s"
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.EMAIL_MIRROR_LOG_PATH)
file_handler.setFormatter(formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
## IMAP callbacks ##
def logout(result, proto):
# Log out.
return proto.logout()
def delete(result, proto):
# Close the connection, which also processes any flags that were
# set on messages.
return proto.close().addCallback(logout, proto)
def fetch(result, proto, mailboxes):
if not result:
return proto.logout()
message_uids = result.keys()
# Make sure we forward the messages in time-order.
message_uids.sort()
for uid in message_uids:
message = email.message_from_string(result[uid]["RFC822"])
process_message(message)
# Delete the processed messages from the Inbox.
message_set = ",".join([result[key]["UID"] for key in message_uids])
d = proto.addFlags(message_set, ["\\Deleted"], uid=True, silent=False)
d.addCallback(delete, proto)
return d
def examine_mailbox(result, proto, mailbox):
# Fetch messages from a particular mailbox.
return proto.fetchMessage("1:*", uid=True).addCallback(fetch, proto, mailbox)
def select_mailbox(result, proto):
# Select which mailbox we care about.
mbox = filter(lambda x: settings.EMAIL_GATEWAY_IMAP_FOLDER in x[2], result)[0][2]
return proto.select(mbox).addCallback(examine_mailbox, proto, result)
def list_mailboxes(res, proto):
# List all of the mailboxes for this account.
return proto.list("","*").addCallback(select_mailbox, proto)
def connected(proto):
d = proto.login(settings.EMAIL_GATEWAY_LOGIN, settings.EMAIL_GATEWAY_PASSWORD)
d.addCallback(list_mailboxes, proto)
d.addErrback(login_failed)
return d
def login_failed(failure):
return failure
def done(_):
reactor.callLater(0, reactor.stop)
def main():
imap_client = protocol.ClientCreator(reactor, imap4.IMAP4Client)
d = imap_client.connectSSL(settings.EMAIL_GATEWAY_IMAP_SERVER, settings.EMAIL_GATEWAY_IMAP_PORT, ssl.ClientContextFactory())
d.addCallbacks(connected, login_failed)
d.addBoth(done)
class Command(BaseCommand):
help = __doc__
def add_arguments(self, parser):
parser.add_argument('recipient', metavar='<recipient>', type=str, nargs='?', default=None,
help="original recipient")
def handle(self, *args, **options):
rcpt_to = os.environ.get("ORIGINAL_RECIPIENT", options['recipient'])
if rcpt_to is not None:
if is_missed_message_address(rcpt_to):
try:
mark_missed_message_address_as_used(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Bad or expired missed message address."
exit(posix.EX_NOUSER)
else:
try:
extract_and_validate(rcpt_to)
except ZulipEmailForwardError:
print "5.1.1 Bad destination mailbox address: Please use the address specified in your Streams page."
exit(posix.EX_NOUSER)
# Read in the message, at most 25MiB. This is the limit enforced by
# Gmail, which we use here as a decent metric.
message = sys.stdin.read(25*1024*1024)
if len(sys.stdin.read(1)) != 0:
# We're not at EOF, reject large mail.
print "5.3.4 Message too big for system: Max size is 25MiB"
exit(posix.EX_DATAERR)
queue_json_publish(
"email_mirror",
{
"message": message,
"rcpt_to": rcpt_to
},
lambda x: None
)
else:
# We're probably running from cron, try to batch-process mail
if (not settings.EMAIL_GATEWAY_BOT or not settings.EMAIL_GATEWAY_LOGIN or
not settings.EMAIL_GATEWAY_PASSWORD or not settings.EMAIL_GATEWAY_IMAP_SERVER or
not settings.EMAIL_GATEWAY_IMAP_PORT or not settings.EMAIL_GATEWAY_IMAP_FOLDER):
print "Please configure the Email Mirror Gateway in your local_settings.py, or specify $ORIGINAL_RECIPIENT if piping a single mail."
exit(1)
reactor.callLater(0, main)
reactor.run()
| apache-2.0 | 7,417,851,833,832,001,000 | 7,094,024,421,320,995,000 | 36.065934 | 148 | 0.676401 | false |
openhumanoids/oh-distro | ipab-distro/tests/systemtests/src/exoticaLWR/test.py | 1 | 1072 | from director import transformUtils
import numpy
import os
if ikPlanner.pushToMatlab==True:
print "FAILURE - pushing requests to matlab"
exit()
qT=numpy.array([0, 0, 0, 0, 0, 0, -6.310489698080346e-05, 0.34103086590766907, 3.8130277971504256e-05, 1.4273228645324707, 5.833456089021638e-05, -0.4845042824745178, -3.8867587136337534e-05])
q0=numpy.array([ 0., 0., 0., 0., 0., 0., 0., 0.78539816, 0., 1.57079633, 0., -0.78539816, 0.])
goalFrame = transformUtils.frameFromPositionAndRPY([0.36932988056397303, -0.009998017176602909, 0.8891143571732633], [-1.3262913021702864e-12, 89.99999979432002, -89.99963750134272])
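# Plan a left-arm end-effector move to the goal frame with base and back locked, then check the resulting IK pose and trajectory plan below.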
constraintSet = ikPlanner.planEndEffectorGoal(q0, 'left', goalFrame, lockBase=True, lockBack=True)
q=numpy.array(constraintSet.runIk()[0])
ret=constraintSet.runIkTraj()
if ((q-qT).__abs__()>1e-3).any():
print "FAILURE - IK pose incorrect."
exit()
if ret.plan_info[0]!=0:
print "FAILURE - Planner failed."
exit()
# When everything goes all right, report success
with open(os.environ.get('SYSTEMTEST_RESULT_FILE'),'w+') as f:
f.write('1\n')
exit()
| bsd-3-clause | 3,317,186,704,263,429,000 | 3,480,854,715,215,009,300 | 38.703704 | 192 | 0.724813 | false |
atsushieno/cerbero | cerbero/tools/libtool.py | 2 | 5912 | #!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.enums import Platform
from cerbero.utils import shell
from cerbero.errors import FatalError
def get_libtool_versions(version, soversion=0):
parts = version.split('.')
if not parts or len(parts) > 3:
raise FatalError('Version must contain three or fewer parts: {!r}'
''.format(version))
try:
major = int(parts[0])
minor = 0
micro = 0
if len(parts) > 1:
minor = int(parts[1])
if len(parts) > 2:
micro = int(parts[2])
except ValueError:
raise FatalError('Invalid version: {!r}'.format(version))
interface_age = 0
if (minor % 2) == 0:
interface_age = micro
binary_age = (100 * minor) + micro
return (soversion, binary_age - interface_age, interface_age)
class LibtoolLibrary(object):
'''
Helper class to create libtool libraries files (.la)
'''
LIBTOOL_TPL = '''\
# %(libname)s - a libtool library file
# Generated by libtool (GNU libtool) 2.4.2 Debian-2.4.2-1ubuntu1
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='%(dlname)s'
# Names of this library.
library_names='%(library_names)s'
# The name of the static archive.
old_library='%(old_library)s'
# Linker flags that can not go in dependency_libs.
inherited_linker_flags=''
# Libraries that this one depends upon.
dependency_libs='%(dependency_libs)s'
# Names of additional weak libraries provided by this library
weak_library_names=''
# Version information for libglib-2.0.
current=%(current)s
age=%(age)s
revision=%(revision)s
# Is this an already installed library?
installed=yes
# Should we warn about portability when linking against -modules?
shouldnotlink=no
# Files to dlopen/dlpreopen
dlopen=''
dlpreopen=''
# Directory that this library needs to be installed in:
libdir='%(libdir)s'
'''
def __init__(self, libname, major, minor, micro, libdir, platform,
deps=None, static_only=False):
self.libtool_vars = {
'libname': '',
'dlname': '',
'library_names': '',
'old_library': '',
'dependency_libs': '',
'current': '',
'age': '',
'revision': '',
'libdir': ''}
if platform == Platform.WINDOWS:
shared_ext = 'dll.a'
elif platform in [Platform.DARWIN, Platform.IOS]:
shared_ext = 'dylib'
else:
shared_ext = 'so'
if not libname.startswith('lib'):
libname = 'lib%s' % libname
if deps is None:
deps = ''
self.libname = libname
self.libdir = libdir
self.laname = '%s.la' % libname
dlname_base = '%s.%s' % (libname, shared_ext)
dlname = dlname_base
dlname_all = dlname_base
major_str = ''
minor_str = ''
micro_str = ''
if major is not None:
dlname = '%s.%s' % (dlname_base, major)
major_str = major
if minor is not None:
dlname_all = '%s.%s' % (dlname, minor)
minor_str = minor
if micro is not None:
dlname_all = '%s.%s' % (dlname_all, micro)
micro_str = micro
old_library = '%s.a' % libname
self.change_value('libname', self.laname)
if not static_only:
self.change_value('dlname', dlname)
self.change_value('library_names', '%s %s %s' % (dlname_all, dlname,
dlname_base))
self.change_value('old_library', old_library)
self.change_value('current', minor_str)
self.change_value('age', minor_str)
self.change_value('revision', micro_str)
self.change_value('libdir', libdir)
self.change_value('dependency_libs', self._parse_deps(deps))
def save(self):
path = os.path.join(self.libdir, self.laname)
if shell.DRY_RUN:
print('Creating {}'.format(path))
return
with open(path, 'w') as f:
f.write(self.LIBTOOL_TPL % self.libtool_vars)
def change_value(self, key, val):
self.libtool_vars[key] = val
def _parse_deps(self, deps):
# FIXME: these deps need to be resolved recursively since the list of
# dependency_libs in .la files are exhaustive. For now, recipes are
# handling that.
deps_str = ''
libtool_deps = [x for x in deps if not x.startswith('-l')]
lib_deps = [x for x in deps if x.startswith('-l')]
for d in libtool_deps:
dep_dir, dep_name = os.path.split(d)
if dep_dir:
# we already have a prepended path
deps_str += ' ' + d + '.la '
else:
if not d.startswith('lib'):
d = 'lib' + d
deps_str += ' %s/%s.la ' % (self.libdir, d)
deps_str += ' '.join(lib_deps)
return deps_str
| lgpl-2.1 | 4,077,716,283,310,867,500 | -4,782,143,501,998,950,000 | 32.027933 | 80 | 0.590494 | false |
raspberrypi360/python_games | hacker_rank/join_circlesUT.py | 1 | 1942 | import unittest
import join_circles
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testStock(self):
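        # Cross-check numCombos and getProduct against hand-computed values and the getSquare reference implementation.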
self.assertEqual(18, join_circles.numCombos([1,2,3,4], 3))
self.assertEqual(180, join_circles.numCombos([1,2,3,4,5], 3))
s, num = join_circles.getProduct([1,2,3,4,5], 1)
self.assertEqual(720, s)
s, num = join_circles.getProduct([1,2,3,4,5], 2)
self.assertEqual(5850, s)
s, num = join_circles.getProduct([1,2,3,4,5], 3)
self.assertEqual(25200, s)
k=6
self.assertEqual(150, join_circles.numCombos([i+1 for i in range(k)], 2))
self.assertEqual(900, join_circles.numCombos([i+1 for i in range(k)], 3))
self.assertEqual(2700, join_circles.numCombos([i+1 for i in range(k)], 4))
s, num = join_circles.getProduct([i+1 for i in range(k)], 2)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 2), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 3)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 3), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 4)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 4), s)
k=7
s, num = join_circles.getProduct([i+1 for i in range(k)], 2)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 2), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 3)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 3), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 4)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 4), s)
s, num = join_circles.getProduct([i+1 for i in range(k)], 5)
self.assertEqual(join_circles.getSquare([i+1 for i in range(k)], 5), s)
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 | -7,419,709,976,822,180,000 | -1,392,754,946,018,097,200 | 41.23913 | 82 | 0.601957 | false |
GauravSahu/odoo | addons/product/tests/test_pricelist.py | 280 | 6748 | from openerp.tests.common import TransactionCase
class TestPricelist(TransactionCase):
"""Tests for unit of measure conversion"""
def setUp(self):
super(TestPricelist, self).setUp()
cr, uid, context = self.cr, self.uid, {}
self.ir_model_data = self.registry('ir.model.data')
self.product_product = self.registry('product.product')
self.product_pricelist = self.registry('product.pricelist')
self.uom = self.registry('product.uom')
self.usb_adapter_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_48')[1]
self.datacard_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_product_46')[1]
self.unit_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
self.dozen_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'product_uom_dozen')[1]
self.tonne_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.product_uom_ton')
self.kg_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.product_uom_kgm')
self.public_pricelist_id = self.ir_model_data.get_object_reference(cr, uid, 'product', 'list0')[1]
self.sale_pricelist_id = self.product_pricelist.create(cr, uid, {
'name': 'Sale pricelist',
'type': 'sale',
'version_id': [(0, 0, {
'name': 'v1.0',
'items_id': [(0, 0, {
'name': 'Discount 10%',
'base': 1, # based on public price
'price_discount': -0.1,
'product_id': self.usb_adapter_id
}), (0, 0, {
'name': 'Discount -0.5',
'base': 1, # based on public price
'price_surcharge': -0.5,
'product_id': self.datacard_id
})]
})]
}, context=context)
def test_10_discount(self):
# Make sure the price using a pricelist is the same than without after
# applying the computation manually
cr, uid, context = self.cr, self.uid, {}
public_context = dict(context, pricelist=self.public_pricelist_id)
pricelist_context = dict(context, pricelist=self.sale_pricelist_id)
usb_adapter_without_pricelist = self.product_product.browse(cr, uid, self.usb_adapter_id, context=public_context)
usb_adapter_with_pricelist = self.product_product.browse(cr, uid, self.usb_adapter_id, context=pricelist_context)
self.assertEqual(usb_adapter_with_pricelist.price, usb_adapter_without_pricelist.price*0.9)
datacard_without_pricelist = self.product_product.browse(cr, uid, self.datacard_id, context=public_context)
datacard_with_pricelist = self.product_product.browse(cr, uid, self.datacard_id, context=pricelist_context)
self.assertEqual(datacard_with_pricelist.price, datacard_without_pricelist.price-0.5)
# Make sure that changing the unit of measure does not break the unit
# price (after converting)
unit_context = dict(context,
pricelist=self.sale_pricelist_id,
uom=self.unit_id)
dozen_context = dict(context,
pricelist=self.sale_pricelist_id,
uom=self.dozen_id)
usb_adapter_unit = self.product_product.browse(cr, uid, self.usb_adapter_id, context=unit_context)
usb_adapter_dozen = self.product_product.browse(cr, uid, self.usb_adapter_id, context=dozen_context)
self.assertAlmostEqual(usb_adapter_unit.price*12, usb_adapter_dozen.price)
datacard_unit = self.product_product.browse(cr, uid, self.datacard_id, context=unit_context)
datacard_dozen = self.product_product.browse(cr, uid, self.datacard_id, context=dozen_context)
# price_surcharge applies to product default UoM, here "Units", so surcharge will be multiplied
self.assertAlmostEqual(datacard_unit.price*12, datacard_dozen.price)
def test_20_pricelist_uom(self):
# Verify that the pricelist rules are correctly using the product's default UoM
# as reference, and return a result according to the target UoM (as specific in the context)
cr, uid = self.cr, self.uid
kg, tonne = self.kg_id, self.tonne_id
tonne_price = 100
# make sure 'tonne' resolves down to 1 'kg'.
self.uom.write(cr, uid, tonne, {'rounding': 0.001})
# setup product stored in 'tonnes', with a discounted pricelist for qty > 3 tonnes
spam_id = self.product_product.copy(cr, uid, self.usb_adapter_id,
{ 'name': '1 tonne of spam',
'uom_id': self.tonne_id,
'uos_id': self.tonne_id,
'uom_po_id': self.tonne_id,
'list_price': tonne_price,
})
pricelist_version_id = self.ir_model_data.xmlid_to_res_id(cr, uid, 'product.ver0')
self.registry('product.pricelist.item').create(cr, uid,
{ 'price_version_id': pricelist_version_id,
'sequence': 10,
'name': '3+ tonnes: -10 EUR discount/t',
'base': 1, # based on public price
'min_quantity': 3, # min = 3 tonnes
'price_surcharge': -10, # -10 EUR / tonne
'product_id': spam_id,
})
pricelist_id = self.public_pricelist_id
def test_unit_price(qty, uom, expected_unit_price):
unit_price = self.registry('product.pricelist').price_get(cr, uid, [pricelist_id],
spam_id, qty,
context={'uom': uom})[pricelist_id]
self.assertAlmostEqual(unit_price, expected_unit_price, msg='Computed unit price is wrong')
# Test prices - they are *per unit*, the quantity is only here to match the pricelist rules!
test_unit_price(2, kg, tonne_price / 1000.0)
test_unit_price(2000, kg, tonne_price / 1000.0)
test_unit_price(3500, kg, (tonne_price - 10) / 1000.0)
test_unit_price(2, tonne, tonne_price)
test_unit_price(3, tonne, tonne_price - 10)
| agpl-3.0 | 8,977,856,334,586,589,000 | 7,416,402,174,190,560,000 | 57.678261 | 121 | 0.555127 | false |