filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---
the-stack_106_18496
|
#!/usr/local/sbin/charm-env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple script to parse benchmark transaction results
and reformat them as JSON for sending back to juju
"""
import json
import re
import sys

from charmhelpers.core import hookenv
def parse_benchmark_output():
"""
Parse the output from the benchmark and set the action results:
"""
results = {}
# Find all of the interesting things
    regex = re.compile(r'\t+(.*)=(.*)')
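    # Illustrative only (hypothetical benchmark line, not a documented format):
    # a stdin line such as "\t\ttransactions=1000" matches the regex above and
    # ends up in the results dict as results["transactions"] == "1000".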
for line in sys.stdin.readlines():
m = regex.match(line)
if m:
results[m.group(1)] = m.group(2)
hookenv.action_set({"meta.raw": json.dumps(results)})
if __name__ == "__main__":
parse_benchmark_output()
|
the-stack_106_18497
|
import os
import pytest
def test_list_command(script, data):
"""
Test default behavior of list command.
"""
script.pip(
'install', '-f', data.find_links, '--no-index', 'simple==1.0',
'simple2==3.0',
)
result = script.pip('list')
assert 'simple (1.0)' in result.stdout, str(result)
assert 'simple2 (3.0)' in result.stdout, str(result)
def test_local_flag(script, data):
"""
Test the behavior of --local flag in the list command
"""
script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
result = script.pip('list', '--local')
assert 'simple (1.0)' in result.stdout
def test_user_flag(script, data, virtualenv):
"""
Test the behavior of --user flag in the list command
"""
virtualenv.system_site_packages = True
script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
script.pip('install', '-f', data.find_links, '--no-index',
'--user', 'simple2==2.0')
result = script.pip('list', '--user')
assert 'simple (1.0)' not in result.stdout
assert 'simple2 (2.0)' in result.stdout
@pytest.mark.network
def test_uptodate_flag(script, data):
"""
Test the behavior of --uptodate flag in the list command
"""
script.pip(
'install', '-f', data.find_links, '--no-index', 'simple==1.0',
'simple2==3.0',
)
script.pip(
'install', '-e',
'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
)
result = script.pip(
'list', '-f', data.find_links, '--no-index', '--uptodate',
expect_stderr=True,
)
assert 'simple (1.0)' not in result.stdout # 3.0 is latest
assert 'pip-test-package' not in result.stdout # editables excluded
assert 'simple2 (3.0)' in result.stdout, str(result)
@pytest.mark.network
def test_outdated_flag(script, data):
"""
Test the behavior of --outdated flag in the list command
"""
script.pip(
'install', '-f', data.find_links, '--no-index', 'simple==1.0',
'simple2==3.0', 'simplewheel==1.0',
)
script.pip(
'install', '-e',
'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
)
result = script.pip(
'list', '-f', data.find_links, '--no-index', '--outdated',
expect_stderr=True,
)
assert 'simple (Current: 1.0 Latest: 3.0 [sdist])' in result.stdout
assert 'simplewheel (Current: 1.0 Latest: 2.0 [wheel])' in result.stdout
assert 'pip-test-package' not in result.stdout # editables excluded
assert 'simple2' not in result.stdout, str(result) # 3.0 is latest
@pytest.mark.network
def test_editables_flag(script, data):
"""
Test the behavior of --editables flag in the list command
"""
script.pip('install', '-f', data.find_links, '--no-index', 'simple==1.0')
result = script.pip(
'install', '-e',
'git+https://github.com/pypa/pip-test-package.git#egg=pip-test-package'
)
result = script.pip('list', '--editable')
assert 'simple (1.0)' not in result.stdout, str(result)
assert os.path.join('src', 'pip-test-package') in result.stdout, (
str(result)
)
|
the-stack_106_18498
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core.strutils import (
bytes_from_string
)
from charmhelpers.core.hookenv import (
is_relation_made,
relation_ids,
relation_get,
related_units,
service_name,
config,
log as juju_log,
ERROR
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
ApacheSSLContext as SSLContext,
BindHostContext
)
from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
)
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
class GlanceContext(OSContextGenerator):
def __call__(self):
ctxt = {
'disk_formats': config('disk-formats')
}
if config('container-formats'):
ctxt['container_formats'] = config('container-formats')
if config('filesystem-store-datadir'):
ctxt['filesystem_store_datadir'] = (
config('filesystem-store-datadir'))
image_size_cap = config('image-size-cap')
if image_size_cap:
try:
ctxt['image_size_cap'] = bytes_from_string(
image_size_cap.replace(' ', '').upper())
except (ValueError, KeyError):
juju_log('Unable to parse value for image-size-cap ({}), '
'see config.yaml for information about valid '
'formatting'.format(config('image-size-cap')),
level=ERROR)
raise
return ctxt
class CephGlanceContext(OSContextGenerator):
interfaces = ['ceph-glance']
def __call__(self):
"""Used to generate template context to be added to glance-api.conf in
the presence of a ceph relation.
"""
if not is_relation_made(relation="ceph",
keys="key"):
return {}
service = service_name()
return {
# pool created based on service name.
'rbd_pool': service,
'rbd_user': service,
'expose_image_locations': config('expose-image-locations')
}
class ObjectStoreContext(OSContextGenerator):
interfaces = ['object-store']
def __call__(self):
"""Object store config.
Used to generate template context to be added to glance-api.conf in
        the presence of an 'object-store' relation.
"""
if not relation_ids('object-store'):
return {}
return {
'swift_store': True,
}
class CinderStoreContext(OSContextGenerator):
interfaces = ['cinder-volume-service', 'storage-backend']
def __call__(self):
"""Cinder store config.
Used to generate template context to be added to glance-api.conf in
the presence of a 'cinder-volume-service' relation or in the
presence of a flag 'cinder-backend' in the 'storage-backend' relation.
"""
if relation_ids('cinder-volume-service'):
return {'cinder_store': True}
for rid in relation_ids('storage-backend'):
for unit in related_units(rid):
value = relation_get('cinder-backend', rid=rid, unit=unit)
# value is a boolean flag
return {'cinder_store': value}
return {}
class MultiStoreContext(OSContextGenerator):
def __call__(self):
stores = ['glance.store.filesystem.Store', 'glance.store.http.Store']
store_mapping = {
'ceph': 'glance.store.rbd.Store',
'object-store': 'glance.store.swift.Store',
}
for store_relation, store_type in store_mapping.items():
if relation_ids(store_relation):
stores.append(store_type)
_release = os_release('glance-common')
if ((relation_ids('cinder-volume-service') or
relation_ids('storage-backend')) and
CompareOpenStackReleases(_release) >= 'mitaka'):
# even if storage-backend is present with cinder-backend=False it
# means that glance should not store images in cinder by default
# but can read images from cinder.
stores.append('glance.store.cinder.Store')
stores.sort()
return {
'known_stores': ','.join(stores)
}
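    # For reference (values follow from store_mapping and the release check above):
    # with a ceph relation on a Mitaka-or-newer deployment that also has a
    # cinder-volume-service relation, 'known_stores' would render to something like
    # "glance.store.cinder.Store,glance.store.filesystem.Store,glance.store.http.Store,glance.store.rbd.Store".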
class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __call__(self):
'''Extends the main charmhelpers HAProxyContext with a port mapping
specific to this charm.
Also used to extend glance-api.conf context with correct bind_port
'''
haproxy_port = 9292
apache_port = determine_apache_port(9292, singlenode_mode=True)
api_port = determine_api_port(9292, singlenode_mode=True)
ctxt = {
'service_ports': {'glance_api': [haproxy_port, apache_port]},
'bind_port': api_port,
}
return ctxt
class ApacheSSLContext(SSLContext):
interfaces = ['https']
external_ports = [9292]
service_namespace = 'glance'
def __call__(self):
return super(ApacheSSLContext, self).__call__()
class LoggingConfigContext(OSContextGenerator):
def __call__(self):
return {'debug': config('debug'), 'verbose': config('verbose')}
class GlanceIPv6Context(BindHostContext):
def __call__(self):
ctxt = super(GlanceIPv6Context, self).__call__()
if config('prefer-ipv6'):
ctxt['registry_host'] = '[::]'
else:
ctxt['registry_host'] = '0.0.0.0'
return ctxt
|
the-stack_106_18500
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import yaml
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_module_kwargs
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_cloud_from_module
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tripleo_network_ports_populate_environment
short_description: Create TripleO network port environment
version_added: "2.8"
description:
    - "Create TripleO network port environment by extending the baremetal environment"
options:
environment:
description:
- Existing heat environment data to add to
type: dict
default: {}
role_net_map:
description:
- Structure with role network association
type: dict
default: {}
node_port_map:
description:
- Structure with port data mapped by node and network
type: dict
default: {}
templates:
description:
- The path to tripleo-heat-templates root directory
type: path
default: /usr/share/openstack-tripleo-heat-templates
author:
- Harald Jensås <[email protected]>
'''
RETURN = '''
'''
EXAMPLES = '''
- name: Populate environment with network port data
tripleo_network_ports_populate_environment:
environment: {}
role_net_map:
Controller:
- external
- internal_api
- storage
- tenant
Compute:
- internal_api
- storage
- tenant
node_port_map:
controller-0:
internal_api:
ip_address: 172.18.0.9
ip_subnet: 172.18.0.9/24
ip_address_uri: 172.18.0.9
tenant:
ip_address: 172.19.0.9
ip_subnet: 172.19.0.9/24
ip_address_uri: 172.19.0.9
compute-0:
internal_api:
ip_address: 172.18.0.15
ip_subnet: 172.18.0.15/24
ip_address_uri: 172.18.0.15
tenant:
ip_address: 172.19.0.15
ip_subnet: 172.19.0.15/24
ip_address_uri: 172.19.0.15
register: environment
'''
CTLPLANE_NETWORK = 'ctlplane'
REGISTRY_KEY_TPL = 'OS::TripleO::{role}::Ports::{net_name}Port'
PORT_PATH_TPL = 'network/ports/deployed_{net_name_lower}.yaml'
def get_net_name_map(conn, role_net_map):
_map = {}
networks = set()
for role, nets in role_net_map.items():
networks.update(nets)
for name_lower in networks:
if name_lower == CTLPLANE_NETWORK:
_map[name_lower] = name_lower
continue
net = conn.network.find_network(name_or_id=name_lower)
if not net:
raise Exception('Network {} not found'.format(name_lower))
name_upper = [x.split('=').pop() for x in net.tags
if x.startswith('tripleo_network_name')]
if not name_upper:
raise Exception(
'Unable to find network name for network with name_lower: {}, '
'please make sure the network tag tripleo_network_name'
'=$NET_NAME is set.'.format(name_lower))
_map[name_lower] = name_upper.pop()
return _map
def update_environment(environment, node_port_map, role_net_map, net_name_map,
templates):
resource_registry = environment.setdefault('resource_registry', {})
parameter_defaults = environment.setdefault('parameter_defaults', {})
for role, nets in role_net_map.items():
for net in nets:
if net == CTLPLANE_NETWORK:
continue
registry_key = REGISTRY_KEY_TPL.format(role=role,
net_name=net_name_map[net])
template_path = os.path.join(
templates, PORT_PATH_TPL.format(net_name_lower=net))
resource_registry.update({registry_key: template_path})
_map = parameter_defaults.setdefault('NodePortMap', {})
_map.update(node_port_map)
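# Illustrative result (role/network names taken from the EXAMPLES block above,
# assuming the network's tripleo_network_name tag is "InternalApi"): for role
# "Controller" and network "internal_api" with the default templates path,
# update_environment adds a resource_registry entry roughly like
#   resource_registry["OS::TripleO::Controller::Ports::InternalApiPort"] =
#       "/usr/share/openstack-tripleo-heat-templates/network/ports/deployed_internal_api.yaml"
# and merges node_port_map into parameter_defaults under the NodePortMap key.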
def run_module():
result = dict(
success=False,
changed=False,
error="",
environment={},
)
argument_spec = openstack_full_argument_spec(
**yaml.safe_load(DOCUMENTATION)['options']
)
module = AnsibleModule(
argument_spec,
supports_check_mode=False,
**openstack_module_kwargs()
)
environment = result['environment'] = module.params['environment']
role_net_map = module.params['role_net_map']
node_port_map = module.params['node_port_map']
templates = module.params['templates']
try:
_, conn = openstack_cloud_from_module(module)
net_name_map = get_net_name_map(conn, role_net_map)
update_environment(environment, node_port_map, role_net_map,
net_name_map, templates)
result['success'] = True
module.exit_json(**result)
except Exception as err:
result['error'] = str(err)
        result['msg'] = "Error: overcloud network provision failed!"
module.fail_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
the-stack_106_18501
|
"""
Incident Update Web Controller
"""
# Standard Library
import os
import markdown2
# Third Party Library
from django.views import View
from django.http import Http404
from django.shortcuts import render
from django.utils.translation import gettext as _
# Local Library
from app.modules.core.context import Context
from app.modules.core.incident import Incident as IncidentModule
from app.modules.core.decorators import login_if_not_authenticated
from app.modules.core.component import Component as ComponentModule
from app.modules.core.component_group import ComponentGroup as ComponentGroupModule
from app.modules.core.incident_update import IncidentUpdate as IncidentUpdateModule
from app.modules.core.incident_update_component import IncidentUpdateComponent as IncidentUpdateComponentModule
from app.modules.core.incident_update_notification import IncidentUpdateNotification as IncidentUpdateNotificationModule
class IncidentUpdateAdd(View):
template_name = 'templates/admin/incident/update/add.html'
__context = Context()
__incident = IncidentModule()
__incident_update = IncidentUpdateModule()
__component = ComponentModule()
__component_group = ComponentGroupModule()
__correlation_id = None
@login_if_not_authenticated
def get(self, request, incident_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
incident = self.__incident.get_one_by_id(incident_id)
if not incident:
raise Http404("Incident not found.")
self.__context.autoload_options()
self.__context.autoload_user(request.user.id if request.user.is_authenticated else None)
self.__context.push({
"page_title": _("Add Incident Update · %s") % self.__context.get("app_name", os.getenv("APP_NAME", "Silverback")),
"incident": incident
})
return render(request, self.template_name, self.__context.get())
class IncidentUpdateView(View):
template_name = 'templates/admin/incident/update/view.html'
__context = Context()
__incident = IncidentModule()
__incident_update = IncidentUpdateModule()
__incident_update_component = IncidentUpdateComponentModule()
__component = ComponentModule()
__component_group = ComponentGroupModule()
__incident_update_notification = IncidentUpdateNotificationModule()
__correlation_id = None
@login_if_not_authenticated
def get(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
incident = self.__incident.get_one_by_id(incident_id)
if not incident:
raise Http404("Incident not found.")
update = self.__incident_update.get_one_by_id(update_id)
if not update:
raise Http404("Incident update not found.")
update["datetime"] = update["datetime"].strftime("%b %d %Y %H:%M:%S")
update["message"] = markdown2.markdown(update["message"])
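        # For reference (hypothetical update text): markdown2.markdown("**Resolved**")
        # returns HTML along the lines of "<p><strong>Resolved</strong></p>\n",
        # which the template renders as the update body.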
update["notified_subscribers"] = self.__incident_update_notification.count_by_update_status(
update["id"],
IncidentUpdateNotificationModule.SUCCESS
)
update["failed_subscribers"] = self.__incident_update_notification.count_by_update_status(
update["id"],
IncidentUpdateNotificationModule.FAILED
)
components = self.__format_components(self.__component.get_all())
affected_components = self.__format_affected_components(self.__incident_update_component.get_all(update_id))
self.__context.autoload_options()
self.__context.autoload_user(request.user.id if request.user.is_authenticated else None)
self.__context.push({
"page_title": _("View Incident Update · %s") % self.__context.get("app_name", os.getenv("APP_NAME", "Silverback")),
"update": update,
"incident": incident,
"components": components,
"affected_components": affected_components
})
return render(request, self.template_name, self.__context.get())
def __format_components(self, components):
components_list = []
for component in components:
components_list.append({
"id": component.id,
"name": component.name
})
return components_list
def __format_affected_components(self, affected_components):
affected_components_list = []
for affected_component in affected_components:
affected_components_list.append({
"id": affected_component.id,
"component_id": affected_component.component.id,
"component_name": affected_component.component.name,
"type": affected_component.type
})
return affected_components_list
class IncidentUpdateEdit(View):
template_name = 'templates/admin/incident/update/edit.html'
__context = Context()
__incident = IncidentModule()
__incident_update = IncidentUpdateModule()
__component = ComponentModule()
__component_group = ComponentGroupModule()
__correlation_id = None
@login_if_not_authenticated
def get(self, request, incident_id, update_id):
self.__correlation_id = request.META["X-Correlation-ID"] if "X-Correlation-ID" in request.META else ""
incident = self.__incident.get_one_by_id(incident_id)
if not incident:
raise Http404("Incident not found.")
update = self.__incident_update.get_one_by_id(update_id)
if not update:
raise Http404("Incident update not found.")
self.__context.autoload_options()
self.__context.autoload_user(request.user.id if request.user.is_authenticated else None)
self.__context.push({
"page_title": _("Edit Incident Update · %s") % self.__context.get("app_name", os.getenv("APP_NAME", "Silverback")),
"update": update,
"incident": incident
})
return render(request, self.template_name, self.__context.get())
|
the-stack_106_18502
|
"""Support for Freedompro climate."""
import json
import logging
from pyfreedompro import put_state
from homeassistant.components.climate import ClimateEntity, ClimateEntityFeature
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, CONF_API_KEY, TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
HVAC_MAP = {
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
}
HVAC_INVERT_MAP = {v: k for k, v in HVAC_MAP.items()}
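# For reference: HVAC_INVERT_MAP maps Home Assistant modes back to the Freedompro
# heatingCoolingState codes defined in HVAC_MAP above, e.g.
# HVAC_INVERT_MAP[HVAC_MODE_HEAT] == 1 and HVAC_INVERT_MAP[HVAC_MODE_OFF] == 0.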
SUPPORTED_HVAC_MODES = [HVAC_MODE_OFF, HVAC_MODE_HEAT, HVAC_MODE_COOL]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Freedompro climate."""
api_key = entry.data[CONF_API_KEY]
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(
aiohttp_client.async_get_clientsession(hass), api_key, device, coordinator
)
for device in coordinator.data
if device["type"] == "thermostat"
)
class Device(CoordinatorEntity, ClimateEntity):
    """Representation of a Freedompro climate."""
_attr_hvac_modes = SUPPORTED_HVAC_MODES
_attr_temperature_unit = TEMP_CELSIUS
def __init__(self, session, api_key, device, coordinator):
"""Initialize the Freedompro climate."""
super().__init__(coordinator)
self._session = session
self._api_key = api_key
self._attr_name = device["name"]
self._attr_unique_id = device["uid"]
self._characteristics = device["characteristics"]
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, self.unique_id),
},
manufacturer="Freedompro",
model=device["type"],
name=self.name,
)
self._attr_supported_features = ClimateEntityFeature.TARGET_TEMPERATURE
self._attr_current_temperature = 0
self._attr_target_temperature = 0
self._attr_hvac_mode = HVAC_MODE_OFF
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
device = next(
(
device
for device in self.coordinator.data
if device["uid"] == self._attr_unique_id
),
None,
)
if device is not None and "state" in device:
state = device["state"]
if "currentTemperature" in state:
self._attr_current_temperature = state["currentTemperature"]
if "targetTemperature" in state:
self._attr_target_temperature = state["targetTemperature"]
if "heatingCoolingState" in state:
self._attr_hvac_mode = HVAC_MAP[state["heatingCoolingState"]]
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
async def async_set_hvac_mode(self, hvac_mode):
"""Async function to set mode to climate."""
if hvac_mode not in SUPPORTED_HVAC_MODES:
raise ValueError(f"Got unsupported hvac_mode {hvac_mode}")
payload = {}
payload["heatingCoolingState"] = HVAC_INVERT_MAP[hvac_mode]
payload = json.dumps(payload)
await put_state(
self._session,
self._api_key,
self.unique_id,
payload,
)
await self.coordinator.async_request_refresh()
async def async_set_temperature(self, **kwargs):
"""Async function to set temperature to climate."""
payload = {}
if ATTR_HVAC_MODE in kwargs:
if kwargs[ATTR_HVAC_MODE] not in SUPPORTED_HVAC_MODES:
_LOGGER.error(
"Got unsupported hvac_mode %s, expected one of %s",
kwargs[ATTR_HVAC_MODE],
SUPPORTED_HVAC_MODES,
)
return
payload["heatingCoolingState"] = HVAC_INVERT_MAP[kwargs[ATTR_HVAC_MODE]]
if ATTR_TEMPERATURE in kwargs:
payload["targetTemperature"] = kwargs[ATTR_TEMPERATURE]
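        # Illustrative payload (hypothetical values): requesting heat at 21.5 °C
        # serializes below to '{"heatingCoolingState": 1, "targetTemperature": 21.5}'.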
payload = json.dumps(payload)
await put_state(
self._session,
self._api_key,
self.unique_id,
payload,
)
await self.coordinator.async_request_refresh()
|
the-stack_106_18503
|
# need a dict to set bloody .name field
from io import BytesIO
import logging
import os
import stat
from unittest import SkipTest
import uuid
import git
from git.cmd import Git
from git.compat import (
string_types,
defenc,
is_win,
)
from git.config import (
SectionConstraint,
GitConfigParser,
cp
)
from git.exc import (
InvalidGitRepositoryError,
NoSuchPathError,
RepositoryDirtyError
)
from git.objects.base import IndexObject, Object
from git.objects.util import Traversable
from git.util import (
Iterable,
join_path_native,
to_native_path_linux,
RemoteProgress,
rmtree,
unbare_repo
)
from git.util import HIDE_WINDOWS_KNOWN_ERRORS
import os.path as osp
from .util import (
mkhead,
sm_name,
sm_section,
SubmoduleConfigParser,
find_first_remote_branch
)
__all__ = ["Submodule", "UpdateProgress"]
log = logging.getLogger('git.objects.submodule.base')
log.addHandler(logging.NullHandler())
class UpdateProgress(RemoteProgress):
"""Class providing detailed progress information to the caller who should
derive from it and implement the ``update(...)`` message"""
CLONE, FETCH, UPDWKTREE = [1 << x for x in range(RemoteProgress._num_op_codes, RemoteProgress._num_op_codes + 3)]
_num_op_codes = RemoteProgress._num_op_codes + 3
__slots__ = ()
BEGIN = UpdateProgress.BEGIN
END = UpdateProgress.END
CLONE = UpdateProgress.CLONE
FETCH = UpdateProgress.FETCH
UPDWKTREE = UpdateProgress.UPDWKTREE
# IndexObject comes via the util module; it's a 'hacky' fix thanks to python's import
# mechanism, which causes plenty of trouble when the only reason for packages and
# modules is refactoring - subpackages shouldn't depend on parent packages
class Submodule(IndexObject, Iterable, Traversable):
"""Implements access to a git submodule. They are special in that their sha
represents a commit in the submodule's repository which is to be checked out
at the path of this instance.
The submodule type does not have a string type associated with it, as it exists
solely as a marker in the tree and index.
All methods work in bare and non-bare repositories."""
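    # Minimal usage sketch (repo, path and URL are hypothetical, not taken from this file):
    #   sm = Submodule.add(repo, name='lib', path='lib', url='https://example.com/lib.git')
    #   sm.update(init=True, recursive=True)   # clone/check out the recorded commit
    #   sm.move('vendor/lib')                  # relocate checkout, adjust config and index
    #   sm.remove(module=True)                 # delete checkout and configuration entries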
_id_attribute_ = "name"
k_modules_file = '.gitmodules'
k_head_option = 'branch'
k_head_default = 'master'
k_default_mode = stat.S_IFDIR | stat.S_IFLNK # submodules are directories with link-status
# this is a bogus type for base class compatibility
type = 'submodule'
__slots__ = ('_parent_commit', '_url', '_branch_path', '_name', '__weakref__')
_cache_attrs = ('path', '_url', '_branch_path')
def __init__(self, repo, binsha, mode=None, path=None, name=None, parent_commit=None, url=None, branch_path=None):
"""Initialize this instance with its attributes. We only document the ones
that differ from ``IndexObject``
:param repo: Our parent repository
:param binsha: binary sha referring to a commit in the remote repository, see url parameter
:param parent_commit: see set_parent_commit()
:param url: The url to the remote repository which is the submodule
:param branch_path: full (relative) path to ref to checkout when cloning the remote repository"""
super(Submodule, self).__init__(repo, binsha, mode, path)
self.size = 0
self._parent_commit = parent_commit
if url is not None:
self._url = url
if branch_path is not None:
assert isinstance(branch_path, string_types)
self._branch_path = branch_path
if name is not None:
self._name = name
def _set_cache_(self, attr):
if attr in ('path', '_url', '_branch_path'):
reader = self.config_reader()
# default submodule values
try:
self.path = reader.get('path')
except cp.NoSectionError:
raise ValueError("This submodule instance does not exist anymore in '%s' file"
% osp.join(self.repo.working_tree_dir, '.gitmodules'))
# end
self._url = reader.get('url')
# git-python extension values - optional
self._branch_path = reader.get_value(self.k_head_option, git.Head.to_full_path(self.k_head_default))
elif attr == '_name':
raise AttributeError("Cannot retrieve the name of a submodule if it was not set initially")
else:
super(Submodule, self)._set_cache_(attr)
# END handle attribute name
def _get_intermediate_items(self, item):
""":return: all the submodules of our module repository"""
try:
return type(self).list_items(item.module())
except InvalidGitRepositoryError:
return []
# END handle intermediate items
@classmethod
def _need_gitfile_submodules(cls, git):
return git.version_info[:3] >= (1, 7, 5)
def __eq__(self, other):
"""Compare with another submodule"""
# we may only compare by name as this should be the ID they are hashed with
# Otherwise this type wouldn't be hashable
# return self.path == other.path and self.url == other.url and super(Submodule, self).__eq__(other)
return self._name == other._name
def __ne__(self, other):
"""Compare with another submodule for inequality"""
return not (self == other)
def __hash__(self):
"""Hash this instance using its logical id, not the sha"""
return hash(self._name)
def __str__(self):
return self._name
def __repr__(self):
return "git.%s(name=%s, path=%s, url=%s, branch_path=%s)"\
% (type(self).__name__, self._name, self.path, self.url, self.branch_path)
@classmethod
def _config_parser(cls, repo, parent_commit, read_only):
""":return: Config Parser constrained to our submodule in read or write mode
:raise IOError: If the .gitmodules file cannot be found, either locally or in the repository
at the given parent commit. Otherwise the exception would be delayed until the first
access of the config parser"""
parent_matches_head = True
if parent_commit is not None:
try:
parent_matches_head = repo.head.commit == parent_commit
except ValueError:
# We are most likely in an empty repository, so the HEAD doesn't point to a valid ref
pass
# end handle parent_commit
if not repo.bare and parent_matches_head:
fp_module = osp.join(repo.working_tree_dir, cls.k_modules_file)
else:
assert parent_commit is not None, "need valid parent_commit in bare repositories"
try:
fp_module = cls._sio_modules(parent_commit)
except KeyError:
raise IOError("Could not find %s file in the tree of parent commit %s" %
(cls.k_modules_file, parent_commit))
# END handle exceptions
# END handle non-bare working tree
if not read_only and (repo.bare or not parent_matches_head):
raise ValueError("Cannot write blobs of 'historical' submodule configurations")
# END handle writes of historical submodules
return SubmoduleConfigParser(fp_module, read_only=read_only)
def _clear_cache(self):
# clear the possibly changed values
for name in self._cache_attrs:
try:
delattr(self, name)
except AttributeError:
pass
# END try attr deletion
# END for each name to delete
@classmethod
def _sio_modules(cls, parent_commit):
""":return: Configuration file as BytesIO - we only access it through the respective blob's data"""
sio = BytesIO(parent_commit.tree[cls.k_modules_file].data_stream.read())
sio.name = cls.k_modules_file
return sio
def _config_parser_constrained(self, read_only):
""":return: Config Parser constrained to our submodule in read or write mode"""
try:
pc = self.parent_commit
except ValueError:
pc = None
# end handle empty parent repository
parser = self._config_parser(self.repo, pc, read_only)
parser.set_submodule(self)
return SectionConstraint(parser, sm_section(self.name))
@classmethod
def _module_abspath(cls, parent_repo, path, name):
if cls._need_gitfile_submodules(parent_repo.git):
return osp.join(parent_repo.git_dir, 'modules', name)
return osp.join(parent_repo.working_tree_dir, path)
# end
@classmethod
def _clone_repo(cls, repo, url, path, name, **kwargs):
""":return: Repo instance of newly cloned repository
:param repo: our parent repository
:param url: url to clone from
:param path: repository-relative path to the submodule checkout location
        :param name: canonical name of the submodule
        :param kwargs: additional arguments given to git.clone"""
module_abspath = cls._module_abspath(repo, path, name)
module_checkout_path = module_abspath
if cls._need_gitfile_submodules(repo.git):
kwargs['separate_git_dir'] = module_abspath
module_abspath_dir = osp.dirname(module_abspath)
if not osp.isdir(module_abspath_dir):
os.makedirs(module_abspath_dir)
module_checkout_path = osp.join(repo.working_tree_dir, path)
# end
clone = git.Repo.clone_from(url, module_checkout_path, **kwargs)
if cls._need_gitfile_submodules(repo.git):
cls._write_git_file_and_module_config(module_checkout_path, module_abspath)
# end
return clone
@classmethod
def _to_relative_path(cls, parent_repo, path):
""":return: a path guaranteed to be relative to the given parent-repository
:raise ValueError: if path is not contained in the parent repository's working tree"""
path = to_native_path_linux(path)
if path.endswith('/'):
path = path[:-1]
# END handle trailing slash
if osp.isabs(path):
working_tree_linux = to_native_path_linux(parent_repo.working_tree_dir)
if not path.startswith(working_tree_linux):
                raise ValueError("Submodule checkout path '%s' needs to be within the parent repository at '%s'"
                                 % (path, working_tree_linux))
path = path[len(working_tree_linux.rstrip('/')) + 1:]
if not path:
raise ValueError("Absolute submodule path '%s' didn't yield a valid relative path" % path)
# end verify converted relative path makes sense
# end convert to a relative path
return path
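        # For reference (hypothetical paths): with a parent working tree of
        # "/home/user/proj", an absolute path "/home/user/proj/ext/lib" is returned
        # as "ext/lib", while a path outside the working tree raises ValueError.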
@classmethod
def _write_git_file_and_module_config(cls, working_tree_dir, module_abspath):
"""Writes a .git file containing a (preferably) relative path to the actual git module repository.
It is an error if the module_abspath cannot be made into a relative path, relative to the working_tree_dir
:note: will overwrite existing files !
:note: as we rewrite both the git file as well as the module configuration, we might fail on the configuration
and will not roll back changes done to the git file. This should be a non-issue, but may easily be fixed
if it becomes one
:param working_tree_dir: directory to write the .git file into
:param module_abspath: absolute path to the bare repository
"""
git_file = osp.join(working_tree_dir, '.git')
rela_path = osp.relpath(module_abspath, start=working_tree_dir)
if is_win:
if osp.isfile(git_file):
os.remove(git_file)
with open(git_file, 'wb') as fp:
fp.write(("gitdir: %s" % rela_path).encode(defenc))
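        # Illustrative content (hypothetical layout): for a submodule checked out at
        # "<worktree>/lib" with its repository under ".git/modules/lib", the .git
        # file written above would contain "gitdir: ../.git/modules/lib".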
with GitConfigParser(osp.join(module_abspath, 'config'),
read_only=False, merge_includes=False) as writer:
writer.set_value('core', 'worktree',
to_native_path_linux(osp.relpath(working_tree_dir, start=module_abspath)))
#{ Edit Interface
@classmethod
def add(cls, repo, name, path, url=None, branch=None, no_checkout=False):
"""Add a new submodule to the given repository. This will alter the index
as well as the .gitmodules file, but will not create a new commit.
If the submodule already exists, no matter if the configuration differs
from the one provided, the existing submodule will be returned.
:param repo: Repository instance which should receive the submodule
:param name: The name/identifier for the submodule
:param path: repository-relative or absolute path at which the submodule
should be located
It will be created as required during the repository initialization.
:param url: git-clone compatible URL, see git-clone reference for more information
If None, the repository is assumed to exist, and the url of the first
remote is taken instead. This is useful if you want to make an existing
            repository a submodule of another one.
:param branch: name of branch at which the submodule should (later) be checked out.
The given branch must exist in the remote repository, and will be checked
out locally as a tracking branch.
            It will only be written into the configuration if it is not None; otherwise
            the checked out branch will be the one the remote HEAD pointed to.
            The result you get in this situation is somewhat fuzzy, and it is recommended
            to specify at least 'master' here.
Examples are 'master' or 'feature/new'
:param no_checkout: if True, and if the repository has to be cloned manually,
no checkout will be performed
:return: The newly created submodule instance
:note: works atomically, such that no change will be done if the repository
update fails for instance"""
if repo.bare:
raise InvalidGitRepositoryError("Cannot add submodules to bare repositories")
# END handle bare repos
path = cls._to_relative_path(repo, path)
# assure we never put backslashes into the url, as some operating systems
# like it ...
if url is not None:
url = to_native_path_linux(url)
# END assure url correctness
# INSTANTIATE INTERMEDIATE SM
sm = cls(repo, cls.NULL_BIN_SHA, cls.k_default_mode, path, name, url='invalid-temporary')
if sm.exists():
# reretrieve submodule from tree
try:
sm = repo.head.commit.tree[path]
sm._name = name
return sm
except KeyError:
# could only be in index
index = repo.index
entry = index.entries[index.entry_key(path, 0)]
sm.binsha = entry.binsha
return sm
# END handle exceptions
# END handle existing
# fake-repo - we only need the functionality on the branch instance
br = git.Head(repo, git.Head.to_full_path(str(branch) or cls.k_head_default))
has_module = sm.module_exists()
branch_is_default = branch is None
if has_module and url is not None:
if url not in [r.url for r in sm.module().remotes]:
raise ValueError(
"Specified URL '%s' does not match any remote url of the repository at '%s'" % (url, sm.abspath))
# END check url
# END verify urls match
mrepo = None
if url is None:
if not has_module:
                raise ValueError("A URL was not given and an existing repository did not exist at %s" % path)
# END check url
mrepo = sm.module()
urls = [r.url for r in mrepo.remotes]
if not urls:
raise ValueError("Didn't find any remote url in repository at %s" % sm.abspath)
# END verify we have url
url = urls[0]
else:
# clone new repo
kwargs = {'n': no_checkout}
if not branch_is_default:
kwargs['b'] = br.name
# END setup checkout-branch
# _clone_repo(cls, repo, url, path, name, **kwargs):
mrepo = cls._clone_repo(repo, url, path, name, **kwargs)
# END verify url
## See #525 for ensuring git urls in config-files valid under Windows.
url = Git.polish_url(url)
# It's important to add the URL to the parent config, to let `git submodule` know.
# otherwise there is a '-' character in front of the submodule listing
# a38efa84daef914e4de58d1905a500d8d14aaf45 mymodule (v0.9.0-1-ga38efa8)
# -a38efa84daef914e4de58d1905a500d8d14aaf45 submodules/intermediate/one
with sm.repo.config_writer() as writer:
writer.set_value(sm_section(name), 'url', url)
# update configuration and index
index = sm.repo.index
with sm.config_writer(index=index, write=False) as writer:
writer.set_value('url', url)
writer.set_value('path', path)
sm._url = url
if not branch_is_default:
# store full path
writer.set_value(cls.k_head_option, br.path)
sm._branch_path = br.path
# we deliberately assume that our head matches our index !
sm.binsha = mrepo.head.commit.binsha
index.add([sm], write=True)
return sm
def update(self, recursive=False, init=True, to_latest_revision=False, progress=None, dry_run=False,
force=False, keep_going=False):
"""Update the repository of this submodule to point to the checkout
we point at with the binsha of this instance.
:param recursive: if True, we will operate recursively and update child-
modules as well.
:param init: if True, the module repository will be cloned into place if necessary
:param to_latest_revision: if True, the submodule's sha will be ignored during checkout.
Instead, the remote will be fetched, and the local tracking branch updated.
This only works if we have a local tracking branch, which is the case
            if the remote repository had a master branch, or if the 'branch' option
was specified for this submodule and the branch existed remotely
:param progress: UpdateProgress instance or None if no progress should be shown
:param dry_run: if True, the operation will only be simulated, but not performed.
All performed operations are read-only
:param force:
            If True, we may reset heads even if the repository in question is dirty. Additionally we will be allowed
to set a tracking branch which is ahead of its remote branch back into the past or the location of the
remote branch. This will essentially 'forget' commits.
If False, local tracking branches that are in the future of their respective remote branches will simply
not be moved.
:param keep_going: if True, we will ignore but log all errors, and keep going recursively.
Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see
otherwise.
In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules
:note: does nothing in bare repositories
        :note: method is definitely not atomic if recursive is True
:return: self"""
if self.repo.bare:
return self
# END pass in bare mode
if progress is None:
progress = UpdateProgress()
# END handle progress
prefix = ''
if dry_run:
prefix = "DRY-RUN: "
# END handle prefix
# to keep things plausible in dry-run mode
if dry_run:
mrepo = None
# END init mrepo
try:
# ASSURE REPO IS PRESENT AND UPTODATE
#####################################
try:
mrepo = self.module()
rmts = mrepo.remotes
len_rmts = len(rmts)
for i, remote in enumerate(rmts):
op = FETCH
if i == 0:
op |= BEGIN
# END handle start
progress.update(op, i, len_rmts, prefix + "Fetching remote %s of submodule %r"
% (remote, self.name))
#===============================
if not dry_run:
remote.fetch(progress=progress)
# END handle dry-run
#===============================
if i == len_rmts - 1:
op |= END
# END handle end
progress.update(op, i, len_rmts, prefix + "Done fetching remote of submodule %r" % self.name)
# END fetch new data
except InvalidGitRepositoryError:
if not init:
return self
# END early abort if init is not allowed
# there is no git-repository yet - but delete empty paths
checkout_module_abspath = self.abspath
if not dry_run and osp.isdir(checkout_module_abspath):
try:
os.rmdir(checkout_module_abspath)
except OSError:
raise OSError("Module directory at %r does already exist and is non-empty"
% checkout_module_abspath)
# END handle OSError
# END handle directory removal
# don't check it out at first - nonetheless it will create a local
# branch according to the remote-HEAD if possible
progress.update(BEGIN | CLONE, 0, 1, prefix + "Cloning url '%s' to '%s' in submodule %r" %
(self.url, checkout_module_abspath, self.name))
if not dry_run:
mrepo = self._clone_repo(self.repo, self.url, self.path, self.name, n=True)
# END handle dry-run
progress.update(END | CLONE, 0, 1, prefix + "Done cloning to %s" % checkout_module_abspath)
if not dry_run:
# see whether we have a valid branch to checkout
try:
# find a remote which has our branch - we try to be flexible
remote_branch = find_first_remote_branch(mrepo.remotes, self.branch_name)
local_branch = mkhead(mrepo, self.branch_path)
# have a valid branch, but no checkout - make sure we can figure
# that out by marking the commit with a null_sha
local_branch.set_object(Object(mrepo, self.NULL_BIN_SHA))
# END initial checkout + branch creation
# make sure HEAD is not detached
mrepo.head.set_reference(local_branch, logmsg="submodule: attaching head to %s" % local_branch)
mrepo.head.ref.set_tracking_branch(remote_branch)
except (IndexError, InvalidGitRepositoryError):
log.warn("Failed to checkout tracking branch %s", self.branch_path)
# END handle tracking branch
# NOTE: Have to write the repo config file as well, otherwise
# the default implementation will be offended and not update the repository
# Maybe this is a good way to assure it doesn't get into our way, but
                    # we want to stay backwards compatible too... It's so redundant!
with self.repo.config_writer() as writer:
writer.set_value(sm_section(self.name), 'url', self.url)
# END handle dry_run
# END handle initialization
# DETERMINE SHAS TO CHECKOUT
############################
binsha = self.binsha
hexsha = self.hexsha
if mrepo is not None:
# mrepo is only set if we are not in dry-run mode or if the module existed
is_detached = mrepo.head.is_detached
# END handle dry_run
if mrepo is not None and to_latest_revision:
msg_base = "Cannot update to latest revision in repository at %r as " % mrepo.working_dir
if not is_detached:
rref = mrepo.head.ref.tracking_branch()
if rref is not None:
rcommit = rref.commit
binsha = rcommit.binsha
hexsha = rcommit.hexsha
else:
log.error("%s a tracking branch was not set for local branch '%s'", msg_base, mrepo.head.ref)
# END handle remote ref
else:
log.error("%s there was no local tracking branch", msg_base)
# END handle detached head
# END handle to_latest_revision option
# update the working tree
# handles dry_run
if mrepo is not None and mrepo.head.commit.binsha != binsha:
# We must assure that our destination sha (the one to point to) is in the future of our current head.
# Otherwise, we will reset changes that might have been done on the submodule, but were not yet pushed
# We also handle the case that history has been rewritten, leaving no merge-base. In that case
# we behave conservatively, protecting possible changes the user had done
may_reset = True
if mrepo.head.commit.binsha != self.NULL_BIN_SHA:
base_commit = mrepo.merge_base(mrepo.head.commit, hexsha)
if len(base_commit) == 0 or base_commit[0].hexsha == hexsha:
if force:
msg = "Will force checkout or reset on local branch that is possibly in the future of"
msg += "the commit it will be checked out to, effectively 'forgetting' new commits"
log.debug(msg)
else:
msg = "Skipping %s on branch '%s' of submodule repo '%s' as it contains un-pushed commits"
msg %= (is_detached and "checkout" or "reset", mrepo.head, mrepo)
log.info(msg)
may_reset = False
# end handle force
# end handle if we are in the future
if may_reset and not force and mrepo.is_dirty(index=True, working_tree=True, untracked_files=True):
raise RepositoryDirtyError(mrepo, "Cannot reset a dirty repository")
# end handle force and dirty state
# end handle empty repo
# end verify future/past
progress.update(BEGIN | UPDWKTREE, 0, 1, prefix +
"Updating working tree at %s for submodule %r to revision %s"
% (self.path, self.name, hexsha))
if not dry_run and may_reset:
if is_detached:
                        # NOTE: for now we force, the user is not supposed to change detached
# submodules anyway. Maybe at some point this becomes an option, to
# properly handle user modifications - see below for future options
# regarding rebase and merge.
mrepo.git.checkout(hexsha, force=force)
else:
mrepo.head.reset(hexsha, index=True, working_tree=True)
# END handle checkout
# if we may reset/checkout
progress.update(END | UPDWKTREE, 0, 1, prefix + "Done updating working tree for submodule %r"
% self.name)
# END update to new commit only if needed
except Exception as err:
if not keep_going:
raise
log.error(str(err))
# end handle keep_going
# HANDLE RECURSION
##################
if recursive:
# in dry_run mode, the module might not exist
if mrepo is not None:
for submodule in self.iter_items(self.module()):
submodule.update(recursive, init, to_latest_revision, progress=progress, dry_run=dry_run,
force=force, keep_going=keep_going)
# END handle recursive update
# END handle dry run
# END for each submodule
return self
@unbare_repo
def move(self, module_path, configuration=True, module=True):
"""Move the submodule to a another module path. This involves physically moving
the repository at our current path, changing the configuration, as well as
adjusting our index entry accordingly.
        :param module_path: the path to which to move our module in the parent repository's working tree,
given as repository-relative or absolute path. Intermediate directories will be created
accordingly. If the path already exists, it must be empty.
Trailing (back)slashes are removed automatically
:param configuration: if True, the configuration will be adjusted to let
the submodule point to the given path.
:param module: if True, the repository managed by this submodule
will be moved as well. If False, we don't move the submodule's checkout, which may leave
the parent repository in an inconsistent state.
:return: self
:raise ValueError: if the module path existed and was not empty, or was a file
:note: Currently the method is not atomic, and it could leave the repository
in an inconsistent state if a sub-step fails for some reason
"""
if module + configuration < 1:
raise ValueError("You must specify to move at least the module or the configuration of the submodule")
# END handle input
module_checkout_path = self._to_relative_path(self.repo, module_path)
# VERIFY DESTINATION
if module_checkout_path == self.path:
return self
# END handle no change
module_checkout_abspath = join_path_native(self.repo.working_tree_dir, module_checkout_path)
if osp.isfile(module_checkout_abspath):
raise ValueError("Cannot move repository onto a file: %s" % module_checkout_abspath)
# END handle target files
index = self.repo.index
tekey = index.entry_key(module_checkout_path, 0)
# if the target item already exists, fail
if configuration and tekey in index.entries:
raise ValueError("Index entry for target path did already exist")
# END handle index key already there
# remove existing destination
if module:
if osp.exists(module_checkout_abspath):
if len(os.listdir(module_checkout_abspath)):
raise ValueError("Destination module directory was not empty")
# END handle non-emptiness
if osp.islink(module_checkout_abspath):
os.remove(module_checkout_abspath)
else:
os.rmdir(module_checkout_abspath)
# END handle link
else:
# recreate parent directories
# NOTE: renames() does that now
pass
# END handle existence
# END handle module
# move the module into place if possible
cur_path = self.abspath
renamed_module = False
if module and osp.exists(cur_path):
os.renames(cur_path, module_checkout_abspath)
renamed_module = True
if osp.isfile(osp.join(module_checkout_abspath, '.git')):
module_abspath = self._module_abspath(self.repo, self.path, self.name)
self._write_git_file_and_module_config(module_checkout_abspath, module_abspath)
# end handle git file rewrite
# END move physical module
# rename the index entry - have to manipulate the index directly as
# git-mv cannot be used on submodules ... yeah
previous_sm_path = self.path
try:
if configuration:
try:
ekey = index.entry_key(self.path, 0)
entry = index.entries[ekey]
del(index.entries[ekey])
nentry = git.IndexEntry(entry[:3] + (module_checkout_path,) + entry[4:])
index.entries[tekey] = nentry
except KeyError:
raise InvalidGitRepositoryError("Submodule's entry at %r did not exist" % (self.path))
# END handle submodule doesn't exist
# update configuration
with self.config_writer(index=index) as writer: # auto-write
writer.set_value('path', module_checkout_path)
self.path = module_checkout_path
# END handle configuration flag
except Exception:
if renamed_module:
os.renames(module_checkout_abspath, cur_path)
# END undo module renaming
raise
# END handle undo rename
        # Auto-rename submodule if its name was 'default', that is, the checkout directory
if previous_sm_path == self.name:
self.rename(module_checkout_path)
# end
return self
@unbare_repo
def remove(self, module=True, force=False, configuration=True, dry_run=False):
"""Remove this submodule from the repository. This will remove our entry
from the .gitmodules file and the entry in the .git/config file.
:param module: If True, the module checkout we point to will be deleted
as well. If the module is currently on a commit which is not part
            of any branch in the remote, if the currently checked out branch
            is ahead of its tracking branch, or if you have modifications in the
            working tree or untracked files, the removal will fail.
            In case the removal of the repository fails for these reasons, the
submodule status will not have been altered.
If this submodule has child-modules on its own, these will be deleted
prior to touching the own module.
:param force: Enforces the deletion of the module even though it contains
modifications. This basically enforces a brute-force file system based
deletion.
:param configuration: if True, the submodule is deleted from the configuration,
            otherwise it isn't. Although this should be enabled most of the time,
this flag enables you to safely delete the repository of your submodule.
:param dry_run: if True, we will not actually do anything, but throw the errors
we would usually throw
:return: self
:note: doesn't work in bare repositories
:note: doesn't work atomically, as failure to remove any part of the submodule will leave
an inconsistent state
:raise InvalidGitRepositoryError: thrown if the repository cannot be deleted
:raise OSError: if directories or files could not be removed"""
if not (module or configuration):
raise ValueError("Need to specify to delete at least the module, or the configuration")
# END handle parameters
# Recursively remove children of this submodule
nc = 0
for csm in self.children():
nc += 1
csm.remove(module, force, configuration, dry_run)
del(csm)
# end
if configuration and not dry_run and nc > 0:
# Assure we don't leave the parent repository in a dirty state, and commit our changes
# It's important for recursive, unforced, deletions to work as expected
self.module().index.commit("Removed at least one of child-modules of '%s'" % self.name)
# end handle recursion
# DELETE REPOSITORY WORKING TREE
################################
if module and self.module_exists():
mod = self.module()
git_dir = mod.git_dir
if force:
# take the fast lane and just delete everything in our module path
# TODO: If we run into permission problems, we have a highly inconsistent
# state. Delete the .git folders last, start with the submodules first
mp = self.abspath
method = None
if osp.islink(mp):
method = os.remove
elif osp.isdir(mp):
method = rmtree
elif osp.exists(mp):
raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory")
# END handle brutal deletion
if not dry_run:
assert method
method(mp)
# END apply deletion method
else:
# verify we may delete our module
if mod.is_dirty(index=True, working_tree=True, untracked_files=True):
raise InvalidGitRepositoryError(
"Cannot delete module at %s with any modifications, unless force is specified"
% mod.working_tree_dir)
# END check for dirt
# figure out whether we have new commits compared to the remotes
# NOTE: If the user pulled all the time, the remote heads might
# not have been updated, so commits coming from the remote look
# as if they come from us. But we stay strictly read-only and
# don't fetch beforehand.
for remote in mod.remotes:
num_branches_with_new_commits = 0
rrefs = remote.refs
for rref in rrefs:
num_branches_with_new_commits += len(mod.git.cherry(rref)) != 0
# END for each remote ref
# not a single remote branch contained all our commits
if len(rrefs) and num_branches_with_new_commits == len(rrefs):
raise InvalidGitRepositoryError(
"Cannot delete module at %s as there are new commits" % mod.working_tree_dir)
# END handle new commits
                    # have to manually delete references as python has no block scoping -
                    # they could keep handles open (on windows this is a problem)
if len(rrefs):
del(rref) # skipcq: PYL-W0631
# END handle remotes
del(rrefs)
del(remote)
# END for each remote
# finally delete our own submodule
if not dry_run:
self._clear_cache()
wtd = mod.working_tree_dir
del(mod) # release file-handles (windows)
import gc
gc.collect()
try:
rmtree(wtd)
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
raise SkipTest("FIXME: fails with: PermissionError\n {}".format(ex))
raise
# END delete tree if possible
# END handle force
if not dry_run and osp.isdir(git_dir):
self._clear_cache()
try:
rmtree(git_dir)
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
                    raise SkipTest("FIXME: fails with: PermissionError\n {}".format(ex))
else:
raise
# end handle separate bare repository
# END handle module deletion
# void our data not to delay invalid access
if not dry_run:
self._clear_cache()
# DELETE CONFIGURATION
######################
if configuration and not dry_run:
# first the index-entry
parent_index = self.repo.index
try:
del(parent_index.entries[parent_index.entry_key(self.path, 0)])
except KeyError:
pass
# END delete entry
parent_index.write()
# now git config - need the config intact, otherwise we can't query
# information anymore
with self.repo.config_writer() as writer:
writer.remove_section(sm_section(self.name))
with self.config_writer() as writer:
writer.remove_section()
# END delete configuration
return self
def set_parent_commit(self, commit, check=True):
"""Set this instance to use the given commit whose tree is supposed to
contain the .gitmodules blob.
:param commit:
Commit'ish reference pointing at the root_tree, or None to always point to the
most recent commit
:param check:
if True, relatively expensive checks will be performed to verify
validity of the submodule.
:raise ValueError: if the commit's tree didn't contain the .gitmodules blob.
:raise ValueError:
if the parent commit didn't store this submodule under the current path
:return: self"""
if commit is None:
self._parent_commit = None
return self
# end handle None
pcommit = self.repo.commit(commit)
pctree = pcommit.tree
if self.k_modules_file not in pctree:
raise ValueError("Tree of commit %s did not contain the %s file" % (commit, self.k_modules_file))
# END handle exceptions
prev_pc = self._parent_commit
self._parent_commit = pcommit
if check:
parser = self._config_parser(self.repo, self._parent_commit, read_only=True)
if not parser.has_section(sm_section(self.name)):
self._parent_commit = prev_pc
raise ValueError("Submodule at path %r did not exist in parent commit %s" % (self.path, commit))
# END handle submodule did not exist
# END handle checking mode
# update our sha, it could have changed
# If check is False, we might see a parent-commit that doesn't even contain the submodule anymore.
# in that case, mark our sha as being NULL
try:
self.binsha = pctree[self.path].binsha
except KeyError:
self.binsha = self.NULL_BIN_SHA
# end
self._clear_cache()
return self
@unbare_repo
def config_writer(self, index=None, write=True):
""":return: a config writer instance allowing you to read and write the data
belonging to this submodule into the .gitmodules file.
:param index: if not None, an IndexFile instance which should be written.
defaults to the index of the Submodule's parent repository.
:param write: if True, the index will be written each time a configuration
value changes.
:note: the parameters allow for a more efficient writing of the index,
as you can pass in a modified index on your own, prevent automatic writing,
and write yourself once the whole operation is complete
:raise ValueError: if trying to get a writer on a parent_commit which does not
match the current head commit
:raise IOError: If the .gitmodules file/blob could not be read"""
writer = self._config_parser_constrained(read_only=False)
if index is not None:
writer.config._index = index
writer.config._auto_write = write
return writer
@unbare_repo
def rename(self, new_name):
"""Rename this submodule
:note: This method takes care of renaming the submodule in various places, such as
* $parent_git_dir/config
* $working_tree_dir/.gitmodules
* (git >=v1.8.0: move submodule repository to new name)
As .gitmodules will be changed, you would need to make a commit afterwards. The changed .gitmodules file
will already be added to the index
:return: this submodule instance
"""
if self.name == new_name:
return self
# .git/config
with self.repo.config_writer() as pw:
# As we ourselves didn't write anything about submodules into the parent .git/config,
# we will not require it to exist, and just ignore missing entries.
if pw.has_section(sm_section(self.name)):
pw.rename_section(sm_section(self.name), sm_section(new_name))
# .gitmodules
with self.config_writer(write=True).config as cw:
cw.rename_section(sm_section(self.name), sm_section(new_name))
self._name = new_name
# .git/modules
mod = self.module()
if mod.has_separate_working_tree():
destination_module_abspath = self._module_abspath(self.repo, self.path, new_name)
source_dir = mod.git_dir
# Let's be sure the submodule name is not so obviously tied to a directory
if destination_module_abspath.startswith(mod.git_dir):
tmp_dir = self._module_abspath(self.repo, self.path, str(uuid.uuid4()))
os.renames(source_dir, tmp_dir)
source_dir = tmp_dir
# end handle self-containment
os.renames(source_dir, destination_module_abspath)
self._write_git_file_and_module_config(mod.working_tree_dir, destination_module_abspath)
# end move separate git repository
return self
#} END edit interface
#{ Query Interface
@unbare_repo
def module(self):
""":return: Repo instance initialized from the repository at our submodule path
:raise InvalidGitRepositoryError: if a repository was not available. This could
also mean that it was not yet initialized"""
# late import to workaround circular dependencies
module_checkout_abspath = self.abspath
try:
repo = git.Repo(module_checkout_abspath)
if repo != self.repo:
return repo
# END handle repo uninitialized
except (InvalidGitRepositoryError, NoSuchPathError):
raise InvalidGitRepositoryError("No valid repository at %s" % module_checkout_abspath)
else:
raise InvalidGitRepositoryError("Repository at %r was not yet checked out" % module_checkout_abspath)
# END handle exceptions
def module_exists(self):
""":return: True if our module exists and is a valid git repository. See module() method"""
try:
self.module()
return True
except Exception:
return False
# END handle exception
def exists(self):
"""
:return: True if the submodule exists, False otherwise. Please note that
a submodule may exist (in the .gitmodules file) even though its module
doesn't exist on disk"""
# keep attributes for later, and restore them if we have no valid data
# this way we do not actually alter the state of the object
loc = locals()
for attr in self._cache_attrs:
try:
if hasattr(self, attr):
loc[attr] = getattr(self, attr)
# END if we have the attribute cache
except (cp.NoSectionError, ValueError):
# on PY3, this can happen apparently ... don't know why this doesn't happen on PY2
pass
# END for each attr
self._clear_cache()
try:
try:
self.path
return True
except Exception:
return False
# END handle exceptions
finally:
for attr in self._cache_attrs:
if attr in loc:
setattr(self, attr, loc[attr])
# END if we have a cache
# END reapply each attribute
# END handle object state consistency
@property
def branch(self):
""":return: The branch instance that we are to checkout
:raise InvalidGitRepositoryError: if our module is not yet checked out"""
return mkhead(self.module(), self._branch_path)
@property
def branch_path(self):
"""
:return: full (relative) path as string to the branch we would checkout
from the remote and track"""
return self._branch_path
@property
def branch_name(self):
""":return: the name of the branch, which is the shortest possible branch name"""
# use an instance method, for this we create a temporary Head instance
# which uses a repository that is available at least ( it makes no difference )
return git.Head(self.repo, self._branch_path).name
@property
def url(self):
""":return: The url to the repository which our module-repository refers to"""
return self._url
@property
def parent_commit(self):
""":return: Commit instance with the tree containing the .gitmodules file
:note: will always point to the current head's commit if it was not set explicitly"""
if self._parent_commit is None:
return self.repo.commit()
return self._parent_commit
@property
def name(self):
""":return: The name of this submodule. It is used to identify it within the
.gitmodules file.
:note: by default, the name is the path at which to find the submodule, but
in git-python it should be a unique identifier similar to the identifiers
used for remotes, which allows the path of the submodule to be changed
easily
"""
return self._name
def config_reader(self):
"""
:return: ConfigReader instance which allows you to query the configuration values
of this submodule, as provided by the .gitmodules file
:note: The config reader will actually read the data directly from the repository
and thus does not need nor care about your working tree.
:note: Should be cached by the caller and only kept as long as needed
:raise IOError: If the .gitmodules file/blob could not be read"""
return self._config_parser_constrained(read_only=True)
def children(self):
"""
:return: IterableList(Submodule, ...) an iterable list of submodule instances
which are children of this submodule or 0 if the submodule is not checked out"""
return self._get_intermediate_items(self)
#} END query interface
#{ Iterable Interface
@classmethod
def iter_items(cls, repo, parent_commit='HEAD'):
""":return: iterator yielding Submodule instances available in the given repository"""
pc = repo.commit(parent_commit) # parent commit instance
try:
parser = cls._config_parser(repo, pc, read_only=True)
except IOError:
return
# END handle empty iterator
rt = pc.tree # root tree
for sms in parser.sections():
n = sm_name(sms)
p = parser.get(sms, 'path')
u = parser.get(sms, 'url')
b = cls.k_head_default
if parser.has_option(sms, cls.k_head_option):
b = str(parser.get(sms, cls.k_head_option))
# END handle optional information
# get the binsha
index = repo.index
try:
sm = rt[p]
except KeyError:
# try the index, maybe it was just added
try:
entry = index.entries[index.entry_key(p, 0)]
sm = Submodule(repo, entry.binsha, entry.mode, entry.path)
except KeyError:
# The submodule doesn't exist, probably it wasn't
# removed from the .gitmodules file.
continue
# END handle keyerror
# END handle critical error
# fill in remaining info - saves time as it doesn't have to be parsed again
sm._name = n
if pc != repo.commit():
sm._parent_commit = pc
# end set only if not most recent !
sm._branch_path = git.Head.to_full_path(b)
sm._url = u
yield sm
# END for each section
#} END iterable interface
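# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the query interface defined above; the
# repository path is an assumption, and `git` is the module-level import
# already used by module().
def _example_list_submodules(repo_path="/path/to/parent/repo"):
    """Sketch: print each submodule's name, url and checkout state."""
    parent = git.Repo(repo_path)
    for sm in Submodule.iter_items(parent):
        print(sm.name, sm.url, "checked out:", sm.module_exists())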
|
the-stack_106_18504
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: [email protected]
import os
import string
import random
from flask import flash
from werkzeug.utils import secure_filename
from application import app, db
from application.flicket.models.flicket_models import FlicketUploads
from application.flicket.models.flicket_user import FlicketUser
class UploadFile:
def __init__(self, file):
"""
Takes a file object from form submission.
:param file:
"""
self.file = file
self.file_extension = self.get_extension()
if self.file_extension:
self.file_name = self.random_filename(self.file.filename, characters=8)
else:
self.file_name = None
self.upload_folder = None
self.target_file = None
self.allowed_extensions = []
def get_extension(self):
try:
_ext = self.file.filename.rsplit('.', 1)[1]
return _ext
except IndexError:
return False
def random_filename(self, file_name, characters=8):
"""
Returns a random filename using lowercase letters and digits.
:return: string
"""
new_file_name = secure_filename(file_name)
while True:
chars = string.ascii_lowercase + string.digits
output_string = ''.join(random.choice(chars) for _ in range(characters))
new_file_name = output_string + '.' + self.file_extension
if not os.path.isfile(new_file_name):
break
return new_file_name
def check_extension(self):
"""
Checks that it is a valid filename with a valid extension.
Returns True if valid
:return: Boolean
"""
return '.' in self.file_name and self.file_extension in self.allowed_extensions
def upload_file(self):
"""
Save the file to the upload folder. Returns the file object on success, otherwise False.
:return: file object or False
"""
if self.file_name and self.upload_folder:
self.target_file = os.path.join(self.upload_folder, self.file_name)
else:
# print('Problem with file_name {} or upload_folder {}.'.format(self.file_name, self.upload_folder))
return False
# Is the file extension in the list of allowed extensions.
if self.check_extension():
self.file.save(self.target_file)
return self.file
else:
# print('There was a problem with the files extension.')
return False
class UploadAvatar(UploadFile):
def __init__(self, file, user):
super().__init__(file)
self.allowed_extensions = ['jpg']
self.user = user
self.upload_folder = app.config['avatar_upload_folder']
self.delete_existing_avatar()
def delete_existing_avatar(self):
"""
Clean up old avatar before uploading new one.
:return: nowt
"""
# Find filename in the database.
_user = FlicketUser.query.filter_by(id=self.user.id).one()
# remove the file if it exists
if _user.avatar:
os.remove(os.path.join(self.upload_folder, _user.avatar))
# null the database entry.
_user.avatar = None
db.session.commit()
class UploadAttachment(object):
"""
Class created for the uploading of attachments to tickets and comments.
Initialised with a list of file objects from form submission.
"""
def __init__(self, files):
self.files = files
self.allowed_extensions = app.config['allowed_extensions']
self.upload_folder = app.config['ticket_upload_folder']
self.new_files = None
def are_attachments(self):
"""
Check self.files to see if any files were added to the upload form. Return True if there were.
:return: Boolean
"""
if len(self.files) == 0:
return False
if self.files[0].filename == '':
return False
return True
def upload_files(self):
"""
Upload files to self.upload_folder.
:return: list of (original_filename, new_filename) tuples, or False if no files were attached
"""
# Were any files added to form?
if not self.are_attachments():
return False
self.new_files = list()
for file in self.files:
uploaded_file = UploadFile(file)
uploaded_file.upload_folder = self.upload_folder
uploaded_file.allowed_extensions = self.allowed_extensions
new_file_name = False
if uploaded_file.upload_file():
new_file_name = uploaded_file.file_name
self.new_files.append((file.filename, new_file_name))
return self.new_files
def populate_db(self, flicketobject):
topic = None
post = None
if type(flicketobject).__name__ == 'FlicketTicket':
topic = flicketobject
if type(flicketobject).__name__ == 'FlicketPost':
post = flicketobject
if self.new_files:
for new_file in self.new_files:
if new_file[1] is False:
flash('There was a problem uploading one or more of the files.', category='warning')
else:
# all looks good, so add file to the database.
new_image = FlicketUploads(
topic=topic,
post=post,
filename=new_file[1],
original_filename=new_file[0]
)
db.session.add(new_image)
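# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of wiring UploadAttachment into a view; `form.uploads.data`
# and `ticket` are assumptions standing in for a real form field and a
# FlicketTicket instance.
def _example_attach_files(form, ticket):
    uploads = UploadAttachment(form.uploads.data)
    if uploads.are_attachments():
        uploads.upload_files()       # saves each file to the configured ticket folder
        uploads.populate_db(ticket)  # records the uploads against the ticket
        db.session.commit()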
|
the-stack_106_18505
|
from flask import Blueprint, jsonify
from limbook_api.v1.auth.utils import requires_auth
from limbook_api.v1.comments import Comment
from limbook_api.v1.posts import Post
stats = Blueprint('stats', __name__)
# ====================================
# SECURE ROUTES
# ====================================
@stats.route("/stats")
@requires_auth('read:stats')
def get_stats():
"""Get stats"""
return jsonify({
"success": True,
"stats": {
"posts": Post.query.count(),
"comments": Comment.query.count()
}
})
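# --- Illustrative registration sketch (not part of the original module) ---
# Hedged example of mounting this blueprint; the app factory and url prefix
# are assumptions based on a typical Flask project layout.
def _example_register(app):
    app.register_blueprint(stats, url_prefix="/api/v1")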
|
the-stack_106_18511
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (no not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
# Check if we can enable mixed-precision via apex.amp
try:
from apex import amp
except ImportError:
raise ImportError('Use APEX for mixed precision via apex.amp')
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"--ckpt",
help="The path to the checkpoint for test, default is the latest checkpoint.",
default=None,
)
parser.add_argument(
"--is_histo_nr",
help="whether test on histo normal region dataset",
default=False,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
distributed = num_gpus > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
model = build_detection_model(cfg)
model.to(cfg.MODEL.DEVICE)
# Initialize mixed-precision if necessary
use_mixed_precision = cfg.DTYPE == 'float16'
amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)
output_dir = os.path.join(cfg.OUTPUT_DIR, cfg.OUTPUT_FOLDER)
checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt
_ = checkpointer.load(ckpt, use_latest=args.ckpt is None)
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
if cfg.MODEL.KEYPOINT_ON:
iou_types = iou_types + ("keypoints",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, cfg.OUTPUT_FOLDER, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, phase='test', is_train=False, is_distributed=distributed)
if args.is_histo_nr:
# the last dataloader is for normal region
data_loader_val_nr = data_loaders_val[-1]
# remove the last dataloader
data_loaders_val = data_loaders_val[:-1]
print("predict on normal region dataset!!!!!!!!!")
else:
data_loader_val_nr = None
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
inference(
model,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
data_loader_nr=data_loader_val_nr,
)
synchronize()
if __name__ == "__main__":
main()
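# --- Illustrative launch sketch (not part of the original script) ---
# Hedged example of a typical multi-GPU invocation; the GPU count, script
# path and config file are assumptions:
#   python -m torch.distributed.launch --nproc_per_node=4 \
#       tools/test_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml \
#       TEST.IMS_PER_BATCH 8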
|
the-stack_106_18512
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import copy
import logging
import os
from datadog_checks.base.stubs.aggregator import AggregatorStub
from datadog_checks.base.utils.common import get_docker_hostname
from datadog_checks.dev.docker import get_container_ip
from datadog_checks.snmp import SnmpCheck
log = logging.getLogger(__name__)
HOST = get_docker_hostname()
PORT = 1161
HERE = os.path.dirname(os.path.abspath(__file__))
COMPOSE_DIR = os.path.join(HERE, 'compose')
AUTH_PROTOCOLS = {'MD5': 'usmHMACMD5AuthProtocol', 'SHA': 'usmHMACSHAAuthProtocol'}
PRIV_PROTOCOLS = {'DES': 'usmDESPrivProtocol', 'AES': 'usmAesCfb128Protocol'}
AUTH_KEY = 'doggiepass'
PRIV_KEY = 'doggiePRIVkey'
SNMP_CONTAINER_NAME = 'dd-snmp'
CHECK_TAGS = ['snmp_device:{}'.format(HOST)]
SNMP_CONF = {'name': 'snmp_conf', 'ip_address': HOST, 'port': PORT, 'community_string': 'public'}
SNMP_V3_CONF = {
'name': 'snmp_v3_conf',
'ip_address': HOST,
'port': PORT,
'user': None,
'authKey': None,
'privKey': None,
'authProtocol': None,
'privProtocol': None,
'context_name': 'public',
}
MIBS_FOLDER = {'mibs_folder': os.path.join(HERE, "mibs")}
IGNORE_NONINCREASING_OID = {'ignore_nonincreasing_oid': True}
SUPPORTED_METRIC_TYPES = [
{'OID': "1.3.6.1.2.1.7.1.0", 'name': "IAmACounter32"}, # Counter32
{'OID': "1.3.6.1.2.1.4.31.1.1.6.1", 'name': "IAmACounter64"}, # Counter64
{'OID': "1.3.6.1.2.1.4.24.6.0", 'name': "IAmAGauge32"}, # Gauge32
{'OID': "1.3.6.1.2.1.88.1.1.1.0", 'name': "IAmAnInteger"}, # Integer
]
UNSUPPORTED_METRICS = [{'OID': "1.3.6.1.2.1.25.6.3.1.5.1", 'name': "IAmString"}] # String (not supported)
CAST_METRICS = [
{'OID': "1.3.6.1.4.1.2021.10.1.3.1", 'name': "cpuload1"}, # OctetString
{'OID': "1.3.6.1.4.1.2021.10.1.6.1", 'name': "cpuload2"}, # Opaque
]
CONSTRAINED_OID = [{"MIB": "TCP-MIB", "symbol": "tcpRtoAlgorithm"}]
DUMMY_MIB_OID = [
({"MIB": "DUMMY-MIB", "symbol": "scalar"}, AggregatorStub.GAUGE, 10), # Integer
# Additional types we support but that are not part of the original SNMP protocol.
({"MIB": "DUMMY-MIB", "symbol": "dummyCounterGauge"}, AggregatorStub.GAUGE, 90), # CounterBasedGauge64
({"MIB": "DUMMY-MIB", "symbol": "dummyZeroCounter"}, AggregatorStub.RATE, 120), # ZeroBasedCounter64
]
FORCED_METRICS = [
{'OID': "1.3.6.1.2.1.4.24.6.0", 'name': "IAmAGauge32", 'forced_type': 'counter'}, # Gauge32
{'OID': "1.3.6.1.2.1.4.31.1.1.6.1", 'name': "IAmACounter64", 'forced_type': 'gauge'}, # Counter32
]
INVALID_FORCED_METRICS = [
{'OID': "1.3.6.1.2.1.4.24.6.0", 'name': "IAmAGauge32", 'forced_type': 'counter'}, # Gauge32
{'OID': "1.3.6.1.2.1.4.31.1.1.6.1", 'name': "IAmACounter64", 'forced_type': 'histogram'}, # Counter32
]
SCALAR_OBJECTS = [
{'OID': "1.3.6.1.2.1.7.1.0", 'name': "udpDatagrams"},
{'OID': "1.3.6.1.2.1.6.10.0", 'name': "tcpInSegs"},
{'OID': ".1.3.6.1.6.3.10.2.1.3.0", 'name': "snmpEngineTime"}, # OID with leading dot
{'MIB': "TCP-MIB", 'symbol': "tcpCurrEstab"},
]
SCALAR_OBJECTS_WITH_TAGS = [
{'OID': "1.3.6.1.2.1.7.1.0", 'name': "udpDatagrams", 'metric_tags': ['udpdgrams', 'UDP']},
{'OID': "1.3.6.1.2.1.6.10.0", 'name': "tcpInSegs", 'metric_tags': ['tcpinsegs', 'TCP']},
{'MIB': "TCP-MIB", 'symbol': "tcpCurrEstab", 'metric_tags': ['MIB', 'TCP', 'estab']},
]
TABULAR_OBJECTS = [
{
'MIB': "IF-MIB",
'table': "ifTable",
'symbols': ["ifInOctets", "ifOutOctets"],
'metric_tags': [{'tag': "interface", 'column': "ifDescr"}, {'tag': "dumbindex", 'index': 1}],
}
]
BULK_TABULAR_OBJECTS = [
{
'MIB': "IF-MIB",
'table': "ifTable",
'symbols': [
"ifInOctets",
"ifOutOctets",
"ifInUcastPkts",
"ifInUcastPkts",
"ifInNUcastPkts",
"ifInDiscards",
"ifInErrors",
"ifInUnknownProtos",
],
'metric_tags': [{'tag': "interface", 'column': "ifDescr"}, {'tag': "dumbindex", 'index': 1}],
},
{
'MIB': "IP-MIB",
'table': "ipSystemStatsTable",
'symbols': [
"ipSystemStatsInReceives",
"ipSystemStatsHCInReceives",
"ipSystemStatsInOctets",
"ipSystemStatsHCInOctets",
"ipSystemStatsInHdrErrors",
"ipSystemStatsInNoRoutes",
"ipSystemStatsInAddrErrors",
"ipSystemStatsInUnknownProtos",
"ipSystemStatsInTruncatedPkts",
"ipSystemStatsInForwDatagrams",
"ipSystemStatsHCInForwDatagrams",
"ipSystemStatsReasmReqds",
"ipSystemStatsReasmOKs",
"ipSystemStatsReasmFails",
"ipSystemStatsInDiscards",
"ipSystemStatsInDelivers",
"ipSystemStatsHCInDelivers",
"ipSystemStatsOutRequests",
"ipSystemStatsHCOutRequests",
"ipSystemStatsOutNoRoutes",
"ipSystemStatsOutForwDatagrams",
"ipSystemStatsHCOutForwDatagrams",
"ipSystemStatsOutDiscards",
"ipSystemStatsOutFragReqds",
"ipSystemStatsOutFragOKs",
"ipSystemStatsOutFragFails",
"ipSystemStatsOutFragCreates",
"ipSystemStatsOutTransmits",
"ipSystemStatsHCOutTransmits",
"ipSystemStatsOutOctets",
"ipSystemStatsHCOutOctets",
"ipSystemStatsInMcastPkts",
],
'metric_tags': [{'tag': "dumbindex", 'index': 1}],
},
]
INVALID_METRICS = [
{
'MIB': "IF-MIB",
'table': "noIdeaWhatIAmDoingHere",
'symbols': ["ImWrong", "MeToo"],
'metric_tags': [{'tag': "dumbindex", 'index': 1}],
}
]
PLAY_WITH_GET_NEXT_METRICS = [
{"OID": "1.3.6.1.2.1.4.31.3.1.3.2", "name": "needFallback"},
{"OID": "1.3.6.1.2.1.4.31.3.1.3.2.1", "name": "noFallbackAndSameResult"},
]
RESOLVED_TABULAR_OBJECTS = [
{
"MIB": "IF-MIB",
"table": "ifTable",
"symbols": [
{"name": "ifInOctets", "OID": "1.3.6.1.2.1.2.2.1.10"},
{"name": "ifOutOctets", "OID": "1.3.6.1.2.1.2.2.1.16"},
],
"metric_tags": [
{"tag": "interface", "column": {"name": "ifDescr", "OID": "1.3.6.1.2.1.2.2.1.2"}},
{"tag": "dumbindex", "index": 1, "mapping": {1: "one", 2: "two", 3: "three", 90: "other"}},
],
}
]
def generate_instance_config(metrics, template=None):
template = template if template else SNMP_CONF
instance_config = copy.copy(template)
instance_config['metrics'] = metrics
instance_config['name'] = HOST
return instance_config
def generate_container_instance_config(metrics):
conf = copy.deepcopy(SNMP_CONF)
conf['ip_address'] = get_container_ip(SNMP_CONTAINER_NAME)
return generate_instance_config(metrics, template=conf)
def generate_v3_instance_config(metrics, name=None, user=None, auth=None, auth_key=None, priv=None, priv_key=None):
instance_config = generate_instance_config(metrics, SNMP_V3_CONF)
if name:
instance_config['name'] = name
if user:
instance_config['user'] = user
if auth:
instance_config['authProtocol'] = auth
if auth_key:
instance_config['authKey'] = auth_key
if priv:
instance_config['privProtocol'] = priv
if priv_key:
instance_config['privKey'] = priv_key
return instance_config
def create_check(instance):
return SnmpCheck('snmp', {}, [instance])
def assert_common_metrics(aggregator, tags=None):
aggregator.assert_metric('snmp.devices_monitored', metric_type=aggregator.GAUGE, tags=tags)
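# --- Illustrative test sketch (not part of the original helpers) ---
# Hedged example of how these helpers combine in a test; the `aggregator`
# fixture comes from the datadog_checks dev tooling and is an assumption here.
def _example_scalar_test(aggregator):
    instance = generate_instance_config(SCALAR_OBJECTS)
    check = create_check(instance)
    check.check(instance)
    assert_common_metrics(aggregator, tags=CHECK_TAGS)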
|
the-stack_106_18513
|
from planner.frame.figure import Figure
from planner.frame.aperture import Aperture
from planner.frame.bulkhead import Bulkhead
from svgwrite import shapes
class RectFrame(Figure):
""" Rectangle frame representation """
DEFAULT_PARAMS = {"stroke": "#000", "stroke-width": "2"}
def __init__(self, x=0, y=0, width=1, height=1, wall_width=1, **attribs):
self.corner = (x, y)
self.size = (width, height)
self.inner_corner = (x + wall_width, y + wall_width)
self.inner_size = (width - 2 * wall_width, height - 2 * wall_width)
self.x = x
self.y = y
self.width = width
self.height = height
self.wall_width = wall_width
self.attribs = attribs or dict()
self.apertures = []
self.bulkheads = []
self.stroke_width = attribs.get('stroke-width') or self.DEFAULT_PARAMS.get('stroke-width')
def _draw(self):
rect_params = self.DEFAULT_PARAMS.copy()
rect_params.update(self.attribs)
res = []
# Hatching and filling
if hasattr(self, "hatch") and self.hatch:
rect_params['style'] = "fill: url(#{})".format(self._hatching_id)
res.append(self.hatch)
if hasattr(self, "filling"):
rect_params['fill'] = self.filling
else:
if 'fill' not in rect_params:
rect_params['fill'] = "#fff"
# Create outer and inner rects
del rect_params['stroke-width']
del rect_params['stroke']
top_rect = shapes.Rect(self.corner, (self.size[0], self.wall_width), **rect_params)
left_rect = shapes.Rect(self.corner, (self.wall_width, self.size[1]), **rect_params)
right_rect = shapes.Rect(
(self.corner[0] + self.size[0] - self.wall_width, self.corner[1]),
(self.wall_width, self.size[1]), **rect_params)
bottom_rect = shapes.Rect(
(self.corner[0], self.corner[1] + self.size[1] - self.wall_width),
(self.size[0], self.wall_width), **rect_params)
inner_params = self.DEFAULT_PARAMS.copy()
inner_params.update(self.attribs)
inner_params['fill-opacity'] = "0"
rect = shapes.Rect(self.corner, self.size, **inner_params)
inner_rect = shapes.Rect(self.inner_corner, self.inner_size, **inner_params)
res.extend((top_rect, left_rect, right_rect, bottom_rect, rect, inner_rect))
# Apertures
if self.apertures:
for aperture in self.apertures:
res.append(aperture._draw())
# Bulkheads
borders = []
backgrounds = []
if self.bulkheads:
for bulkhead in self.bulkheads:
border, *background = bulkhead._draw()
borders.append(border)
backgrounds.extend(background)
res.extend(borders)
res.extend(backgrounds)
return res
def _get_aperture_lines_coordinates(self):
outer_lines = []
# left
outer_lines.append(((self.x, self.y + self.wall_width), (self.x, self.y + self.height - self.wall_width)))
# top
outer_lines.append(((self.x + self.wall_width, self.y), (self.x + self.width - self.wall_width, self.y)))
# right
outer_lines.append((
(self.x + self.width - self.wall_width, self.y + self.wall_width),
(self.x + self.width - self.wall_width, self.y + self.height - self.wall_width)))
# bottom
outer_lines.append((
(self.x + self.wall_width, self.y + self.height - self.wall_width),
(self.x + self.width - self.wall_width, self.y + self.height - self.wall_width)))
return outer_lines
def _is_point_on_lines(self, lines, point):
return any([self._is_point_on_line(p[0], p[1], point) for p in lines])
def add_aperture(self, x, y, width, **attribs):
"""
Add aperture (door, window, etc) to the wall.
x, y - coordinates of left-top corner, should be located on wall border
"""
outer_lines = self._get_aperture_lines_coordinates()
coords = (x, y)
# Propagate stroke-width
if 'stroke-width' not in attribs:
attribs['stroke-width'] = self.stroke_width
aperture = Aperture.match_wall_and_create(coords, width, outer_lines, self.wall_width, **attribs)
if not aperture:
raise ValueError("Coordinates {}, {} of aparture left corner not located on the wall border".format(x, y))
self.apertures.append(aperture)
return aperture
def add_bulkhead(self, x, y, width, **attribs):
"""
Add bulkhead to current frame,
x, y - should be coordinates of left-top corner and lay on left or top inner wall border
"""
top_left_corner = (self.x + self.wall_width, self.y + self.wall_width)
bottom_left_corner = (self.x + self.wall_width, self.y + self.height - 2 * self.wall_width)
top_right_corner = (self.x + self.width - 2 * self.wall_width, self.y + self.wall_width)
# horizontal
if self._is_point_on_line(top_left_corner, bottom_left_corner, (x, y)):
end_point = (x + self.width - 2 * self.wall_width, y + width)
# vertical
elif self._is_point_on_line(top_left_corner, top_right_corner, (x, y)):
end_point = (x + width, y + self.height - 2 * self.wall_width)
# error
else:
raise ValueError('Wrong coordinates, left-top corner should lay on left or top inner wall border')
# Propagate stroke-width
if 'stroke-width' not in attribs:
attribs['stroke-width'] = self.stroke_width
bulkhead = Bulkhead((x, y), end_point, **attribs)
self.bulkheads.append(bulkhead)
return bulkhead
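# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of composing a frame with an aperture and a bulkhead;
# all dimensions are arbitrary assumptions.
def _example_room():
    frame = RectFrame(x=0, y=0, width=100, height=60, wall_width=5)
    frame.add_aperture(20, 0, 15)   # door on the top outer wall
    frame.add_bulkhead(5, 30, 5)    # partition starting on the left inner wall
    return frame._draw()            # list of svgwrite shapes, ready for a Drawing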
|
the-stack_106_18514
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""Tutorial code to play around with graph recompilation and executable loading
Parameters to play around with are CACHING, NOMULTISESSION, PLACEHOLDER,
and SAMEBATCH. Some comments in the document refer to the underlying tutorial
in the documentation portal.
The code will print out what the expected behaviour should look like.
"""
import os
import numpy as np
import tensorflow as tf
from tensorflow.python import ipu
from tensorflow.python.ipu.scopes import ipu_scope
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Consideration 0: Environment setup
CACHING = True # Cache compiled graph. The folder is tmp_tutorial.
# Consideration 1: Sessions
NOMULTISESSION = True # Avoid using different sessions.
# Consideration 2, 4, 5: Graphs, Weights, Constants
# Use a placeholder that is handed over to the graph instead of a hard coded
# hyperparameter that might change between executions.
PLACEHOLDER = True
# Consideration 3: Batch size
SAMEBATCH = True # If False, the batch size is changed between executions.
# Consideration 0: Environment setup
if "TF_POPLAR_FLAGS" in os.environ and not CACHING:
os.environ["TF_POPLAR_FLAGS"] = ""
else:
os.environ["TF_POPLAR_FLAGS"] = "--executable_cache_path=tmp_tutorial"
if "POPLAR_LOG_LEVEL" not in os.environ or \
os.environ["POPLAR_LOG_LEVEL"] != "INFO":
print("Setting POPLAR_LOG_LEVEL to INFO for graph compilation information.")
os.environ["POPLAR_LOG_LEVEL"] = "INFO"
# Consideration 6
os.environ["XLA_FLAGS"] = "--xla_dump_to=tmp_xla_{} ".format(
np.random.randint(2, 101))
os.environ["XLA_FLAGS"] += " --xla_dump_hlo_pass_re=forward-allocation "
os.environ["XLA_FLAGS"] += " --xla_hlo_graph_sharding_color "
os.environ["XLA_FLAGS"] += " --xla_dump_hlo_as_text "
# Configure arguments for targeting the IPU
cfg = ipu.utils.create_ipu_config()
cfg = ipu.utils.auto_select_ipus(cfg, 1)
ipu.utils.configure_ipu_system(cfg)
with tf.device("cpu"):
pa = tf.placeholder(np.float32, [None, 2], name="a")
pb = tf.placeholder(np.float32, [None, 2], name="b")
pc = tf.placeholder(np.float32, [None, 2], name="c")
if PLACEHOLDER:
mult = tf.placeholder(np.float32, [], name="multiplier")
else:
mult = np.random.uniform(0, 1)
def basic_graph(pa, pb, pc):
# Do basic addition with tensors
o1 = pa + pb
o2 = pa + pc
simple_graph_output = mult * (o1 + o2)
return simple_graph_output
with ipu_scope("/device:IPU:0"):
comp_graph = basic_graph(pa, pb, pc)
print("\nWarm up & Caching Test: ")
print("No compilation after first execution expected but executable load. \n")
with tf.Session() as sess1, tf.Session() as sess2:
# Run the graph through the session feeding it an arbitrary dictionary
if PLACEHOLDER:
result0 = sess1.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
mult: 10.0
})
else:
result0 = sess1.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
})
# Consideration 2, 4, 5: Graphs, Weights, Constants
m = np.random.uniform(0, 1)
if not PLACEHOLDER:
mult = m
with ipu_scope("/device:IPU:0"):
comp_graph = basic_graph(pa, pb, pc)
with tf.Session() as sess1, tf.Session() as sess2:
print("\nPlaceholder test. ")
print("No recompilation but executable switch should occur.\n")
# Run the graph through the session feeding it an arbitrary dictionary
if PLACEHOLDER:
result1 = sess1.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
mult: m
})
else:
result1 = sess1.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
})
# Consideration 1: Sessions
if NOMULTISESSION:
sess2 = sess1
else:
print("Switching session.")
print("\nSession Test.")
print("No recompilation or executable switch should occur.\n")
if PLACEHOLDER:
result2 = sess2.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
mult: m
})
else:
result2 = sess2.run(comp_graph,
feed_dict={
pa: [[1., 1.]],
pb: [[0., 1.]],
pc: [[1., 5.]],
})
# Consideration 3: Batch size
if SAMEBATCH:
bs = 1
else:
bs = np.random.randint(2, 101)
print("\nBatch Size Test with batch size %d." % bs)
print("No recompilation or executable switch should occur.")
print("Batch size should be the original 1.\n")
if PLACEHOLDER:
result3 = sess2.run(comp_graph,
feed_dict={
pa: [[1., 1.]] * bs,
pb: [[0., 1.]] * bs,
pc: [[1., 5.]] * bs,
mult: m
})
else:
result3 = sess2.run(comp_graph,
feed_dict={
pa: [[1., 1.]] * bs,
pb: [[0., 1.]] * bs,
pc: [[1., 5.]] * bs,
})
print("\nFirst two results should be different (different multiplier).\n")
print("Caching/warm up test:\t", result0)
print()
print("Placeholder test: \t", result1)
print()
print("Session test: \t", result2)
print()
if bs > 1:
print("Batch size test: \t", result3[:2], "...")
else:
print("Batch size test: \t", result3)
|
the-stack_106_18515
|
#!/usr/bin/env python
import os
import sys
import signal
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURDIR, 'helpers'))
from client import client, prompt, end_of_block
log = None
# uncomment the line below for debugging
#log=sys.stdout
with client(name='client1>', log=log) as client1, client(name='client2>', log=log) as client2:
client1.expect(prompt)
client2.expect(prompt)
client1.send('SET allow_experimental_live_view = 1')
client1.expect(prompt)
client2.send('SET allow_experimental_live_view = 1')
client2.expect(prompt)
client1.send('DROP TABLE IF EXISTS test.lv')
client1.expect(prompt)
client1.send('DROP TABLE IF EXISTS test.mt')
client1.expect(prompt)
client1.send('CREATE TABLE test.mt (a Int32) Engine=MergeTree order by tuple()')
client1.expect(prompt)
client1.send('CREATE LIVE VIEW test.lv AS SELECT sum(a) FROM test.mt')
client1.expect(prompt)
client1.send('WATCH test.lv')
client1.expect('_version')
client1.expect(r'0.*1' + end_of_block)
client2.send('INSERT INTO test.mt VALUES (1),(2),(3)')
client1.expect(r'6.*2' + end_of_block)
client2.expect(prompt)
client2.send('INSERT INTO test.mt VALUES (4),(5),(6)')
client1.expect(r'21.*3' + end_of_block)
client2.expect(prompt)
for i in range(1, 129):
client2.send('INSERT INTO test.mt VALUES (1)')
client1.expect(r'%d.*%d' % (21 + i, 3 + i) + end_of_block)
client2.expect(prompt)
# send Ctrl-C
client1.send('\x03', eol='')
match = client1.expect('(%s)|([#\$] )' % prompt)
if match.groups()[1]:
client1.send(client1.command)
client1.expect(prompt)
client1.send('DROP TABLE test.lv')
client1.expect(prompt)
client1.send('DROP TABLE test.mt')
client1.expect(prompt)
|
the-stack_106_18516
|
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from base.apis import viewsets
from summers_api.trakt.models import Show, UsersShow
from .serializers import (
MarkEpisodeSerializer,
ShowCreateSerializer,
ShowDetailSerializer,
ShowUpdateSeasonSerializer,
)
User = get_user_model()
class ShowModelViewSet(viewsets.BaseModelViewSet):
"""
Following Endpoints are created by this modelviewset.
Create: POST `/`
List: GET `/`
Retrieve: GET `/<pk>/`
Destroy: DELETE `/<pk>/`
my: GET `/my/`
wishlist: POST `/<pk>/wishlist/`
season: PUT `/<pk>/season/`
Mark episode: PUT `/<pk>/mark-episode/`
"""
parser_classes = (MultiPartParser, FormParser, JSONParser)
permission_classes = [AllowAny]
serializer_class = ShowDetailSerializer
queryset = Show.objects.all()
models = Show
def get_serializer_class(self):
if self.action == "create":
return ShowCreateSerializer
elif self.action == "mark_episode":
return MarkEpisodeSerializer
elif self.action == "season":
return ShowUpdateSeasonSerializer
return super().get_serializer_class()
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
show = serializer.save()
data = ShowDetailSerializer(instance=show, context={"request": request}).data
return Response(data=data, status=status.HTTP_201_CREATED)
def list(self, request, *args, **kwargs):
if request.user.is_authenticated:
# if current user is authenticated remove already added show and return
usersshows = request.user.usersshow.all()
queryset = Show.objects.exclude(usersshow__in=usersshows)
else:
# if current user is not authenticated return all
queryset = super().get_queryset()
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
@action(
methods=["GET"],
detail=False,
permission_classes=[IsAuthenticated],
)
def my(self, request, *args, **kwargs):
# return all show of current user
usersshows = request.user.usersshow.all()
queryset = Show.objects.filter(usersshow__in=usersshows).order_by(
"usersshow",
)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
@action(
methods=["POST"],
detail=True,
permission_classes=[IsAuthenticated],
)
def wishlist(self, request, *args, **kwargs):
"""Add show into wishlist"""
user = self.request.user
show = self.get_object()
if not user.usersshow.filter(show=show).exists():
UsersShow.objects.create(user=user, show=show)
serializer = self.get_serializer(show, many=False)
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
data = {"message": "Show is already added into wishlist"}
return Response(data=data, status=status.HTTP_304_NOT_MODIFIED)
@action(
methods=["PUT"],
detail=True,
permission_classes=[IsAuthenticated],
url_path="season",
)
def season(self, request, *args, **kwargs):
"""Add new season in show."""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
show = self.get_object()
show = serializer.save(show=show)
data = ShowDetailSerializer(instance=show, context={"request": request}).data
return Response(data=data, status=status.HTTP_201_CREATED)
@action(
methods=["PUT"],
detail=True,
permission_classes=[IsAuthenticated],
url_path="mark-episode",
)
def mark_episode(self, request, *args, **kwargs):
"""Mark episode of show"""
show = self.get_object()
serializer = self.get_serializer(
data=request.data,
context={"request": request, "show": show},
)
serializer.is_valid(raise_exception=True)
usersshow = serializer.save()
return Response(
data=ShowDetailSerializer(
instance=usersshow.show,
context={"request": request},
).data,
status=status.HTTP_200_OK,
)
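# --- Illustrative router sketch (not part of the original module) ---
# Hedged example of exposing the viewset through a DRF router; the route
# prefix and basename are assumptions.
def _example_urls():
    from rest_framework.routers import DefaultRouter
    router = DefaultRouter()
    router.register("shows", ShowModelViewSet, basename="show")
    return router.urls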
|
the-stack_106_18522
|
from __future__ import annotations
import logging
from config import get_config, ConfigurationError, msgbox
from split_export import get_files, ExportFile
from repo import RepositorySourceItem
from convert import setup_session, cleanup as cleanup_convert, convert
from deployment import add_deployment
def main():
# Get configuration and handle command line arguments
config = get_config()
# Get object representing repo
repo = get_repo(config)
# Create the export file(s)
run(config, repo)
def run(config, repo):
# Setup basic auth handler for IRIS, if we need to convert UDL to XML
if config.Source.srctype == 'udl':
setup_session(config)
# Get list of files to create, with their items
files = get_files(config, repo)
# Convert item data from UDL to XML, if needed
if config.Source.srctype == 'udl':
convert_udl(config, files)
# Append export notes to make export usable as a deployment
if config.Local.deployment:
export = files[0]
# Create the export element
export.create_export()
# Add elements with deployment information
logging.info('Adding deployment items')
add_deployment(config, repo.name, export.root)
# Export each 1..n files in turn:
for export_file in files:
export_file.write()
# Give some feedback
total = sum(len(f.items) for f in files)
logmsg= f"\nDone; exported {total} items to {len(files)} files.\n"
logging.info(logmsg)
cleanup()
if not config.no_gui:
msgbox(logmsg)
def convert_udl(config, exports:list[ExportFile]):
""" Converts all items from UDL to XML, where needed """
to_convert = []
for ef in exports:
for item in ef.items:
if item.kind != 'src':
continue
assert isinstance(item, RepositorySourceItem)
if not item.is_udl:
continue
to_convert.append(item)
convert(config, to_convert, config.Local.threads)
def get_repo(config):
"""Returns the configured repository."""
if config.Source.type == 'github':
from github import get_data
elif config.Source.type == 'directory':
from fsrepo import get_data
else:
raise ConfigurationError(f"Invalid repository type '{config.Source.type}' in configuration.")
return get_data(config)
def cleanup():
cleanup_convert()
cleanup_logging()
def cleanup_logging():
""" Closes all resources taken by the loggers' handlers """
# Get root logger
loggers = [logging.getLogger()]
# Get all other loggers, if any
logger_names = logging.root.manager.loggerDict # pylint: disable=no-member
loggers = loggers + [logging.getLogger(name) for name in logger_names]
# Call close() on each handler in each logger
for logger in loggers:
for handler in logger.handlers:
handler.close()
logger.handlers.clear()
if __name__ == '__main__':
main()
|
the-stack_106_18525
|
"""Tools for working with Q (Forbes) polynomials."""
# not special engine, only concerns scalars here
from collections import defaultdict
from scipy import special
from .jacobi import jacobi, jacobi_sequence
from prysm.mathops import np, kronecker, gamma, sign
def g_qbfs(n_minus_1):
"""g(m-1) from oe-18-19-19700 eq. (A.15)."""
if n_minus_1 == 0:
return - 1 / 2
else:
n_minus_2 = n_minus_1 - 1
return - (1 + g_qbfs(n_minus_2) * h_qbfs(n_minus_2)) / f_qbfs(n_minus_1)
def h_qbfs(n_minus_2):
"""h(m-2) from oe-18-19-19700 eq. (A.14)."""
n = n_minus_2 + 2
return -n * (n - 1) / (2 * f_qbfs(n_minus_2))
def f_qbfs(n):
"""f(m) from oe-18-19-19700 eq. (A.16)."""
if n == 0:
return 2
elif n == 1:
return np.sqrt(19) / 2
else:
term1 = n * (n + 1) + 3
term2 = g_qbfs(n - 1) ** 2
term3 = h_qbfs(n - 2) ** 2
return np.sqrt(term1 - term2 - term3)
def Qbfs(n, x):
"""Qbfs polynomial of order n at point(s) x.
Parameters
----------
n : int
polynomial order
x : `numpy.array`
point(s) at which to evaluate
Returns
-------
`numpy.ndarray`
Qbfs_n(x)
"""
# to compute the Qbfs polynomials, compute the auxiliary polynomial P_n
# recursively. Simultaneously use the recurrence relation for Q_n
# to compute the intermediary Q polynomials.
# for input x, transform r = x ^ 2
# then compute P(r) and consequently Q(r)
# and scale outputs by Qbfs = r*(1-r) * Q
# the auxiliary polynomials are the jacobi polynomials with
# alpha,beta = (-1/2,+1/2),
# also known as the asymmetric chebyshev polynomials
rho = x ** 2
# c_Q is the leading term used to convert Qm to Qbfs
c_Q = rho * (1 - rho)
if n == 0:
return np.ones_like(x) * c_Q
if n == 1:
return 1 / np.sqrt(19) * (13 - 16 * rho) * c_Q
# c is the leading term of the recurrence relation for P
c = 2 - 4 * rho
# P0, P1 are the first two terms of the recurrence relation for auxiliary
# polynomial P_n
P0 = np.ones_like(x) * 2
P1 = 6 - 8 * rho
Pnm2 = P0
Pnm1 = P1
# Q0, Q1 are the first two terms of the recurrence relation for Qm
Q0 = np.ones_like(x)
Q1 = 1 / np.sqrt(19) * (13 - 16 * rho)
Qnm2 = Q0
Qnm1 = Q1
for nn in range(2, n+1):
Pn = c * Pnm1 - Pnm2
Pnm2 = Pnm1
Pnm1 = Pn
g = g_qbfs(nn - 1)
h = h_qbfs(nn - 2)
f = f_qbfs(nn)
Qn = (Pn - g * Qnm1 - h * Qnm2) * (1/f) # small optimization; mul by 1/f instead of div by f
Qnm2 = Qnm1
Qnm1 = Qn
# Qn is certainly defined (flake8 can't tell the previous ifs bound the loop
# to always happen once)
return Qn * c_Q # NOQA
def Qbfs_sequence(ns, x):
"""Qbfs polynomials of orders ns at point(s) x.
Parameters
----------
ns : `Iterable` of int
polynomial orders
x : `numpy.array`
point(s) at which to evaluate
Returns
-------
generator of `numpy.ndarray`
yielding one order of ns at a time
"""
# see the leading comment of Qbfs for some explanation of this code
# and prysm:jacobi.py#jacobi_sequence the "_sequence" portion
ns = list(ns)
min_i = 0
rho = x ** 2
# c_Q is the leading term used to convert Qm to Qbfs
c_Q = rho * (1 - rho)
if ns[min_i] == 0:
yield np.ones_like(x) * c_Q
min_i += 1
if ns[min_i] == 1:
yield 1 / np.sqrt(19) * (13 - 16 * rho) * c_Q
min_i += 1
# c is the leading term of the recurrence relation for P
c = 2 - 4 * rho
# P0, P1 are the first two terms of the recurrence relation for auxiliary
# polynomial P_n
P0 = np.ones_like(x) * 2
P1 = 6 - 8 * rho
Pnm2 = P0
Pnm1 = P1
# Q0, Q1 are the first two terms of the recurrence relation for Qbfs_n
Q0 = np.ones_like(x)
Q1 = 1 / np.sqrt(19) * (13 - 16 * rho)
Qnm2 = Q0
Qnm1 = Q1
for nn in range(2, ns[-1]+1):
Pn = c * Pnm1 - Pnm2
Pnm2 = Pnm1
Pnm1 = Pn
g = g_qbfs(nn - 1)
h = h_qbfs(nn - 2)
f = f_qbfs(nn)
Qn = (Pn - g * Qnm1 - h * Qnm2) * (1/f) # small optimization; mul by 1/f instead of div by f
Qnm2 = Qnm1
Qnm1 = Qn
if ns[min_i] == nn:
yield Qn * c_Q
min_i += 1
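# --- Illustrative consistency sketch (not part of the original module) ---
# Hedged example: the one-shot and sequence evaluations above should agree
# on a shared radial grid; the orders and grid size are arbitrary assumptions.
def _example_qbfs_consistency():
    x = np.linspace(0, 1, 128)
    direct = [Qbfs(n, x) for n in range(4)]
    seq = list(Qbfs_sequence(range(4), x))
    return all(np.allclose(d, s) for d, s in zip(direct, seq))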
def Qcon(n, x):
"""Qcon polynomial of order n at point(s) x.
Parameters
----------
n : int
polynomial order
x : `numpy.array`
point(s) at which to evaluate
Returns
-------
`numpy.ndarray`
Qcon_n(x)
Notes
-----
The argument x is notionally uniformly spaced 0..1.
The Qcon polynomials are obtained by computing c = x^4.
A transformation is then made, x => 2x^2 - 1
and the Qcon polynomials are defined as the jacobi polynomials with
alpha=0, beta=4, the same order n, and the transformed x.
The result of that is multiplied by c to yield a Qcon polynomial.
Sums can more quickly be calculated by deferring the multiplication by
c.
"""
xx = x ** 2
xx = 2 * xx - 1
Pn = jacobi(n, 0, 4, xx)
return Pn * x ** 4
def Qcon_sequence(ns, x):
"""Qcon polynomials of orders ns at point(s) x.
Parameters
----------
ns : `Iterable` of int
polynomial orders
x : `numpy.array`
point(s) at which to evaluate
Returns
-------
generator of `numpy.ndarray`
yielding one order of ns at a time
"""
xx = x ** 2
xx = 2 * xx - 1
x4 = x ** 4
Pns = jacobi_sequence(ns, 0, 4, xx)
for Pn in Pns:
yield Pn * x4
def abc_q2d(n, m):
"""A, B, C terms for 2D-Q polynomials. oe-20-3-2483 Eq. (A.3).
Parameters
----------
n : `int`
radial order
m : `int`
azimuthal order
Returns
-------
`float`, `float`, `float`
A, B, C
"""
# D is used everywhere
D = (4 * n ** 2 - 1) * (m + n - 2) * (m + 2 * n - 3)
# A
term1 = (2 * n - 1) * (m + 2 * n - 2)
term2 = (4 * n * (m + n - 2) + (m - 3) * (2 * m - 1))
A = (term1 * term2) / D
# B
num = -2 * (2 * n - 1) * (m + 2 * n - 3) * (m + 2 * n - 2) * (m + 2 * n - 1)
B = num / D
# C
num = n * (2 * n - 3) * (m + 2 * n - 1) * (2 * m + 2 * n - 3)
C = num / D
return A, B, C
def G_q2d(n, m):
"""G term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.15).
Parameters
----------
n : `int`
radial order
m : `int`
azimuthal order
Returns
-------
`float`
G
"""
if n == 0:
num = special.factorial2(2 * m - 1)
den = 2 ** (m + 1) * special.factorial(m - 1)
return num / den
elif n > 0 and m == 1:
t1num = (2 * n ** 2 - 1) * (n ** 2 - 1)
t1den = 8 * (4 * n ** 2 - 1)
term1 = -t1num / t1den
term2 = 1 / 24 * kronecker(n, 1)
return term1 - term2
else:
# nt1 = numerator term 1, d = denominator...
nt1 = 2 * n * (m + n - 1) - m
nt2 = (n + 1) * (2 * m + 2 * n - 1)
num = nt1 * nt2
dt1 = (m + 2 * n - 2) * (m + 2 * n - 1)
dt2 = (m + 2 * n) * (2 * n + 1)
den = dt1 * dt2
term1 = -num / den
return term1 * gamma(n, m)
def F_q2d(n, m):
"""F term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.13).
Parameters
----------
n : `int`
radial order
m : `int`
azimuthal order
Returns
-------
`float`
F
"""
if n == 0:
num = m ** 2 * special.factorial2(2 * m - 3)
den = 2 ** (m + 1) * special.factorial(m - 1)
return num / den
elif n > 0 and m == 1:
t1num = 4 * (n - 1) ** 2 * n ** 2 + 1
t1den = 8 * (2 * n - 1) ** 2
term1 = t1num / t1den
term2 = 11 / 32 * kronecker(n, 1)
return term1 + term2
else:
Chi = m + n - 2
nt1 = 2 * n * Chi * (3 - 5 * m + 4 * n * Chi)
nt2 = m ** 2 * (3 - m + 4 * n * Chi)
num = nt1 + nt2
dt1 = (m + 2 * n - 3) * (m + 2 * n - 2)
dt2 = (m + 2 * n - 1) * (2 * n - 1)
den = dt1 * dt2
term1 = num / den
return term1 * gamma(n, m)
def g_q2d(n, m):
"""Lowercase g term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.18a).
Parameters
----------
n : `int`
radial order less one (n - 1)
m : `int`
azimuthal order
Returns
-------
`float`
g
"""
return G_q2d(n, m) / f_q2d(n, m)
def f_q2d(n, m):
"""Lowercase f term for 2D-Q polynomials. oe-20-3-2483 Eq. (A.18b).
Parameters
----------
n : `int`
radial order
m : `int`
azimuthal order
Returns
-------
`float`
f
"""
if n == 0:
return np.sqrt(F_q2d(n=0, m=m))
else:
return np.sqrt(F_q2d(n, m) - g_q2d(n-1, m) ** 2)
def Q2d(n, m, r, t):
"""2D Q polynomial, aka the Forbes polynomials.
Parameters
----------
n : `int`
radial polynomial order
m : `int`
azimuthal polynomial order
r : `numpy.ndarray`
radial coordinate, slope orthogonal in [0,1]
t : `numpy.ndarray`
azimuthal coordinate, radians
Returns
-------
`numpy.ndarray`
array containing Q2d_n^m(r,t)
the leading coefficient u^m or u^2 (1 - u^2) and sines/cosines
are included in the return
"""
# Q polynomials have auxiliary polynomials "P"
# which are scaled jacobi polynomials under the change of variables
# x => 2x - 1 with alpha = -3/2, beta = m-3/2
# the scaling prefix may be found in A.4 of oe-20-3-2483
# impl notes:
# Pn is computed using a recurrence over order n. The recurrence is for
# a single value of m, and the 'seed' depends on both m and n.
#
# in general, Q_n^m = [P_n^m(x) - g_n-1^m Q_n-1^m] / f_n^m
# for the sake of consistency, this function takes args of (r,t)
# but the papers define an argument of u (really, u^2...)
# which is what I call rho (or r).
# for the sake of consistency of impl, I alias r=>u
# and compute x = u**2 to match the papers
u = r
x = u ** 2
if m == 0:
return Qbfs(n, r)
# m == 0 already was short circuited, so we only
# need to consider the m =/= 0 case for azimuthal terms
if sign(m) == -1:
m = abs(m)
prefix = u ** m * np.sin(m*t)
else:
prefix = u ** m * np.cos(m*t)
m = abs(m)
P0 = 1/2
if m == 1 and n == 1:
P1 = 1 - x/2
else:
P1 = (m - .5) + (1 - m) * x
f0 = f_q2d(0, m)
Q0 = 1 / (2 * f0)
if n == 0:
return Q0 * prefix
g0 = g_q2d(0, m)
f1 = f_q2d(1, m)
Q1 = (P1 - g0 * Q0) * (1/f1)
if n == 1:
return Q1 * prefix
# everything above here works, or at least everything in the returns works
if m == 1:
P2 = (3 - x * (12 - 8 * x)) / 6
P3 = (5 - x * (60 - x * (120 - 64 * x))) / 10
g1 = g_q2d(1, m)
f2 = f_q2d(2, m)
Q2 = (P2 - g1 * Q1) * (1/f2)
g2 = g_q2d(2, m)
f3 = f_q2d(3, m)
Q3 = (P3 - g2 * Q2) * (1/f3)
# Q2, Q3 correct
if n == 2:
return Q2 * prefix
elif n == 3:
return Q3 * prefix
Pnm2, Pnm1 = P2, P3
Qnm1 = Q3
min_n = 4
else:
Pnm2, Pnm1 = P0, P1
Qnm1 = Q1
min_n = 2
for nn in range(min_n, n+1):
A, B, C = abc_q2d(nn-1, m)
Pn = (A + B * x) * Pnm1 - C * Pnm2
gnm1 = g_q2d(nn-1, m)
fn = f_q2d(nn, m)
Qn = (Pn - gnm1 * Qnm1) * (1/fn)
Pnm2, Pnm1 = Pnm1, Pn
Qnm1 = Qn
# flake8 can't prove that the branches above the loop guarantee that we
# enter the loop and Qn is defined
return Qn * prefix # NOQA
def Q2d_sequence(nms, r, t):
"""Sequence of 2D-Q polynomials.
Parameters
----------
nms : iterable of `tuple`
(n,m) for each desired term
r : `numpy.ndarray`
radial coordinates
t : `numpy.ndarray`
azimuthal coordinates
Returns
-------
generator
yields one term for each element of nms
"""
# see Q2d for general sense of this algorithm.
# the way this one works is to compute the maximum N for each |m|, and then
# compute the recurrence for each of those sequences and storing it. A loop
# is then iterated over the input nms, and selected value with appropriate
# prefixes / other terms yielded.
u = r
x = u ** 2
def factory():
return 0
# maps |m| => N
m_has_pos = set()
m_has_neg = set()
max_ns = defaultdict(factory)
for n, m in nms:
m_ = abs(m)
if max_ns[m_] < n:
max_ns[m_] = n
if m > 0:
m_has_pos.add(m_)
else:
m_has_neg.add(m_)
# precompute these reusable pieces of data
u_scales = {}
sin_scales = {}
cos_scales = {}
for absm in max_ns.keys():
u_scales[absm] = u ** absm
if absm in m_has_neg:
sin_scales[absm] = np.sin(absm * t)
if absm in m_has_pos:
cos_scales[absm] = np.cos(absm * t)
sequences = {}
for m, N in max_ns.items():
if m == 0:
sequences[m] = list(Qbfs_sequence(range(N+1), r))
else:
sequences[m] = []
P0 = 1/2
if m == 1 and N == 1:
P1 = 1 - x/2
else:
P1 = (m - .5) + (1 - m) * x
f0 = f_q2d(0, m)
Q0 = 1 / (2 * f0)
sequences[m].append(Q0)
if N == 0:
continue
g0 = g_q2d(0, m)
f1 = f_q2d(1, m)
Q1 = (P1 - g0 * Q0) * (1/f1)
sequences[m].append(Q1)
if N == 1:
continue
# everything above here works, or at least everything in the returns works
if m == 1:
P2 = (3 - x * (12 - 8 * x)) / 6
P3 = (5 - x * (60 - x * (120 - 64 * x))) / 10
g1 = g_q2d(1, m)
f2 = f_q2d(2, m)
Q2 = (P2 - g1 * Q1) * (1/f2)
g2 = g_q2d(2, m)
f3 = f_q2d(3, m)
Q3 = (P3 - g2 * Q2) * (1/f3)
sequences[m].append(Q2)
sequences[m].append(Q3)
# Q2, Q3 correct
if N <= 3:
continue
Pnm2, Pnm1 = P2, P3
Qnm1 = Q3
min_n = 4
else:
Pnm2, Pnm1 = P0, P1
Qnm1 = Q1
min_n = 2
for nn in range(min_n, N+1):
A, B, C = abc_q2d(nn-1, m)
Pn = (A + B * x) * Pnm1 - C * Pnm2
gnm1 = g_q2d(nn-1, m)
fn = f_q2d(nn, m)
Qn = (Pn - gnm1 * Qnm1) * (1/fn)
sequences[m].append(Qn)
Pnm2, Pnm1 = Pnm1, Pn
Qnm1 = Qn
for n, m in nms:
if m != 0:
if m < 0:
# m < 0, double neg = pos
prefix = sin_scales[-m] * u_scales[-m]
else:
prefix = cos_scales[m] * u_scales[m]
yield sequences[abs(m)][n] * prefix
else:
yield sequences[0][n]
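# --- Illustrative evaluation sketch (not part of the original module) ---
# Hedged example: evaluate a few 2D-Q terms on a polar grid; the (n, m)
# pairs and grid sizes are arbitrary assumptions.
def _example_q2d_terms():
    r = np.linspace(0, 1, 64)
    t = np.linspace(0, 2 * np.pi, 64)
    rr, tt = np.meshgrid(r, t)
    nms = [(0, 0), (1, 1), (2, -2)]
    return list(Q2d_sequence(nms, rr, tt))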
|
the-stack_106_18528
|
import numpy as np
import seaborn as sns
from matplotlib.colorbar import cm
def color_picker(palette, center='light', n_colors=10, show=False):
'''COLOR PICKER
Provides access to several color palettes that will
serve the purpose of most usecases with astetik plots.
PARAMETERS
----------
palette :: name of the palette (see list below)
center :: if the center of the palette is 'light' or 'dark'
n_colors :: number of colors in the palette
show :: if the palette will be shown for visual reference
PALETTES
--------
NOTE: all two color palettes will reflect the first color
in the selected range. For example blue_to_red will have
two shades of blue.
'blue_to_red'
'blue_to_green'
'red_to_green'
'green_to_red'
'violet_to_blue'
'brown_to_green'
'green_to_marine'
'''
if palette == 'default':
palette = 'blue_to_red'
# handle the case where few colors only are used
if n_colors <= 5:
n_input = n_colors
n_colors = 8 # this decides how dark colors will be
else:
n_input = n_colors
if palette == 'blue_to_red':
out = sns.color_palette("RdBu_r", n_colors=n_colors)
elif palette == 'blue_to_green':
out = sns.color_palette("GnBu_d", n_colors=n_colors)
elif palette == 'red_to_green':
out = sns.diverging_palette(16, 180, sep=5, center=center, n=n_colors)
elif palette == 'green_to_red':
out = sns.diverging_palette(180, 16, sep=5, center=center, n=n_colors)
elif palette == 'violet_to_blue':
out = sns.diverging_palette(1, 255, sep=5, center=center, n=n_colors)
elif palette == 'brown_to_green':
out = sns.diverging_palette(50, 100, sep=5, center=center, n=n_colors)
elif palette == 'green_to_marine':
out = sns.diverging_palette(100, 200, sep=5, center=center, n=n_colors)
if n_input == 1:
out = out[0]
elif n_input == 2:
out = out[:2]
if show == True:
sns.palplot(out)
if np.ndim(out) == 1:
out = [out]
return out
def color_blind(mode='colorblind'):
'''COLOR BLIND COLORS
Provides a color palette that is colorblind friendly.
'''
if mode == 'colorblind':
colors = [[0, 0, 0],
[255/255, 255/255, 109/255],
[255/255, 109/255, 182/255],
[0, 73/255, 73/255],
[0, 146/255, 146/255],
[255/255, 182/255, 119/255],
[73/255, 0, 146/255],
[0, 109/255, 219/255],
[182/255, 109/255, 255/255],
[109/255, 182/255, 255/255],
[182/255, 219/255, 255/255],
[146/255, 0, 0],
[146/255, 73/255, 0],
[219/255, 209/255, 0],
[36/255, 255/255, 36/255]]
elif mode == 'colorblind6':
colors = [[0, 0, 0],
[230/255, 159/255, 0],
[86/255, 180/255, 233/255],
[0, 158/255, 115/255],
[213/255, 94/255, 0],
[0, 114/255, 178/255]]
elif mode == 'colorblind1':
colors = [[222/255, 188/255, 146/255],
[251/255, 175/255, 148/255],
[131/255, 215/255, 142/255],
[225/255, 191/255, 147/255],
[250/255, 109/255, 81/255],
[101/255, 170/255, 53/255],
[204/255, 146/255, 68/255],
[221/255, 51/255, 48/255],
[95/255, 130/255, 24/255],
[149/255, 115/255, 32/255],
[164/255, 23/255, 30/255],
[61/255, 105/255, 22/255],
[119/255, 81/255, 24/255]]
return colors
def cmaps(cmap):
if cmap == 'paired':
cmap = cm.Paired
elif cmap == 'jet':
cmap = cm.jet
elif cmap == 'prism':
cmap = cm.prism
elif cmap == 'RdYlGn':
cmap = cm.RdYlGn
elif cmap == 'seismic':
cmap = cm.seismic
elif cmap == 'coolwarm':
cmap = cm.coolwarm
elif cmap == 'inferno':
cmap = cm.inferno
elif cmap == 'plasma':
cmap = cm.plasma
elif cmap == 'OrRd':
cmap = cm.OrRd
elif cmap == 'tab20c':
cmap = cm.tab20c
return cmap
def _label_to_hex(label, n_colors):
hex = sns.color_palette(label, n_colors)
return hex.as_hex()
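# Minimal sketch of _label_to_hex (illustrative; 'RdBu_r' is just one valid
# seaborn palette label):
#
#     >>> _label_to_hex('RdBu_r', n_colors=5)   # returns a list of 5 hex strings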
|
the-stack_106_18531
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 20:36:02 2020
@author: atekawade
"""
import numpy as np
import os
import sys
from ct_segnet.data_utils import data_io
import argparse
from argparse import ArgumentTypeError
from tqdm import tqdm
import ast
from shutil import rmtree
import time
mem_thres = 0.0
def main(args):
data_io.show_header()
# Understand input data format
if args.input_fname.split('.')[-1] in ("hdf5", "h5"):
tiff_input = False
else:
raise ArgumentTypeError("input file type not recognized. must be hdf5 file")
input_fname = args.input_fname
# set up output file name / path / chunks parameter
if args.output_fpath == "":
args.output_fpath = os.path.split(args.input_fname)[0]
if args.output_fname == "converted_tiff":
output_fname = args.input_fname.split('.')[0]
else:
output_fname = os.path.join(args.output_fpath, args.output_fname)
# print("Overwrite OK is %s"%args.overwrite_OK)
# print("Stats only is %s"%args.stats_only)
# print("Delete is %s"%args.delete)
# sys.exit()
# Define DataFile instances - quit here if stats_only requested
r_dfile = data_io.DataFile(input_fname, tiff = tiff_input, \
data_tag = args.dataset_name, \
VERBOSITY = args.verbosity)
print("Input data stats:")
r_dfile.show_stats()
if args.stats_only:
sys.exit()
w_shape = r_dfile.d_shape # future implementation must allow resampling dataset
w_dtype = r_dfile.d_type # future implementation must allow changing dtype (with renormalization)
w_dfile = data_io.DataFile(output_fname, tiff = True, \
VERBOSITY = args.verbosity, \
d_shape = w_shape, d_type = w_dtype)
str_prompt = "\ntiff file will be saved to the following location.\n%s"%output_fname
if not args.yes:
input(str_prompt + "\nPress any key to continue")
else:
print(str_prompt)
w_dfile.create_new(overwrite = args.overwrite_OK)
t0 = time.time()
slice_start = 0
print("\n")
pbar = tqdm(total = r_dfile.d_shape[0])
while slice_start < r_dfile.d_shape[0]:
dd, s = r_dfile.read_chunk(axis = 0, slice_start = slice_start, \
max_GB = mem_thres, \
chunk_shape = r_dfile.chunk_shape)
w_dfile.write_chunk(dd, axis = 0, s = s)
slice_start = s.stop
pbar.update(s.stop - s.start)
pbar.close()
total_time = (time.time() - t0)/60.0 # minutes
print("\nTotal time: %.2f minutes"%(total_time))
if args.delete:
if not args.yes: input("Delete old file? Press any key")
if tiff_input:
rmtree(input_fname)
else:
os.remove(input_fname)
if __name__ == "__main__":
# Arg parser stuff
parser = argparse.ArgumentParser()
    parser.add_argument('-f', "--input-fname", required = True, type = str, help = "Path to input hdf5 file")
parser.add_argument('-i', "--stats_only", required = False, action = "store_true", default = False, help = "show stats only")
parser.add_argument('-d', "--delete", required = False, action = "store_true", default = False, help = "delete input file")
parser.add_argument('-o', "--output-fpath", required = False, default = "", type = str, help = "Parent folder path for saving output. (optional) If not provided, file will be written to same parent folder as input file.")
parser.add_argument('-n', "--output-fname", required = False, default = "converted_tiff", type = str, help = "Name of output tiff file.")
parser.add_argument('-x', "--dataset-name", required = False, type = str, default = "", help = "Dataset name for hdf5; required if input is hdf5 file")
parser.add_argument('-v', "--verbosity", required = False, type = int, default = 0, help = "read / write verbosity; 0 - silent, 1 - important stuff, 2 - print everything")
parser.add_argument('-c', "--chunk-param", required = False, type = ast.literal_eval, default = None, help = "chunk_size (MB) or chunk shape (tuple)")
parser.add_argument('--chunked-slice-size', required = False, type = ast.literal_eval, default = None, help = "alternately to --chunk_param, provide maximum size (GB) of a chunk of slice along any axis")
parser.add_argument('-w', "--overwrite_OK", required = False, action = "store_true", default = False, help = "if output file exists, overwrite")
# parser.add_argument('-r', "--resample-factor", required = False, type = int, help = "resample to reduce dataset size by cube of this factor")
parser.add_argument('-y', "--yes", required = False, action = "store_true", default = False, help = "say yes to all prompts")
args = parser.parse_args()
main(args)
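# Example invocation (illustrative; the file paths, dataset name and script name
# are placeholders, not part of the original project):
#
#   python hdf5_to_tiff.py -f /data/recon.hdf5 -x data -o /data/out -n recon_tiff -y
#
# This reads the hdf5 dataset chunk-by-chunk along axis 0 and writes it out as a
# tiff stack, as implemented in main() above.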
|
the-stack_106_18533
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green ([email protected]) and the
# RMG Team ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
#global imports
import copy
import os.path
import numpy as np
import re
#local imports
from rmgpy.chemkin import getSpeciesIdentifier
from rmgpy.scoop_framework.util import broadcast, get, map_
from rmgpy.scoop_framework.util import logger as logging
from rmgpy.rmg.main import RMG
from model import ReductionReaction
from rates import isImportant
#global variables
reactions = None
def simulateOne(reactionModel, atol, rtol, reactionSystem):
"""
Simulates one reaction system, listener registers results,
which are returned at the end.
The returned data consists of a array of the species names,
and the concentration data.
The concentration data consists of a number of elements for each timestep
the solver took to reach the end time of the batch reactor simulation.
Each element consists of the time and the concentration data of the species at that
particular timestep in the order of the species names.
"""
#register as a listener
listener = ConcentrationListener()
coreSpecies = reactionModel.core.species
    regex = r'\([0-9]+\)'  # cut off '(one or more digits)'
speciesNames = []
for spc in coreSpecies:
name = getSpeciesIdentifier(spc)
name_cutoff = re.split(regex, name)[0]
speciesNames.append(name_cutoff)
listener.speciesNames = speciesNames
reactionSystem.attach(listener)
pdepNetworks = []
for source, networks in reactionModel.networkDict.items():
pdepNetworks.extend(networks)
terminated, obj = reactionSystem.simulate(
coreSpecies = reactionModel.core.species,
coreReactions = reactionModel.core.reactions,
edgeSpecies = reactionModel.edge.species,
edgeReactions = reactionModel.edge.reactions,
toleranceKeepInEdge = 0,
toleranceMoveToCore = 1,
toleranceInterruptSimulation = 1,
pdepNetworks = pdepNetworks,
absoluteTolerance = atol,
relativeTolerance = rtol,
)
assert terminated
#unregister as a listener
reactionSystem.detach(listener)
return listener.speciesNames, listener.data
def simulateAll(rmg):
"""
Simulate the RMG job,
for each of the simulated reaction systems.
Each element i of the data corresponds to a reaction system.
"""
reactionModel = rmg.reactionModel
data = []
atol, rtol = rmg.absoluteTolerance, rmg.relativeTolerance
for reactionSystem in rmg.reactionSystems:
data.append(simulateOne(reactionModel, atol, rtol, reactionSystem))
return data
def initialize(wd, rxns):
global working_dir, reactions
working_dir = wd
assert os.path.isdir(working_dir)
#set global variable here such that functions executed in the root worker have access to it.
reactions = [ReductionReaction(rxn) for rxn in rxns]
broadcast(reactions, 'reactions')
def retrieveReactions():
"""
    Reactions can be retrieved through the global variable 'reactions' when parallel computing
    is not used.
    When multiple workers are used, the reactions are retrieved from the previously broadcast
    constant.
In any case, the references to the original reactions of the reaction model are assumed to be
broken.
"""
global reactions
broadcastedReactions = get('reactions')
if broadcastedReactions:
reactions = broadcastedReactions
return reactions
def findImportantReactions(rmg, tolerance):
"""
This function:
- loops over all the species involved in a specific reaction
- decides whether the specific reaction is important for the species.
Whenever it is found that a reaction is important for a species, we break
the species loop, and keep the reaction in the model.
Returns:
    a list of rxns that are important and should be retained in the model.
"""
# run the simulation, creating concentration profiles for each reaction system defined in input.
simdata = simulateAll(rmg)
reduceReactions = retrieveReactions()
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i+n]
CHUNKSIZE = 40
boolean_array = []
for chunk in chunks(reduceReactions,CHUNKSIZE):
N = len(chunk)
partial_results = list(
map_(
assessReaction, chunk, [rmg.reactionSystems] * N, [tolerance] * N, [simdata] * N
)
)
boolean_array.extend(partial_results)
"""
Assuming that the order of the reduced reactions array and the core reactions of the reaction model
are identical, iterate over the boolean array and retain those reactions of the reaction model
that are deemed 'important'.
"""
importantRxns = []
for isImport, rxn in zip(boolean_array, rmg.reactionModel.core.reactions):
logging.debug('Is rxn {rxn} important? {isImport}'.format(**locals()))
if isImport:
importantRxns.append(rxn)
return importantRxns
def assessReaction(rxn, reactionSystems, tolerance, data):
"""
Returns whether the reaction is important or not in the reactions.
It iterates over the reaction systems, and loads the concentration profile
of each reaction system.
It iterates over a number of samples in profile and
evaluates the importance of the reaction at every sample.
"""
logging.debug('Assessing reaction {}'.format(rxn))
reactions = retrieveReactions()
# read in the intermediate state variables
for datum, reactionSystem in zip(data, reactionSystems):
T, P = reactionSystem.T.value_si, reactionSystem.P.value_si
speciesNames, profile = datum
# take N evenly spaced indices from the table with simulation results:
"""
Number of time steps between start and end time of the batch reactor simulation at which the importance of
reactions should be evaluated.
        The more timesteps, the less chance we have to remove an important reaction, but the more simulations
need to be carried out.
"""
timesteps = len(profile) / 2
logging.debug('Evaluating the importance of a reaction at {} time samples.'.format(timesteps))
assert timesteps <= len(profile)
indices = map(int, np.linspace(0, len(profile)-1, num = timesteps))
for index in indices:
assert profile[index] is not None
timepoint, coreSpeciesConcentrations = profile[index]
coreSpeciesConcentrations = {key: float(value) for (key, value) in zip(speciesNames, coreSpeciesConcentrations)}
for species_i in rxn.reactants:
if isImportant(rxn, species_i, reactions, 'reactant', tolerance, T, P, coreSpeciesConcentrations):
return True
#only continue if the reaction is not important yet.
for species_i in rxn.products:
if isImportant(rxn, species_i, reactions, 'product', tolerance, T, P, coreSpeciesConcentrations):
return True
return False
def searchTargetIndex(targetLabel, reactionModel):
"""
Searches for the Species object in the core species
of the reaction that has the same label as the parameter string.
reactionModel must be of class CoreEdgeReactionModel
Has known issues dealing with duplicate labels. See reductionTest.py
for a unittest of this issue.
"""
for i, spc in enumerate(reactionModel.core.species):
if spc.label == targetLabel:
return i
raise Exception('{} could not be found...'.format(targetLabel))
def computeObservables(targets, reactionModel, reactionSystem, atol, rtol):
"""
Computes the observables of the targets, provided in the function signature.
Currently, the species mole fractions at the end time of the
batch reactor simulation are the only observables that can be computed.
    - resetting the reaction system, initializing with empty variables
- running the simulation at the conditions stored in the reaction system
"""
reactionSystem.initializeModel(\
reactionModel.core.species, reactionModel.core.reactions,\
reactionModel.edge.species, reactionModel.edge.reactions, \
[], atol, rtol)
#run the simulation:
simulateOne(reactionModel, atol, rtol, reactionSystem)
observables = computeMoleFractions(targets, reactionModel, reactionSystem)
return observables
def computeMoleFractions(targets, reactionModel, reactionSystem):
"""
Computes the mole fractions of the targets, identified by the list
of species names in the function signature.
Returns a numpy array with the mole fractions at the end time of the reactor
simulation.
- searching the index of the target species in the core species
of the global reduction variable
- fetching the computed moles variable y
"""
moleFractions = np.zeros(len(targets), np.float64)
for i, label in enumerate(targets):
targetIndex = searchTargetIndex(label, reactionModel)
moleFractions[i] = reactionSystem.y[targetIndex]
return moleFractions
def computeConversion(targetLabel, reactionModel, reactionSystem, atol, rtol):
"""
Computes the conversion of a target molecule by
- searching the index of the target species in the core species
of the global reduction variable
    - resetting the reaction system, initializing with empty variables
- fetching the initial moles variable y0
- running the simulation at the conditions stored in the reaction system
- fetching the computed moles variable y
- computing conversion
"""
targetIndex = searchTargetIndex(targetLabel, reactionModel)
#reset reaction system variables:
logging.info('No. of rxns in core reactions: {}'.format(len(reactionModel.core.reactions)))
reactionSystem.initializeModel(\
reactionModel.core.species, reactionModel.core.reactions,\
reactionModel.edge.species, reactionModel.edge.reactions, \
[], atol, rtol)
#get the initial moles:
y0 = reactionSystem.y.copy()
#run the simulation:
simulateOne(reactionModel, atol, rtol, reactionSystem)
#compute conversion:
conv = 1 - (reactionSystem.y[targetIndex] / y0[targetIndex])
return conv
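# Worked example of the conversion formula used above (illustrative numbers only):
# if the target species starts at y0 = 1.0 and ends at y = 0.25, then
#
#     >>> y0_t, y_t = 1.0, 0.25
#     >>> 1 - (y_t / y0_t)
#     0.75
#
# i.e. 75% of the target species was consumed.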
def reduceModel(tolerance, targets, reactionModel, rmg, reactionSystemIndex):
"""
Reduces the model for the given tolerance and evaluates the
target observables.
"""
# reduce model with the tolerance specified earlier:
importantReactions = findImportantReactions(rmg, tolerance)
original_size = len(reactionModel.core.reactions)
no_importantReactions = len(importantReactions)
logging.info('No. of reactions in tested reduced model: {}'.format(no_importantReactions))
#set the core reactions to the reduced reaction set:
originalReactions = reactionModel.core.reactions
rmg.reactionModel.core.reactions = importantReactions
#re-compute observables:
observables = computeObservables(targets, rmg.reactionModel,\
rmg.reactionSystems[reactionSystemIndex],\
rmg.absoluteTolerance, rmg.relativeTolerance)
#reset the reaction model to its original state:
rmg.reactionModel.core.reactions = originalReactions
logging.info('Observables of reduced model ({} rxns):'.format(no_importantReactions))
for target, observable in zip(targets, observables):
logging.info('Observable in reduced model: {}: {:.2f}%'.format(target, observable * 100))
return observables, importantReactions
class ConcentrationListener(object):
"""Returns the species concentration profiles at each time step."""
def __init__(self):
self.speciesNames = []
self.data = []
def update(self, subject):
"""
Register the time (t) and the species mole fractions at the
given time.
The snapshots variable stores time and Volume as the first two
elements in the array.
"""
data = subject.snapshots
self.data = process(data)
def process(data):
"""
The data is structured as a list of lists.
Each list contains [time, Volume, [species mole fractions]]
The volume is cut out of each list, the remaining part is stored as a tuple.
"""
processed = []
for d in data:
processed.append((d[0], d[2:]))
return processed
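# Sketch of the transformation performed by process() (illustrative data):
#
#     >>> snapshots = [[0.0, 1e-6, 0.5, 0.5], [1.0, 1e-6, 0.2, 0.8]]
#     >>> process(snapshots)
#     [(0.0, [0.5, 0.5]), (1.0, [0.2, 0.8])]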
|
the-stack_106_18535
|
#!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
#
# profile Profile CPU usage by sampling stack traces at a timed interval.
# For Linux, uses BCC, BPF, perf_events. Embedded C.
#
# This is an efficient profiler, as stack traces are frequency counted in
# kernel context, rather than passing every stack to user space for frequency
# counting there. Only the unique stacks and counts are passed to user space
# at the end of the profile, greatly reducing the kernel<->user transfer.
#
# This uses perf_event_open to setup a timer which is instrumented by BPF,
# and for efficiency it does not initialize the perf ring buffer, so the
# redundant perf samples are not collected.
#
# Kernel stacks are post-processed in user-land to skip the interrupt framework
# frames. You can improve efficiency a little by specifying the exact number
# of frames to skip with -s, provided you know what that is. If you get -s
# wrong, note that the first line is the IP, and then the (skipped) stack.
#
# Note: if another perf-based sampling session is active, the output may become
# polluted with their events. On older kernels, the output may also become
# polluted with tracing sessions (when the kprobe is used instead of the
# tracepoint). If this becomes a problem, logic can be added to filter events.
#
# REQUIRES: Linux 4.6+ (BPF_MAP_TYPE_STACK_TRACE support), and the
# perf_misc_flags() function symbol to exist. The latter may or may not
# exist depending on your kernel build. Linux 4.9 provides a proper solution
# to this (this tool will be updated).
#
# Copyright 2016 Netflix, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# THANKS: Sasha Goldshtein, Andrew Birchall, and Evgeny Vereshchagin, who wrote
# much of the code here, borrowed from tracepoint.py and offcputime.py.
#
# 15-Jul-2016 Brendan Gregg Created this.
from __future__ import print_function
from bcc import BPF, Perf
from sys import stderr
from time import sleep
import argparse
import signal
import os
import errno
import multiprocessing
import ctypes as ct
#
# Process Arguments
#
# arg validation
def positive_int(val):
try:
ival = int(val)
except ValueError:
raise argparse.ArgumentTypeError("must be an integer")
if ival < 0:
raise argparse.ArgumentTypeError("must be positive")
return ival
def positive_nonzero_int(val):
ival = positive_int(val)
if ival == 0:
raise argparse.ArgumentTypeError("must be nonzero")
return ival
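# Illustrative behaviour of the argument validators above:
#
#     >>> positive_int("5")
#     5
#     >>> positive_int("-1")         # raises ArgumentTypeError("must be positive")
#     >>> positive_nonzero_int("0")  # raises ArgumentTypeError("must be nonzero")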
# arguments
examples = """examples:
./profile # profile stack traces at 49 Hertz until Ctrl-C
./profile -F 99 # profile stack traces at 99 Hertz
./profile 5 # profile at 49 Hertz for 5 seconds only
./profile -f 5 # output in folded format for flame graphs
./profile -p 185 # only profile threads for PID 185
./profile -U # only show user space stacks (no kernel)
./profile -K # only show kernel space stacks (no user)
./profile -S 11 # always skip 11 frames of kernel stack
"""
parser = argparse.ArgumentParser(
description="Profile CPU stack traces at a timed interval",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
thread_group = parser.add_mutually_exclusive_group()
thread_group.add_argument("-p", "--pid", type=positive_int,
help="profile this PID only")
# TODO: add options for user/kernel threads only
stack_group = parser.add_mutually_exclusive_group()
stack_group.add_argument("-U", "--user-stacks-only", action="store_true",
help="show stacks from user space only (no kernel space stacks)")
stack_group.add_argument("-K", "--kernel-stacks-only", action="store_true",
help="show stacks from kernel space only (no user space stacks)")
parser.add_argument("-F", "--frequency", type=positive_int, default=49,
help="sample frequency, Hertz (default 49)")
parser.add_argument("-d", "--delimited", action="store_true",
help="insert delimiter between kernel/user stacks")
parser.add_argument("-a", "--annotations", action="store_true",
help="add _[k] annotations to kernel frames")
parser.add_argument("-f", "--folded", action="store_true",
help="output folded format, one line per stack (for flame graphs)")
parser.add_argument("--stack-storage-size", default=2048,
type=positive_nonzero_int,
help="the number of unique stack traces that can be stored and "
"displayed (default 2048)")
parser.add_argument("-S", "--kernel-skip", type=positive_int, default=0,
help="skip this many kernel frames (default 3)")
parser.add_argument("duration", nargs="?", default=99999999,
type=positive_nonzero_int,
help="duration of trace, in seconds")
# option logic
args = parser.parse_args()
skip = args.kernel_skip
pid = int(args.pid) if args.pid is not None else -1
duration = int(args.duration)
debug = 0
need_delimiter = args.delimited and not (args.kernel_stacks_only or
args.user_stacks_only)
# TODO: add stack depth, and interval
#
# Setup BPF
#
# define BPF program
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
struct key_t {
u32 pid;
u64 kernel_ip;
u64 kernel_ret_ip;
int user_stack_id;
int kernel_stack_id;
char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_HASH(start, u32);
BPF_STACK_TRACE(stack_traces, STACK_STORAGE_SIZE);
// This code gets a bit complex. Probably not suitable for casual hacking.
PERF_TRACE_EVENT {
u32 pid = bpf_get_current_pid_tgid();
if (!(THREAD_FILTER))
return 0;
// create map key
u64 zero = 0, *val;
struct key_t key = {.pid = pid};
bpf_get_current_comm(&key.name, sizeof(key.name));
// get stacks
key.user_stack_id = USER_STACK_GET;
key.kernel_stack_id = KERNEL_STACK_GET;
if (key.kernel_stack_id >= 0) {
// populate extras to fix the kernel stack
struct pt_regs regs = {};
bpf_probe_read(®s, sizeof(regs), (void *)REGS_LOCATION);
u64 ip = PT_REGS_IP(®s);
// if ip isn't sane, leave key ips as zero for later checking
#ifdef CONFIG_RANDOMIZE_MEMORY
if (ip > __PAGE_OFFSET_BASE) {
#else
if (ip > PAGE_OFFSET) {
#endif
key.kernel_ip = ip;
if (DO_KERNEL_RIP) {
/*
* User didn't specify a skip value (-s), so we will figure
* out how many interrupt framework frames to skip by recording
* the kernel rip, then later scanning for it on the stack.
* This is likely x86_64 specific; can use -s as a workaround
* until this supports your architecture.
*/
bpf_probe_read(&key.kernel_ret_ip, sizeof(key.kernel_ret_ip),
(void *)(regs.bp + 8));
}
}
}
val = counts.lookup_or_init(&key, &zero);
(*val)++;
return 0;
}
"""
# set thread filter
thread_context = ""
perf_filter = "-a"
if args.pid is not None:
thread_context = "PID %s" % args.pid
thread_filter = 'pid == %s' % args.pid
perf_filter = '-p %s' % args.pid
else:
thread_context = "all threads"
thread_filter = '1'
bpf_text = bpf_text.replace('THREAD_FILTER', thread_filter)
# set stack storage size
bpf_text = bpf_text.replace('STACK_STORAGE_SIZE', str(args.stack_storage_size))
# handle stack args
kernel_stack_get = "stack_traces.get_stackid(args, " \
"%d | BPF_F_REUSE_STACKID)" % skip
user_stack_get = \
"stack_traces.get_stackid(args, BPF_F_REUSE_STACKID | BPF_F_USER_STACK)"
stack_context = ""
if args.user_stacks_only:
stack_context = "user"
kernel_stack_get = "-1"
elif args.kernel_stacks_only:
stack_context = "kernel"
user_stack_get = "-1"
else:
stack_context = "user + kernel"
bpf_text = bpf_text.replace('USER_STACK_GET', user_stack_get)
bpf_text = bpf_text.replace('KERNEL_STACK_GET', kernel_stack_get)
if skip:
# don't record the rip, as we won't use it
bpf_text = bpf_text.replace('DO_KERNEL_RIP', '0')
else:
# rip is used to skip interrupt infrastructure frames
bpf_text = bpf_text.replace('DO_KERNEL_RIP', '1')
# header
if not args.folded:
print("Sampling at %d Hertz of %s by %s stack" %
(args.frequency, thread_context, stack_context), end="")
if duration < 99999999:
print(" for %d secs." % duration)
else:
print("... Hit Ctrl-C to end.")
# kprobe perf_misc_flags()
bpf_text = bpf_text.replace('PERF_TRACE_EVENT',
'int kprobe__perf_misc_flags(struct pt_regs *args)')
bpf_text = bpf_text.replace('REGS_LOCATION', 'PT_REGS_PARM1(args)')
if debug:
print(bpf_text)
# initialize BPF
try:
b = BPF(text=bpf_text)
except:
print("BPF initialization failed. perf_misc_flags() may be inlined in " +
"your kernel build.\nThis tool will be updated in the future to " +
"support Linux 4.9, which has reliable profiling support. Exiting.")
exit()
# signal handler
def signal_ignore(signal, frame):
print()
#
# Setup perf_events
#
# use perf_events to sample
try:
Perf.perf_event_open(0, pid=-1, ptype=Perf.PERF_TYPE_SOFTWARE,
freq=args.frequency)
except:
print("ERROR: initializing perf_events for sampling.\n"
"To debug this, try running the following command:\n"
" perf record -F 49 -e cpu-clock %s -- sleep 1\n"
"If that also doesn't work, fix it first." % perf_filter, file=stderr)
exit(0)
#
# Output Report
#
# collect samples
try:
sleep(duration)
except KeyboardInterrupt:
# as cleanup can take some time, trap Ctrl-C:
signal.signal(signal.SIGINT, signal_ignore)
if not args.folded:
print()
def aksym(addr):
if args.annotations:
return b.ksym(addr) + "_[k]"
else:
return b.ksym(addr)
# output stacks
missing_stacks = 0
has_enomem = False
counts = b.get_table("counts")
stack_traces = b.get_table("stack_traces")
for k, v in sorted(counts.items(), key=lambda counts: counts[1].value):
    # handle get_stackid errors
if (not args.user_stacks_only and k.kernel_stack_id < 0 and
k.kernel_stack_id != -errno.EFAULT) or \
(not args.kernel_stacks_only and k.user_stack_id < 0 and
k.user_stack_id != -errno.EFAULT):
missing_stacks += 1
# check for an ENOMEM error
if k.kernel_stack_id == -errno.ENOMEM or \
k.user_stack_id == -errno.ENOMEM:
has_enomem = True
user_stack = [] if k.user_stack_id < 0 else \
stack_traces.walk(k.user_stack_id)
kernel_tmp = [] if k.kernel_stack_id < 0 else \
stack_traces.walk(k.kernel_stack_id)
# fix kernel stack
kernel_stack = []
if k.kernel_stack_id >= 0:
if skip:
# fixed skip
for addr in kernel_tmp:
kernel_stack.append(addr)
kernel_stack = kernel_stack[skip:]
else:
# skip the interrupt framework stack by searching for our RIP
skipping = 1
for addr in kernel_tmp:
if k.kernel_ret_ip == addr:
skipping = 0
if not skipping:
kernel_stack.append(addr)
if k.kernel_ip:
kernel_stack.insert(0, k.kernel_ip)
do_delimiter = need_delimiter and kernel_stack
if args.folded:
# print folded stack output
user_stack = list(user_stack)
kernel_stack = list(kernel_stack)
line = [k.name.decode()] + \
[b.sym(addr, k.pid) for addr in reversed(user_stack)] + \
(do_delimiter and ["-"] or []) + \
[aksym(addr) for addr in reversed(kernel_stack)]
print("%s %d" % (";".join(line), v.value))
else:
# print default multi-line stack output.
for addr in kernel_stack:
print(" %s" % aksym(addr))
if do_delimiter:
print(" --")
for addr in user_stack:
print(" %s" % b.sym(addr, k.pid))
print(" %-16s %s (%d)" % ("-", k.name, k.pid))
print(" %d\n" % v.value)
# check missing
if missing_stacks > 0:
enomem_str = "" if not has_enomem else \
" Consider increasing --stack-storage-size."
print("WARNING: %d stack traces could not be displayed.%s" %
(missing_stacks, enomem_str),
file=stderr)
|
the-stack_106_18537
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.teleport.InteriorDoorTeleportActorOV
from pirates.piratesbase import PiratesGlobals
from pirates.teleport.DoorTeleportActorOV import DoorTeleportActorOV
class InteriorDoorTeleportActorOV(DoorTeleportActorOV):
__module__ = __name__
@report(types=['args'], dConfigParam=['dteleport'])
def __init__(self, cr, name='InteriorDoorTeleportActorOV'):
DoorTeleportActorOV.__init__(self, cr, name)
self.doorRequest = None
return
def disable(self):
if self.doorRequest:
base.cr.relatedObjectMgr.abortRequest(self.doorRequest)
self.doorRequest = None
DoorTeleportActorOV.disable(self)
return
@report(types=['args'], dConfigParam=['dteleport'])
def enterOpenGame(self, areaDoId, doorId):
self.areaDoId = areaDoId
self.doorId = doorId
world = self.cr.getDo(self.worldDoId)
world.goOnStage()
area = self.cr.getDo(areaDoId)
area.goOnStage()
def doorArrived(door):
doorLocator = door.getDoorLocator()
localAvatar.reparentTo(doorLocator)
localAvatar.setPosHpr(0, 10, 0, 0, 0, 0)
localAvatar.wrtReparentTo(area)
localAvatar.setP(0)
localAvatar.setR(0)
localAvatar.setScale(1)
area.parentObjectToArea(localAvatar)
area.handleEnterGameArea(None)
localAvatar.enableGridInterest()
area.manageChild(localAvatar)
self._requestWhenInterestComplete('GameOpen')
return
def doorNotArrived(doIdList):
self.notify.error('InteriorDoorTeleportActorOV.enterOpenGame: door %s never arrived in area %s' % (self.doorId, area.uniqueId))
self.doorRequest = base.cr.relatedObjectMgr.requestObjects([self.doorId], eachCallback=doorArrived, timeout=60, timeoutCallback=doorNotArrived)
@report(types=['args'], dConfigParam=['dteleport'])
def enterStartShow(self, *args):
DoorTeleportActorOV.enterStartShow(self, *args)
door = self.cr.getDo(self.doorId)
messenger.send('fadeInInteriorDoor', [door.getBuildingUid()])
base.cr.loadingScreen.hide()
|
the-stack_106_18538
|
#!/usr/bin/env python
# Test whether a valid CONNECT results in the correct CONNACK packet using an SSL connection.
import socket
import ssl
import sys
if sys.version < '2.7':
print("WARNING: SSL not supported on Python 2.6")
exit(0)
import inspect, os
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
def write_config(filename, port1, port2):
with open(filename, 'w') as f:
f.write("port %d\n" % (port2))
f.write("listener %d\n" % (port1))
f.write("cafile ../ssl/all-ca.crt\n")
f.write("certfile ../ssl/server.crt\n")
f.write("keyfile ../ssl/server.key\n")
f.write("require_certificate true\n")
(port1, port2) = mosq_test.get_port(2)
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port1, port2)
rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("connect-success-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port2, use_conf=True)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock = ssl.wrap_socket(sock, ca_certs="../ssl/test-root-ca.crt", certfile="../ssl/client.crt", keyfile="../ssl/client.key", cert_reqs=ssl.CERT_REQUIRED)
ssock.settimeout(20)
ssock.connect(("localhost", port1))
mosq_test.do_send_receive(ssock, connect_packet, connack_packet, "connack")
rc = 0
ssock.close()
finally:
os.remove(conf_file)
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde)
exit(rc)
|
the-stack_106_18539
|
import os
import numpy as np
from pynif3d.common.verification import check_path_exists, check_pos_int
def load_pose_from_file(filename):
check_path_exists(filename)
pose = np.loadtxt(filename)
pose = pose.reshape([4, 4]).astype(np.float32)
return pose
def load_poses_from_dir(poses_dir):
check_path_exists(poses_dir)
files = sorted(os.listdir(poses_dir))
check_pos_int(len(files), "files")
poses = [
load_pose_from_file(os.path.join(poses_dir, f))
for f in files
if f.endswith("txt")
]
poses = np.stack(poses, 0).astype(np.float32)
transformation_matrix = np.array(
[
[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1.0],
],
dtype=np.float32,
)
poses = poses @ transformation_matrix
poses = poses[:, :3, :4].astype(np.float32)
return poses
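# Minimal usage sketch (illustrative; the directory path is a placeholder):
#
#     >>> poses = load_poses_from_dir("/data/scene/poses")
#     >>> poses.shape   # (num_poses, 3, 4), float32 camera poses
#
# The fixed 4x4 matrix above negates the second and third columns of each pose,
# a common camera-axis convention flip, before the last row is dropped.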
|
the-stack_106_18540
|
""" This module extends the folium Map class. It is designed to be used in Google Colab, as Google Colab currently does not support ipyleaflet.
"""
import os
import ee
import folium
from folium import plugins
from .common import *
from .conversion import *
from .legends import builtin_legends
# More WMS basemaps can be found at https://viewer.nationalmap.gov/services/
ee_basemaps = {
"ROADMAP": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=m&x={x}&y={y}&z={z}",
attr="Google",
name="Google Maps",
overlay=True,
control=True,
),
"SATELLITE": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=s&x={x}&y={y}&z={z}",
attr="Google",
name="Google Satellite",
overlay=True,
control=True,
),
"TERRAIN": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=p&x={x}&y={y}&z={z}",
attr="Google",
name="Google Terrain",
overlay=True,
control=True,
),
"HYBRID": folium.TileLayer(
tiles="https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}",
attr="Google",
name="Google Satellite",
overlay=True,
control=True,
),
"ESRI": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Satellite",
overlay=True,
control=True,
),
"Esri Ocean": folium.TileLayer(
tiles="https://services.arcgisonline.com/ArcGIS/rest/services/Ocean/World_Ocean_Base/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Ocean",
overlay=True,
control=True,
),
"Esri Satellite": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Satellite",
overlay=True,
control=True,
),
"Esri Standard": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Street_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Standard",
overlay=True,
control=True,
),
"Esri Terrain": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/World_Terrain_Base/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Terrain",
overlay=True,
control=True,
),
"Esri Transportation": folium.TileLayer(
tiles="https://server.arcgisonline.com/ArcGIS/rest/services/Reference/World_Transportation/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Transportation",
overlay=True,
control=True,
),
"Esri Topo World": folium.TileLayer(
tiles="https://services.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Topo World",
overlay=True,
control=True,
),
"Esri National Geographic": folium.TileLayer(
tiles="http://services.arcgisonline.com/ArcGIS/rest/services/NatGeo_World_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri National Geographic",
overlay=True,
control=True,
),
"Esri Shaded Relief": folium.TileLayer(
tiles="https://services.arcgisonline.com/arcgis/rest/services/World_Shaded_Relief/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Shaded Relief",
overlay=True,
control=True,
),
"Esri Physical Map": folium.TileLayer(
tiles="https://services.arcgisonline.com/arcgis/rest/services/World_Physical_Map/MapServer/tile/{z}/{y}/{x}",
attr="Esri",
name="Esri Physical Map",
overlay=True,
control=True,
),
"Bing VirtualEarth": folium.TileLayer(
tiles="http://ecn.t3.tiles.virtualearth.net/tiles/a{q}.jpeg?g=1",
attr="Microsoft",
name="Bing VirtualEarth",
overlay=True,
control=True,
),
"3DEP Elevation": folium.WmsTileLayer(
url="https://elevation.nationalmap.gov/arcgis/services/3DEPElevation/ImageServer/WMSServer?",
layers="3DEPElevation:None",
attr="USGS",
name="3DEP Elevation",
overlay=True,
control=True,
),
"NAIP Imagery": folium.WmsTileLayer(
url="https://services.nationalmap.gov/arcgis/services/USGSNAIPImagery/ImageServer/WMSServer?",
layers="0",
attr="USGS",
name="NAIP Imagery",
overlay=True,
control=True,
),
}
class Map(folium.Map):
"""The Map class inherits from folium.Map. By default, the Map will add Google Maps as the basemap. Set add_google_map = False to use OpenStreetMap as the basemap.
Returns:
object: folium map object.
"""
def __init__(self, **kwargs):
import logging
logging.getLogger("googleapiclient.discovery_cache").setLevel(logging.ERROR)
if "ee_initialize" not in kwargs.keys():
kwargs["ee_initialize"] = True
if kwargs["ee_initialize"]:
ee_initialize()
# Default map center location and zoom level
latlon = [40, -100]
zoom = 4
# Interchangeable parameters between ipyleaflet and folium
if "center" in kwargs.keys():
kwargs["location"] = kwargs["center"]
kwargs.pop("center")
if "location" in kwargs.keys():
latlon = kwargs["location"]
else:
kwargs["location"] = latlon
if "zoom" in kwargs.keys():
kwargs["zoom_start"] = kwargs["zoom"]
kwargs.pop("zoom")
if "zoom_start" in kwargs.keys():
zoom = kwargs["zoom_start"]
else:
kwargs["zoom_start"] = zoom
if "add_google_map" not in kwargs.keys() and "basemap" not in kwargs.keys():
kwargs["add_google_map"] = True
if "plugin_LatLngPopup" not in kwargs.keys():
kwargs["plugin_LatLngPopup"] = True
if "plugin_Fullscreen" not in kwargs.keys():
kwargs["plugin_Fullscreen"] = True
if "plugin_Draw" not in kwargs.keys():
kwargs["plugin_Draw"] = False
if "Draw_export" not in kwargs.keys():
kwargs["Draw_export"] = True
if "plugin_MiniMap" not in kwargs.keys():
kwargs["plugin_MiniMap"] = False
if "plugin_LayerControl" not in kwargs.keys():
kwargs["plugin_LayerControl"] = False
super().__init__(**kwargs)
self.baseclass = "folium"
if kwargs.get("add_google_map"):
ee_basemaps["ROADMAP"].add_to(self)
if kwargs.get("basemap"):
ee_basemaps[kwargs.get("basemap")].add_to(self)
if kwargs.get("plugin_LatLngPopup"):
folium.LatLngPopup().add_to(self)
if kwargs.get("plugin_Fullscreen"):
plugins.Fullscreen().add_to(self)
if kwargs.get("plugin_Draw"):
plugins.Draw(export=kwargs.get("Draw_export")).add_to(self)
if kwargs.get("plugin_MiniMap"):
plugins.MiniMap().add_to(self)
if kwargs.get("plugin_LayerControl"):
folium.LayerControl().add_to(self)
self.fit_bounds([latlon, latlon], max_zoom=zoom)
def setOptions(self, mapTypeId="HYBRID", styles={}, types=[]):
"""Adds Google basemap to the map.
Args:
mapTypeId (str, optional): A mapTypeId to set the basemap to. Can be one of "ROADMAP", "SATELLITE", "HYBRID" or "TERRAIN" to select one of the standard Google Maps API map types. Defaults to 'HYBRID'.
            styles (dict, optional): A dictionary of custom MapTypeStyle objects keyed with a name that will appear in the map's Map Type Controls. Defaults to {}.
            types (list, optional): A list of mapTypeIds to make available. If omitted, but opt_styles is specified, appends all of the style keys to the standard Google Maps API map types. Defaults to [].
"""
try:
ee_basemaps[mapTypeId].add_to(self)
except Exception:
raise Exception(
"Basemap can only be one of the following: {}".format(
", ".join(ee_basemaps.keys())
)
)
set_options = setOptions
def add_basemap(self, basemap="HYBRID"):
"""Adds a basemap to the map.
Args:
basemap (str, optional): Can be one of string from ee_basemaps. Defaults to 'HYBRID'.
"""
try:
ee_basemaps[basemap].add_to(self)
except Exception:
raise Exception(
"Basemap can only be one of the following: {}".format(
", ".join(ee_basemaps.keys())
)
)
def add_layer(
self,
ee_object,
vis_params={},
name="Layer untitled",
shown=True,
opacity=1.0,
**kwargs,
):
"""Adds a given EE object to the map as a layer.
Args:
ee_object (Collection|Feature|Image|MapId): The object to add to the map.
vis_params (dict, optional): The visualization parameters. Defaults to {}.
name (str, optional): The name of the layer. Defaults to 'Layer untitled'.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): The layer's opacity represented as a number between 0 and 1. Defaults to 1.
"""
from box import Box
image = None
if (
not isinstance(ee_object, ee.Image)
and not isinstance(ee_object, ee.ImageCollection)
and not isinstance(ee_object, ee.FeatureCollection)
and not isinstance(ee_object, ee.Feature)
and not isinstance(ee_object, ee.Geometry)
):
            err_str = "\n\nThe image argument in 'addLayer' function must be an instance of one of ee.Image, ee.Geometry, ee.Feature or ee.FeatureCollection."
raise AttributeError(err_str)
if (
isinstance(ee_object, ee.geometry.Geometry)
or isinstance(ee_object, ee.feature.Feature)
or isinstance(ee_object, ee.featurecollection.FeatureCollection)
):
features = ee.FeatureCollection(ee_object)
width = 2
if "width" in vis_params:
width = vis_params["width"]
color = "000000"
if "color" in vis_params:
color = vis_params["color"]
image_fill = features.style(**{"fillColor": color}).updateMask(
ee.Image.constant(0.5)
)
image_outline = features.style(
**{"color": color, "fillColor": "00000000", "width": width}
)
image = image_fill.blend(image_outline)
elif isinstance(ee_object, ee.image.Image):
image = ee_object
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
image = ee_object.mosaic()
if "palette" in vis_params and isinstance(vis_params["palette"], Box):
try:
vis_params["palette"] = vis_params["palette"]["default"]
except Exception as e:
print("The provided palette is invalid.")
raise Exception(e)
map_id_dict = ee.Image(image).getMapId(vis_params)
# if a layer starts with a number, add "Layer" to name.
if name[0].isdigit():
name = "Layer " + name
folium.raster_layers.TileLayer(
tiles=map_id_dict["tile_fetcher"].url_format,
attr="Google Earth Engine",
name=name,
overlay=True,
control=True,
show=shown,
opacity=opacity,
**kwargs,
).add_to(self)
addLayer = add_layer
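    # Illustrative use of add_layer / addLayer (not part of the original class);
    # the import path, asset ID and visualization parameters are example values:
    #
    #     >>> import ee
    #     >>> import geemap.foliumap as geemap   # module path assumed
    #     >>> m = geemap.Map(center=[40, -100], zoom=4)
    #     >>> dem = ee.Image("USGS/SRTMGL1_003")
    #     >>> m.addLayer(dem, {"min": 0, "max": 3000}, "SRTM DEM")
    #     >>> m.addLayerControl()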
def _repr_mimebundle_(self, include, exclude, **kwargs):
"""Adds Layer control to the map. Referece: https://ipython.readthedocs.io/en/stable/config/integrating.html#MyObject._repr_mimebundle_
Args:
include ([type]): [description]
exclude ([type]): [description]
"""
self.add_layer_control()
def set_center(self, lon, lat, zoom=10):
"""Centers the map view at a given coordinates with the given zoom level.
Args:
lon (float): The longitude of the center, in degrees.
lat (float): The latitude of the center, in degrees.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to 10.
"""
self.fit_bounds([[lat, lon], [lat, lon]], max_zoom=zoom)
setCenter = set_center
def center_object(self, ee_object, zoom=10):
"""Centers the map view on a given object.
Args:
ee_object (Element|Geometry): An Earth Engine object to center on - a geometry, image or feature.
zoom (int, optional): The zoom level, from 1 to 24. Defaults to 10.
"""
lat = 0
lon = 0
bounds = [[lat, lon], [lat, lon]]
if isinstance(ee_object, ee.geometry.Geometry):
centroid = ee_object.centroid()
lon, lat = centroid.getInfo()["coordinates"]
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.featurecollection.FeatureCollection):
centroid = ee_object.geometry().centroid()
lon, lat = centroid.getInfo()["coordinates"]
bounds = [[lat, lon], [lat, lon]]
elif isinstance(ee_object, ee.image.Image):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()["coordinates"][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
elif isinstance(ee_object, ee.imagecollection.ImageCollection):
geometry = ee_object.geometry()
coordinates = geometry.getInfo()["coordinates"][0]
bounds = [coordinates[0][::-1], coordinates[2][::-1]]
else:
bounds = [[0, 0], [0, 0]]
self.fit_bounds(bounds, max_zoom=zoom)
centerObject = center_object
def set_control_visibility(
self, layerControl=True, fullscreenControl=True, latLngPopup=True
):
"""Sets the visibility of the controls on the map.
Args:
layerControl (bool, optional): Whether to show the control that allows the user to toggle layers on/off. Defaults to True.
fullscreenControl (bool, optional): Whether to show the control that allows the user to make the map full-screen. Defaults to True.
latLngPopup (bool, optional): Whether to show the control that pops up the Lat/lon when the user clicks on the map. Defaults to True.
"""
if layerControl:
folium.LayerControl().add_to(self)
if fullscreenControl:
plugins.Fullscreen().add_to(self)
if latLngPopup:
folium.LatLngPopup().add_to(self)
setControlVisibility = set_control_visibility
def add_layer_control(self):
"""Adds layer control to the map."""
layer_ctrl = False
for item in self.to_dict()["children"]:
if item.startswith("layer_control"):
layer_ctrl = True
break
if not layer_ctrl:
folium.LayerControl().add_to(self)
addLayerControl = add_layer_control
def add_wms_layer(
self,
url,
layers,
name=None,
attribution="",
overlay=True,
control=True,
shown=True,
format="image/png",
transparent=False,
version="1.1.1",
styles="",
**kwargs,
):
"""Add a WMS layer to the map.
Args:
url (str): The URL of the WMS web service.
layers (str): Comma-separated list of WMS layers to show.
name (str, optional): The layer name to use on the layer control. Defaults to None.
attribution (str, optional): The attribution of the data layer. Defaults to ''.
overlay (str, optional): Allows overlay. Defaults to True.
control (str, optional): Adds the layer to the layer control. Defaults to True.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
format (str, optional): WMS image format (use ‘image/png’ for layers with transparency). Defaults to 'image/png'.
transparent (bool, optional): Whether the layer shall allow transparency. Defaults to False.
version (str, optional): Version of the WMS service to use. Defaults to "1.1.1".
styles (str, optional): Comma-separated list of WMS styles. Defaults to "".
"""
try:
folium.raster_layers.WmsTileLayer(
url=url,
layers=layers,
name=name,
attr=attribution,
overlay=overlay,
control=control,
show=shown,
styles=styles,
fmt=format,
transparent=transparent,
version=version,
**kwargs,
).add_to(self)
except Exception:
raise Exception("Failed to add the specified WMS TileLayer.")
def add_tile_layer(
self,
tiles="OpenStreetMap",
name="Untitled",
attribution=".",
overlay=True,
control=True,
shown=True,
opacity=1.0,
API_key=None,
**kwargs,
):
"""Add a XYZ tile layer to the map.
Args:
tiles (str): The URL of the XYZ tile service.
name (str, optional): The layer name to use on the layer control. Defaults to 'Untitled'.
attribution (str, optional): The attribution of the data layer. Defaults to '.'.
overlay (str, optional): Allows overlay. Defaults to True.
control (str, optional): Adds the layer to the layer control. Defaults to True.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
opacity (float, optional): Sets the opacity for the layer.
            API_key (str, optional): API key for Cloudmade or Mapbox tiles. Defaults to None.
"""
try:
folium.raster_layers.TileLayer(
tiles=tiles,
name=name,
attr=attribution,
overlay=overlay,
control=control,
show=shown,
opacity=opacity,
API_key=API_key,
**kwargs,
).add_to(self)
except Exception:
raise Exception("Failed to add the specified TileLayer.")
def add_COG_layer(
self,
url,
name="Untitled",
attribution=".",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a COG TileLayer to the map.
Args:
url (str): The URL of the COG tile layer.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to '.'.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_COG_tile(url, titiler_endpoint, **kwargs)
center = get_COG_center(url, titiler_endpoint) # (lon, lat)
self.add_tile_layer(
tiles=tile_url,
name=name,
attribution=attribution,
opacity=opacity,
shown=shown,
)
self.set_center(lon=center[0], lat=center[1], zoom=10)
def add_COG_mosaic(
self,
links,
name="Untitled",
attribution=".",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
username="anonymous",
overwrite=False,
show_footprints=False,
verbose=True,
**kwargs,
):
"""Add a virtual mosaic of COGs to the map.
Args:
links (list): A list of links pointing to COGs.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to '.'.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
username (str, optional): The username to create mosaic using the titiler endpoint. Defaults to 'anonymous'.
overwrite (bool, optional): Whether or not to replace existing layer with the same layer name. Defaults to False.
show_footprints (bool, optional): Whether or not to show footprints of COGs. Defaults to False.
verbose (bool, optional): Whether or not to print descriptions. Defaults to True.
"""
layername = name.replace(" ", "_")
tile = get_COG_mosaic(
links,
titiler_endpoint=titiler_endpoint,
username=username,
layername=layername,
overwrite=overwrite,
verbose=verbose,
)
self.add_tile_layer(tile, name, attribution, opacity, shown)
if show_footprints:
if verbose:
print(
f"Generating footprints of {len(links)} COGs. This might take a while ..."
)
coords = []
for link in links:
coord = get_COG_bounds(link)
if coord is not None:
coords.append(coord)
fc = coords_to_geojson(coords)
# style_function = lambda x: {'opacity': 1, 'dashArray': '1', 'fillOpacity': 0, 'weight': 1}
folium.GeoJson(
data=fc,
# style_function=style_function,
name="Footprints",
).add_to(self)
center = get_center(fc)
if verbose:
print("The footprint layer has been added.")
else:
center = get_COG_center(links[0], titiler_endpoint)
self.set_center(center[0], center[1], zoom=6)
def add_STAC_layer(
self,
url,
bands=None,
name="Untitled",
attribution=".",
opacity=1.0,
shown=True,
titiler_endpoint="https://api.cogeo.xyz/",
**kwargs,
):
"""Adds a STAC TileLayer to the map.
Args:
url (str): The URL of the COG tile layer.
name (str, optional): The layer name to use for the layer. Defaults to 'Untitled'.
attribution (str, optional): The attribution to use. Defaults to '.'.
opacity (float, optional): The opacity of the layer. Defaults to 1.
shown (bool, optional): A flag indicating whether the layer should be on by default. Defaults to True.
titiler_endpoint (str, optional): Titiler endpoint. Defaults to "https://api.cogeo.xyz/".
"""
tile_url = get_STAC_tile(url, bands, titiler_endpoint, **kwargs)
center = get_STAC_center(url, titiler_endpoint)
self.add_tile_layer(
tiles=tile_url,
name=name,
attribution=attribution,
opacity=opacity,
shown=shown,
)
self.set_center(lon=center[0], lat=center[1], zoom=10)
def add_legend(
self,
title="Legend",
colors=None,
labels=None,
legend_dict=None,
builtin_legend=None,
opacity=1.0,
):
"""Adds a customized basemap to the map. Reference: https://bit.ly/3oV6vnH
Args:
            title (str, optional): Title of the legend. Defaults to "Legend".
colors ([type], optional): A list of legend colors. Defaults to None.
labels ([type], optional): A list of legend labels. Defaults to None.
legend_dict ([type], optional): A dictionary containing legend items as keys and color as values. If provided, legend_keys and legend_colors will be ignored. Defaults to None.
builtin_legend ([type], optional): Name of the builtin legend to add to the map. Defaults to None.
opacity (float, optional): The opacity of the legend. Defaults to 1.0.
"""
import pkg_resources
from branca.element import Template, MacroElement
pkg_dir = os.path.dirname(
pkg_resources.resource_filename("geemap", "geemap.py")
)
legend_template = os.path.join(pkg_dir, "data/template/legend.txt")
if not os.path.exists(legend_template):
raise FileNotFoundError("The legend template does not exist.")
if labels is not None:
if not isinstance(labels, list):
raise ValueError("The legend labels must be a list.")
else:
labels = ["One", "Two", "Three", "Four", "ect"]
if colors is not None:
if not isinstance(colors, list):
raise ValueError("The legend colors must be a list.")
elif all(isinstance(item, tuple) for item in colors):
try:
colors = ["#" + rgb_to_hex(x) for x in colors]
except Exception as e:
raise Exception(e)
elif all((item.startswith("#") and len(item) == 7) for item in colors):
pass
elif all((len(item) == 6) for item in colors):
pass
else:
raise ValueError("The legend colors must be a list of tuples.")
else:
colors = ["#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3"]
if len(labels) != len(colors):
raise ValueError("The legend keys and values must be the same length.")
allowed_builtin_legends = builtin_legends.keys()
if builtin_legend is not None:
if builtin_legend not in allowed_builtin_legends:
raise ValueError(
"The builtin legend must be one of the following: {}".format(
", ".join(allowed_builtin_legends)
)
)
else:
legend_dict = builtin_legends[builtin_legend]
labels = list(legend_dict.keys())
colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in colors):
try:
colors = [rgb_to_hex(x) for x in colors]
except Exception as e:
raise Exception(e)
elif all(isinstance(item, str) for item in colors):
colors = ["#" + color for color in colors]
if legend_dict is not None:
if not isinstance(legend_dict, dict):
raise ValueError("The legend dict must be a dictionary.")
else:
labels = list(legend_dict.keys())
colors = list(legend_dict.values())
if all(isinstance(item, tuple) for item in colors):
try:
colors = [rgb_to_hex(x) for x in colors]
except Exception as e:
raise Exception(e)
elif all(isinstance(item, str) for item in colors):
colors = ["#" + color for color in colors]
content = []
with open(legend_template) as f:
lines = f.readlines()
for index, line in enumerate(lines):
if index < 36:
content.append(line)
elif index == 36:
line = lines[index].replace("Legend", title)
content.append(line)
elif index < 39:
content.append(line)
elif index == 39:
for i, color in enumerate(colors):
item = f" <li><span style='background:{check_color(color)};opacity:{opacity};'></span>{labels[i]}</li>\n"
content.append(item)
elif index > 41:
content.append(line)
template = "".join(content)
macro = MacroElement()
macro._template = Template(template)
self.get_root().add_child(macro)
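    # Illustrative add_legend calls on a Map instance m (example values; builtin
    # legend names are the keys of geemap.legends.builtin_legends):
    #
    #     >>> m.add_legend(title="Land cover", legend_dict={"Forest": "228B22", "Water": "0000FF"})
    #     >>> m.add_legend(builtin_legend="NLCD")  # assumes "NLCD" is one of the builtin keys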
def add_colorbar(
self,
colors,
vmin=0,
vmax=1.0,
index=None,
caption="",
categorical=False,
step=None,
**kwargs,
):
"""Add a colorbar to the map.
Args:
            colors (list): The set of colors to be used for interpolation. Colors can be provided in the form: * tuples of RGBA ints between 0 and 255 (e.g: (255, 255, 0) or (255, 255, 0, 255)) * tuples of RGBA floats between 0. and 1. (e.g: (1., 1., 0.) or (1., 1., 0., 1.)) * HTML-like string (e.g: "#ffff00") * a color name or shortcut (e.g: "y" or "yellow")
            vmin (int, optional): The minimal value for the colormap. Values lower than vmin will be bound directly to colors[0]. Defaults to 0.
            vmax (float, optional): The maximal value for the colormap. Values higher than vmax will be bound directly to colors[-1]. Defaults to 1.0.
            index (list, optional): The values corresponding to each color. It has to be sorted, and have the same length as colors. If None, a regular grid between vmin and vmax is created. Defaults to None.
caption (str, optional): The caption for the colormap. Defaults to "".
categorical (bool, optional): Whether or not to create a categorical colormap. Defaults to False.
step (int, optional): The step to split the LinearColormap into a StepColormap. Defaults to None.
"""
from box import Box
from branca.colormap import LinearColormap
if isinstance(colors, Box):
try:
colors = list(colors["default"])
except Exception as e:
print("The provided color list is invalid.")
raise Exception(e)
if all(len(color) == 6 for color in colors):
colors = ["#" + color for color in colors]
colormap = LinearColormap(
colors=colors, index=index, vmin=vmin, vmax=vmax, caption=caption
)
if categorical:
if step is not None:
colormap = colormap.to_step(step)
elif index is not None:
colormap = colormap.to_step(len(index) - 1)
else:
colormap = colormap.to_step(3)
self.add_child(colormap)
def add_styled_vector(
self, ee_object, column, palette, layer_name="Untitled", **kwargs
):
"""Adds a styled vector to the map.
Args:
ee_object (object): An ee.FeatureCollection.
column (str): The column name to use for styling.
palette (list): The palette (e.g., list of colors) to use for styling.
layer_name (str, optional): The name to be used for the new layer. Defaults to "Untitled".
"""
styled_vector = vector_styling(ee_object, column, palette, **kwargs)
self.addLayer(styled_vector.style(**{"styleProperty": "style"}), {}, layer_name)
def add_shapefile(self, in_shp, name="Untitled", **kwargs):
"""Adds a shapefile to the map. See https://python-visualization.github.io/folium/modules.html#folium.features.GeoJson for more info about setting style.
Args:
in_shp (str): The input file path to the shapefile.
name (str, optional): The layer name to be used. Defaults to "Untitled".
Raises:
FileNotFoundError: The provided shapefile could not be found.
"""
if not os.path.exists(in_shp):
raise FileNotFoundError("The provided shapefile could not be found.")
data = shp_to_geojson(in_shp)
geo_json = folium.GeoJson(data=data, name=name, **kwargs)
geo_json.add_to(self)
def add_geojson(self, in_geojson, name="Untitled", **kwargs):
"""Adds a GeoJSON file to the map.
Args:
in_geojson (str): The input file path to the GeoJSON.
name (str, optional): The layer name to be used. Defaults to "Untitled".
Raises:
FileNotFoundError: The provided GeoJSON file could not be found.
"""
import json
if not os.path.exists(in_geojson):
raise FileNotFoundError("The provided GeoJSON file could not be found.")
with open(in_geojson) as f:
data = json.load(f)
geo_json = folium.GeoJson(data=data, name=name, **kwargs)
geo_json.add_to(self)
def add_kml(self, in_kml, name="Untitled", **kwargs):
"""Adds a KML file to the map.
Args:
in_kml (str): The input file path to the KML.
name (str, optional): The layer name to be used. Defaults to "Untitled".
Raises:
FileNotFoundError: The provided KML file could not be found.
"""
import json
if not os.path.exists(in_kml):
raise FileNotFoundError("The provided KML file could not be found.")
out_json = os.path.join(os.getcwd(), "tmp.geojson")
kml_to_geojson(in_kml, out_json)
with open(out_json) as f:
data = json.load(f)
geo_json = folium.GeoJson(data=data, name=name, **kwargs)
geo_json.add_to(self)
os.remove(out_json)
def publish(
self,
name=None,
headline="Untitled",
visibility="PUBLIC",
overwrite=True,
open=True,
):
"""Publish the map to datapane.com
Args:
name (str, optional): The URL of the map. Defaults to None.
headline (str, optional): Title of the map. Defaults to 'Untitled'.
visibility (str, optional): Visibility of the map. It can be one of the following: PUBLIC, PRIVATE, ORG. Defaults to 'PUBLIC'.
overwrite (bool, optional): Whether to overwrite the existing map with the same name. Defaults to True.
open (bool, optional): Whether to open the map. Defaults to True.
"""
import webbrowser
try:
import datapane as dp
except Exception:
webbrowser.open_new_tab(
"https://docs.datapane.com/tutorials/tut-getting-started"
)
raise ImportError(
"The datapane Python package is not installed. You need to install and authenticate datapane first."
)
# import datapane as dp
# import logging
# logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.ERROR)
if name is None:
name = "folium_" + random_string(6)
visibility = visibility.upper()
if visibility not in ["PUBLIC", "PRIVATE", "ORG"]:
visibility = "PRIVATE"
if overwrite:
delete_dp_report(name)
report = dp.Report(dp.Plot(self))
report.publish(name=name, headline=headline, visibility=visibility, open=open)
def delete_dp_report(name):
"""Deletes a datapane report.
Args:
name (str): Name of the report to delete.
"""
try:
import datapane as dp
reports = dp.Report.list()
items = list(reports)
names = list(map(lambda item: item["name"], items))
if name in names:
report = dp.Report.get(name)
url = report.blocks[0]["url"]
# print('Deleting {}...'.format(url))
dp.Report.delete(dp.Report.by_id(url))
except Exception as e:
print(e)
return
def delete_dp_reports():
"""Deletes all datapane reports."""
try:
import datapane as dp
reports = dp.Report.list()
for item in reports:
print(item["name"])
report = dp.Report.get(item["name"])
url = report.blocks[0]["url"]
print("Deleting {}...".format(url))
dp.Report.delete(dp.Report.by_id(url))
except Exception as e:
print(e)
return
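# Illustrative usage sketch (an assumption, not part of the library): how the
# colorbar and styled-vector helpers above might be combined. The class name
# `Map`, its constructor arguments and the Earth Engine asset id are
# placeholders and may differ from the actual package API.
def _example_map_workflow():
    import ee
    ee.Initialize()
    m = Map(center=[40, -100], zoom=4)  # assumed constructor signature
    # Continuous colorbar for an elevation-style layer
    m.add_colorbar(colors=["#0000ff", "#00ff00", "#ff0000"], vmin=0, vmax=4000,
                   caption="Elevation (m)")
    # Style a FeatureCollection by one of its attributes
    states = ee.FeatureCollection("TIGER/2018/States")
    m.add_styled_vector(states, column="NAME",
                        palette=["8DD3C7", "FFFFB3", "BEBADA"],
                        layer_name="US States")
    return m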
|
the-stack_106_18542
|
from __future__ import absolute_import, division, print_function
import os
os.environ["CDF_LIB"] = "/usr/local/cdf"
from spacepy import pycdf
import numpy as np
import h5py as h5
import re
from glob import glob
from tqdm import tqdm
dataset = 'Human36'
subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
cameras = ['54138969', '55011271', '58860488', '60457274']
actions = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning',
'Posing', 'Purchases', 'Sitting', 'SittingDown', 'Smoking',
'Photo', 'Waiting', 'Walking', 'WalkDog', 'WalkTogether']
def read_pose(x):
return np.reshape(np.transpose(np.array(x), [2, 0, 1]), [32, 3, -1])
if __name__ == "__main__":
prog = re.compile(r'(S\d+)/MyPoseFeatures/D3_Positions/([^ ]+)[ ]*(\d)*\.cdf')
# First renaming files that need a 0
found_files = sorted([file for file in glob('extracted/S*/MyPoseFeatures/D3_Positions/*.cdf')])
for f, found_file in enumerate(tqdm(found_files)):
confpars = prog.findall(found_file)[0]
if confpars[2] == '':
os.rename(found_file, found_file[:-4] + ' 0.cdf')
# Then renaming files to 1, 2
found_files = sorted([file for file in glob('extracted/S*/MyPoseFeatures/D3_Positions/*.cdf')])
prev_subject = ''
prev_action = ''
for f, found_file in enumerate(tqdm(found_files)):
confpars = prog.findall(found_file)[0]
if confpars[0] == prev_subject and confpars[1] == prev_action:
subaction = 2
else:
subaction = 1
os.rename(found_file, found_file[:-5] + str(subaction) + '.cdf.tmp')
prev_subject = confpars[0]
prev_action = confpars[1]
found_files = sorted([file for file in glob('extracted/S*/MyPoseFeatures/D3_Positions/*.cdf.tmp')])
for f, found_file in enumerate(tqdm(found_files)):
os.rename(found_file, found_file[:-4])
found_files = sorted([file for file in glob('extracted/S*/MyPoseFeatures/D3_Positions/*.cdf')])
print('Processing {} files...'.format(len(found_files)))
h5file = h5.File(dataset + "v1.h5", "w")
max_len = 0
for f, found_file in enumerate(tqdm(found_files)):
confpars = prog.findall(found_file)[0]
subject = [i for i, x in enumerate(subjects) if x == confpars[0]][-1]
action = [i for i, x in enumerate(actions) if x in confpars[1]][-1]
subaction = int(confpars[2])
# print(found_file)
# print(subject, action)
subarray = np.array(subject + 1)
actarray = np.array(action + 1)
sactarray = np.array(subaction)
pose3dcdf = pycdf.CDF(found_file)
posearray = read_pose(pose3dcdf['Pose'])
pose3dcdf.close()
# S5 will be the Validate split the rest of the subjects are the training set
datasplit = 'Validate' if subjects[subject] == 'S5' else 'Train'
datapath = '{}/{}/SEQ{}/'.format(dataset, datasplit, f)
h5file.create_dataset(
datapath + 'Subject', np.shape(subarray),
dtype='int32', data=subarray
)
h5file.create_dataset(
datapath + 'Action', np.shape(actarray),
dtype='int32', data=actarray
)
h5file.create_dataset(
datapath + 'Subaction', np.shape(sactarray),
dtype='int32', data=sactarray
)
h5file.create_dataset(
datapath + 'Pose', np.shape(posearray),
dtype='float32', data=posearray
)
max_len = max(max_len, posearray.shape[2])
print('Dataset sample: ')
print(h5file.get(dataset + '/Validate/'))
print('max length', max_len)
h5file.flush()
h5file.close()
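# Illustrative read-back sketch (an assumption, not part of the original
# script): loads one sequence written above. The group layout mirrors the
# create_dataset calls ('<dataset>/<split>/SEQ<f>/{Subject,Action,Subaction,Pose}').
def load_sequence(h5_path, split='Train', seq_id=0):
    with h5.File(h5_path, 'r') as f:
        grp = f['{}/{}/SEQ{}'.format(dataset, split, seq_id)]
        pose = np.array(grp['Pose'])        # [32 joints, 3 coords, n_frames]
        subject = int(np.array(grp['Subject']))
        action = int(np.array(grp['Action']))
    return pose, subject, action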
|
the-stack_106_18548
|
tests = [
("python", "UnitTestFingerprints.py", {}),
("python", "UnitTestSimScreener.py", {}),
("python", "DbFpSupplier.py", {}),
]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
the-stack_106_18549
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Libra Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting libra with -h works as expected."""
from test_framework.test_framework import LibraTestFramework
from test_framework.util import assert_equal
class HelpTest(LibraTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start libra with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start libra with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start librad with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
|
the-stack_106_18550
|
"""
BSD 3-Clause License
Copyright (c) 2019, Steven F. Hoover
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This mini web server provides a REST API to reset an ec2_time_bomb.
(NOTE: There is similar support built into server.py, which is used by examples.
THIS SCRIPT IS NOT IN USE AND IS PROBABLY BROKEN.)
Usage:
nohup python3 ec2_time_bomb_server.py <time_bomb-file> <port> &
(Nohup ensures that the service continues running after its shell is closed.)
API:
GET request to :<port>/reset_ec2_time_bomb resets the time bomb.
"""
import tornado.httpserver
import tornado.ioloop
import tornado.web
import os.path
import subprocess
import sys
import json
"""
Time Bomb Reset Handler
"""
class TimeBombHandler(tornado.web.RequestHandler):
# Set the headers to avoid access-control-allow-origin errors when sending get requests from the client
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
self.set_header("Connection", "keep-alive")
self.set_header("Content-Type", "text/plain")
# Reset GET request.
def get(self):
status = 0
args = [TimeBombApplication.mydir + "/ec2_time_bomb", "reset", TimeBombApplication.time_bomb_file]
try:
out = subprocess.check_output(args, universal_newlines=True)
except:
out = "Error: " + ' '.join(args)
status = 1
print("Time bomb reset returned: %s" % (out))
self.write(str(status))
class TimeBombApplication(tornado.web.Application):
def __init__(self, time_bomb_file, port):
TimeBombApplication.time_bomb_file = time_bomb_file
TimeBombApplication.mydir = os.path.dirname(__file__)
if TimeBombApplication.mydir == "":
TimeBombApplication.mydir = "."
print(TimeBombApplication.mydir)
routes = [
(r"/reset_ec2_time_bomb", TimeBombHandler)
]
super(TimeBombApplication, self).__init__(routes)
server = tornado.httpserver.HTTPServer(self)
server.listen(port)
# Report external URL for the web server.
# Get Real IP Address using 3rd-party service.
# Local IP: myIP = socket.gethostbyname(socket.gethostname())
port_str = "" if port == 80 else ":" + str(port)
try:
external_ip = subprocess.check_output(["wget", "-qO-", "ifconfig.me"], universal_newlines=True)
print('*** Time Bomb Server Started, (http://%s%s) ***' % (external_ip, port_str))
except:
print("Python: TimeBombApplication failed to acquire external IP address.")
external_ip = None
print('*** Time Bomb Server Started (http://localhost%s) ***' % port_str)
# Starting web server
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
# Command-line options
if len(sys.argv) < 2:
print("Usage: python3 ec2_time_bomb_server <time-bomb-file> <port>")
sys.exit(1)
port = 65261 # (0xFEED in decimal)
if len(sys.argv) > 2:
port = int(sys.argv[2])
application = TimeBombApplication(sys.argv[1], port)
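# Illustrative client sketch (an assumption, not part of this server): a minimal
# way to call the REST API described in the module docstring. Host and port are
# placeholders; the handler above writes "0" on success and "1" on failure.
def reset_time_bomb(host="localhost", port=65261):
    import urllib.request
    url = "http://{}:{}/reset_ec2_time_bomb".format(host, port)
    with urllib.request.urlopen(url, timeout=5) as resp:
        return resp.read().decode("utf-8").strip() == "0"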
|
the-stack_106_18551
|
"""
UBXReader class.
Reads and parses individual UBX or NMEA messages from any stream
which supports a read(n) -> bytes method.
Returns both the raw binary data (as bytes) and the parsed data
(as a UBXMessage or NMEAMessage object).
'protfilter' governs which protocols (NMEA and/or UBX) are processed
'quitonerror' governs how errors are handled
Created on 2 Oct 2020
:author: semuadmin
:copyright: SEMU Consulting © 2020
:license: BSD 3-Clause
"""
from pynmeagps import NMEAReader
import pynmeagps.exceptions as nme
from pyubx2 import UBXMessage
from pyubx2.ubxhelpers import calc_checksum, val2bytes, bytes2val
import pyubx2.ubxtypes_core as ubt
import pyubx2.exceptions as ube
class UBXReader:
"""
UBXReader class.
"""
def __init__(self, datastream, **kwargs):
"""Constructor.
:param datastream stream: input data stream
:param int quitonerror: (kwarg) 0 = ignore errors, 1 = log errors and continue, 2 = (re)raise errors (1)
:param int protfilter: (kwarg) protocol filter 1 = NMEA, 2 = UBX, 3 = BOTH (3)
:param int validate: (kwarg) 0 = ignore invalid checksum, 1 = validate checksum (1)
:param int msgmode: (kwarg) 0=GET, 1=SET, 2=POLL (0)
:param bool parsebitfield: (kwarg) 1 = parse bitfields, 0 = leave as bytes (1)
:raises: UBXStreamError (if mode is invalid)
"""
self._stream = datastream
self._protfilter = int(
kwargs.get("protfilter", ubt.NMEA_PROTOCOL | ubt.UBX_PROTOCOL)
)
self._quitonerror = int(kwargs.get("quitonerror", ubt.ERR_LOG))
# self._ubxonly = kwargs.get("ubxonly", False)  # flag superseded by protfilter
self._validate = int(kwargs.get("validate", ubt.VALCKSUM))
self._parsebf = int(kwargs.get("parsebitfield", True))
self._msgmode = int(kwargs.get("msgmode", 0))
if self._msgmode not in (0, 1, 2):
raise ube.UBXStreamError(
f"Invalid stream mode {self._msgmode} - must be 0, 1 or 2"
)
def __iter__(self):
"""Iterator."""
return self
def __next__(self) -> tuple:
"""
Return next item in iteration.
:return: tuple of (raw_data as bytes, parsed_data as UBXMessage)
:rtype: tuple
:raises: StopIteration
"""
(raw_data, parsed_data) = self.read()
if raw_data is not None:
return (raw_data, parsed_data)
raise StopIteration
def read(self) -> tuple:
"""
Read a single NMEA or UBX message from the stream buffer
and return both raw and parsed data.
'protfilter' determines which protocols are parsed.
'quitonerror' determines whether to raise, log or ignore parsing errors.
:return: tuple of (raw_data as bytes, parsed_data as UBXMessage or NMEAMessage)
:rtype: tuple
:raises: UBXStreamError (if unrecognised protocol in data stream)
"""
parsing = True
try:
while parsing: # loop until end of valid message or EOF
raw_data = None
parsed_data = None
byte1 = self._stream.read(1) # read the first byte
if len(byte1) < 1:
raise EOFError()
# if not UBX or NMEA, discard and continue
if byte1 not in (b"\xb5", b"\x24"):
continue
byte2 = self._stream.read(1)
if len(byte2) < 1:
raise EOFError()
bytehdr = byte1 + byte2
# if it's a UBX message (b'\xb5\x62')
if bytehdr == ubt.UBX_HDR:
(raw_data, parsed_data) = self._parse_ubx(bytehdr)
# if protocol filter passes UBX, return message,
# otherwise discard and continue
if self._protfilter & ubt.UBX_PROTOCOL:
parsing = False
else:
continue
# if it's an NMEA message ('$G' or '$P')
elif bytehdr in ubt.NMEA_HDR:
(raw_data, parsed_data) = self._parse_nmea(bytehdr)
# if protocol filter passes NMEA, return message,
# otherwise discard and continue
if self._protfilter & ubt.NMEA_PROTOCOL:
parsing = False
else:
continue
# unrecognised protocol header
else:
if self._quitonerror == ubt.ERR_RAISE:
raise ube.UBXStreamError(f"Unknown protocol {bytehdr}.")
elif self._quitonerror == ubt.ERR_LOG:
return (bytehdr, f"<UNKNOWN PROTOCOL(header={bytehdr})>")
else: # ignore unknown protocol and continue
continue
except EOFError:
return (None, None)
return (raw_data, parsed_data)
def _parse_ubx(self, hdr: bytes) -> tuple:
"""
Parse remainder of UBX message.
:param bytes hdr: UBX header (b'\xb5\x62')
:return: tuple of (raw_data as bytes, parsed_data as UBXMessage or None)
:rtype: tuple
"""
# read the rest of the UBX message from the buffer
byten = self._stream.read(4)
if len(byten) < 4: # EOF
raise EOFError()
clsid = byten[0:1]
msgid = byten[1:2]
lenb = byten[2:4]
leni = int.from_bytes(lenb, "little", signed=False)
byten = self._stream.read(leni + 2)
if len(byten) < leni + 2: # EOF
raise EOFError()
plb = byten[0:leni]
cksum = byten[leni : leni + 2]
raw_data = hdr + clsid + msgid + lenb + plb + cksum
# only parse if we need to (filter passes UBX)
if self._protfilter & ubt.UBX_PROTOCOL:
parsed_data = self.parse(
raw_data,
validate=self._validate,
msgmode=self._msgmode,
parsebitfield=self._parsebf,
)
else:
parsed_data = None
return (raw_data, parsed_data)
def _parse_nmea(self, hdr: bytes) -> tuple:
"""
Parse remainder of NMEA message (using pynmeagps library).
:param bytes hdr: NMEA header ($G or $P)
:return: tuple of (raw_data as bytes, parsed_data as NMEAMessage or None)
:rtype: tuple
"""
# read the rest of the NMEA message from the buffer
byten = self._stream.readline() # NMEA protocol is CRLF-terminated
if byten[-2:] != b"\x0d\x0a":
raise EOFError()
raw_data = hdr + byten
# only parse if we need to (filter passes NMEA)
if self._protfilter & ubt.NMEA_PROTOCOL:
# invoke pynmeagps parser
parsed_data = NMEAReader.parse(
raw_data,
validate=self._validate,
msgmode=self._msgmode,
)
else:
parsed_data = None
return (raw_data, parsed_data)
def iterate(self, **kwargs) -> tuple:
"""
Invoke the iterator within an exception handling framework.
:param int quitonerror: (kwarg) 0 = ignore errors, 1 = log errors and continue, 2 = (re)raise errors (0)
:param object errorhandler: (kwarg) Optional error handler (None)
:return: tuple of (raw_data as bytes, parsed_data as UBXMessage or NMEAMessage)
:rtype: tuple
:raises: UBX/NMEA...Error (if quitonerror is set and stream is invalid)
"""
quitonerror = kwargs.get("quitonerror", ubt.ERR_IGNORE)
errorhandler = kwargs.get("errorhandler", None)
while True:
try:
yield next(self) # invoke the iterator
except StopIteration:
break
except (
ube.UBXMessageError,
ube.UBXTypeError,
ube.UBXParseError,
ube.UBXStreamError,
nme.NMEAMessageError,
nme.NMEATypeError,
nme.NMEAParseError,
nme.NMEAStreamError,
) as err:
# raise, log or ignore any error depending
# on the quitonerror setting
if quitonerror == ubt.ERR_RAISE:
raise err
elif quitonerror == ubt.ERR_LOG:
# pass to error handler if there is one
if errorhandler is None:
print(err)
else:
errorhandler(err)
# continue
@property
def datastream(self) -> object:
"""
Getter for stream.
:return: data stream
:rtype: object
"""
return self._stream
@staticmethod
def parse(message: bytes, **kwargs) -> object:
"""
Parse UBX byte stream to UBXMessage object.
Includes option to validate incoming payload length and checksum
(the UBXMessage constructor can calculate and assign its own values anyway).
:param bytes message: binary message to parse
:param int validate: (kwarg) validate cksum (VALCKSUM (1)=True (default), VALNONE (0)=False)
:param int msgmode: (kwarg) message mode (0=GET (default), 1=SET, 2=POLL)
:param bool parsebitfield: (kwarg) parse bitfields True (default)/False
:return: UBXMessage object
:rtype: UBXMessage
:raises: UBXParseError (if data stream contains invalid data or unknown message type)
"""
msgmode = kwargs.get("msgmode", ubt.GET)
validate = kwargs.get("validate", ubt.VALCKSUM)
parsebf = kwargs.get("parsebitfield", True)
if msgmode not in (0, 1, 2):
raise ube.UBXParseError(
f"Invalid message mode {msgmode} - must be 0, 1 or 2"
)
lenm = len(message)
hdr = message[0:2]
clsid = message[2:3]
msgid = message[3:4]
lenb = message[4:6]
if lenb == b"\x00\x00":
payload = None
leni = 0
else:
payload = message[6 : lenm - 2]
leni = len(payload)
ckm = message[lenm - 2 : lenm]
if payload is not None:
ckv = calc_checksum(clsid + msgid + lenb + payload)
else:
ckv = calc_checksum(clsid + msgid + lenb)
if validate & ubt.VALCKSUM:
if hdr != ubt.UBX_HDR:
raise ube.UBXParseError(
(f"Invalid message header {hdr}" f" - should be {ubt.UBX_HDR}")
)
if leni != bytes2val(lenb, ubt.U2):
raise ube.UBXParseError(
(
f"Invalid payload length {lenb}"
f" - should be {val2bytes(leni, ubt.U2)}"
)
)
if ckm != ckv:
raise ube.UBXParseError(
(f"Message checksum {ckm}" f" invalid - should be {ckv}")
)
try:
if payload is None:
return UBXMessage(clsid, msgid, msgmode)
return UBXMessage(
clsid,
msgid,
msgmode,
payload=payload,
parsebitfield=parsebf,
)
except KeyError as err:
modestr = ["GET", "SET", "POLL"][msgmode]
raise ube.UBXParseError(
(f"Unknown message type clsid {clsid}, msgid {msgid}, mode {modestr}")
) from err
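# Illustrative usage sketch (an assumption, not part of this module): reading a
# mixed NMEA/UBX log with UBXReader. The file name is a placeholder; a
# serial.Serial instance could be passed as the stream instead.
def _example_read_log(path="gnss_log.bin"):
    with open(path, "rb") as stream:
        ubr = UBXReader(stream,
                        protfilter=ubt.NMEA_PROTOCOL | ubt.UBX_PROTOCOL,
                        quitonerror=ubt.ERR_LOG)
        for raw_data, parsed_data in ubr.iterate():
            print(parsed_data)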
|
the-stack_106_18554
|
import numpy as np
import pytest
import subprocess
import sys
from ...test.test_integrate import bwf_file
def test_dump_axml(tmpdir):
filename = str(tmpdir / 'test_axml.wav')
axml = b"AXML"
from ...fileio import openBw64
with openBw64(filename, 'w', axml=axml) as outfile:
outfile.write(np.zeros((1000, 1)))
assert subprocess.check_output(["ear-utils", "dump_axml", filename]) == axml
def test_dump_chna(tmpdir):
filename = str(tmpdir / 'test_chna.wav')
from ...fileio import openBw64
from ...fileio.bw64.chunks import ChnaChunk, AudioID
chna = ChnaChunk()
audioID = AudioID(1, u'ATU_00000001', u'AT_00010001_01', u'AP_00010003')
chna.appendAudioID(audioID)
with openBw64(filename, 'w', chna=chna) as outfile:
outfile.write(np.zeros((1000, 1)))
expected = str(audioID) + "\n"
output = subprocess.check_output(["ear-utils", "dump_chna", filename]).decode("utf8")
assert output == expected
expected = chna.asByteArray()[8:] # strip marker and size
output = subprocess.check_output(["ear-utils", "dump_chna", "--binary", filename])
assert output == expected
def test_replace_axml_basic(tmpdir):
filename_in = str(tmpdir / 'test_replace_axml_in.wav')
filename_axml = str(tmpdir / 'test_replace_axml_new_axml.xml')
filename_out = str(tmpdir / 'test_replace_axml_out.wav')
from ...fileio import openBw64
axml_in = b'axml'
axml_out = b'axml2'
with open(filename_axml, 'wb') as f:
f.write(axml_out)
with openBw64(filename_in, 'w', axml=axml_in) as outfile:
outfile.write(np.zeros((1000, 1)))
assert subprocess.check_call(["ear-utils", "replace_axml", "-a", filename_axml,
filename_in, filename_out]) == 0
with openBw64(filename_out, 'r') as infile:
assert infile.axml == axml_out
def test_replace_axml_regenerate(tmpdir):
filename_axml = str(tmpdir / 'test_replace_axml_new_axml.xml')
filename_out = str(tmpdir / 'test_replace_axml_out.wav')
from ...fileio import openBw64
with openBw64(bwf_file, 'r') as f:
axml_a = f.axml
assert f.chna.audioIDs[-1].trackIndex == 4
axml_out = axml_a.replace(b"ATU_00000005", b"ATU_00000006")
with open(filename_axml, 'wb') as f:
f.write(axml_out)
assert subprocess.check_call(["ear-utils", "replace_axml", "-a", filename_axml, "--gen-chna",
bwf_file, filename_out]) == 0
with openBw64(filename_out, 'r') as f:
assert f.axml == axml_out
assert f.chna.audioIDs[-1].trackIndex == 6
@pytest.mark.xfail(
sys.version_info < (3, 6),
reason="output may vary on platforms where dictionaries are not ordered",
)
def test_regenerate(tmpdir):
bwf_out = str(tmpdir / "test_regenerate_out.wav")
args = [
"ear-utils",
"regenerate",
"--enable-block-duration-fix",
bwf_file,
bwf_out,
]
assert subprocess.check_call(args) == 0
assert open(bwf_out, "rb").read() == open(bwf_file, "rb").read()
|
the-stack_106_18555
|
# coding: utf-8
"""
This module contains task manager.
"""
from __future__ import absolute_import
# Standard imports
from multiprocessing.pool import ThreadPool
import sys
from traceback import format_exc
class TaskManager(object):
"""
Task manager that manages a thread pool. It is used to run hotkey \
functions in the thread pool.
"""
def __init__(self):
"""
Constructor.
:return: None.
"""
# Create thread pool
self._thread_pool = ThreadPool(3)
def add_task(self, func):
"""
Add task function to be run in the task manager's thread pool.
:param func: Task function.
:return: None.
"""
# Create wrapper function
def func_wrapper():
try:
# Call given function
func()
# If have error
except BaseException:
# Get traceback message
tb_msg = format_exc()
# Get error message
msg = (
'# Error calling function in task thread:\n'
'---\n{0}---\n'
).format(tb_msg)
# Print error message
sys.stderr.write(msg)
# Run the wrapper function in the thread pool
self._thread_pool.apply_async(func_wrapper)
# Create task manager
_TASK_MANAGER = TaskManager()
def add_task(func):
"""
Add task function to be run in the task manager's thread pool.
:param func: Task function.
:return: None.
"""
# Add task function to be run in the task manager's thread pool
_TASK_MANAGER.add_task(func)
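# Illustrative usage sketch (an assumption, not part of this module): a hotkey
# handler dispatched to the shared thread pool via `add_task`. The call is left
# commented out so importing this module stays side-effect free.
def _example_hotkey_handler():
    import time
    time.sleep(0.5)  # simulate slow work off the main thread
    print('hotkey task done')
# add_task(_example_hotkey_handler)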
|
the-stack_106_18556
|
#! /usr/bin/env python
###############################################################################
# gmaps_directions.py
#
# Simple script to get directions using the Google Maps API
#
# Requires
# * Python client library for Google Maps API Web Services to be installed
# - https://github.com/googlemaps/google-maps-services-python
# - can be installed via: conda install -c conda-forge googlemaps
# * API keys with Google. How to obtained these is explained in the link above
# * gmplot for plotting the route
# - https://github.com/vgm64/gmplot
# - can be installed by conda install -c mlgill gmplot
#
# NOTE: Any plotting is set up for output, not viewing on screen.
# So, it will likely be ugly on screen. The saved PDFs should look
# better.
#
# Created: 04/03/18
# - Joshua Vaughan
# - [email protected]
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# *
#
# TODO:
# *
###############################################################################
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import googlemaps
import gmplot
gmaps = googlemaps.Client(key='AIzaSyDin8BFtWW9NxpQap17YFDp-MfL7pVJfEo')
# Request directions via public transit
now = datetime.now()
directions_result = gmaps.directions("Rougeou Hall, Lafayette, LA",
"Martin Hall, Lafayette, LA",
mode="walking",
departure_time=now)
# The results are returned as a json object, so we need to parse them to get
# the GPS coordinates of the start and end and waypoint locations
start_latLon = directions_result[0]['legs'][0]['start_location']
end_latLon = directions_result[0]['legs'][0]['end_location']
# create an array to hold the waypoints of the path
wayPoint_array = np.zeros((len(directions_result[0]['legs'][0]['steps'])+1, 2))
# Create an array to hold the distance to the next waypoint
wayPoint_distance = np.zeros((len(directions_result[0]['legs'][0]['steps']), 1))
# Fill the first value of the waypoint array with the start location
wayPoint_array[0,:] = [start_latLon['lat'], start_latLon['lng']]
# Now, loop through the steps of the directions to extract the necessary
# coordinates to form the array of waypoints.
for index, step in enumerate(directions_result[0]['legs'][0]['steps']):
# We only need to grab the end_location for each step because the start
# location of the next step is the same.
next_waypoint_latLon = step['end_location']
wayPoint_array[index + 1] = [next_waypoint_latLon['lat'], next_waypoint_latLon['lng']]
# Get the distance to the next waypoint and add it to the array
distance_to_next_waypoint = step['distance']['value']
wayPoint_distance[index] = distance_to_next_waypoint
# Now, let's plot those waypoints on a map
# We first create the map object, passing its center lat, lon and a zoom level
# We'll just using the "middle" waypoint as the center
gmap = gmplot.GoogleMapPlotter(wayPoint_array[len(wayPoint_array)//2, :][0],
wayPoint_array[len(wayPoint_array)//2, :][1],
17)
wayPoint_array = np.array([[-92.022457, 30.209695],
[-92.022628, 30.209846],
[-92.022777, 30.209977],
[-92.023212, 30.210361],
[-92.022834, 30.210739],
[-92.021956, 30.211616],
[-92.021177, 30.212395],
[-92.020224, 30.213348],
[-92.01976, 30.212989],
[-92.019425, 30.213326],
[-92.018719, 30.214036],
[-92.018216, 30.21454],
[-92.017822, 30.214933],
[-92.017768, 30.214987],
[-92.017309, 30.215453],
[-92.016977, 30.215789],
[-92.017599, 30.21625],
[-92.017892, 30.216466],
[-92.018016, 30.216307],
[-92.017999, 30.216217],
[-92.017954, 30.216078],
[-92.018007, 30.215941],
[-92.01819, 30.215814],
[-92.018474, 30.215793],
[-92.018616, 30.21564],
[-92.018742, 30.215503],
[-92.018913, 30.215633]])
# Now, plot the path
gmap.plot(wayPoint_array[:,1], wayPoint_array[:,0], 'cornflowerblue', edge_width=10)
# And draw the map
gmap.draw("my_map.html")
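# Illustrative follow-up sketch (an assumption, not part of the original
# script): summarize the route from the per-step distances collected above.
# The Directions API reports step distances in meters.
def summarize_route(distances):
    total_m = float(np.sum(distances))
    print('Route has {} steps, {:.0f} m (~{:.2f} km) total'.format(
        len(distances), total_m, total_m / 1000.0))
# summarize_route(wayPoint_distance)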
|
the-stack_106_18559
|
"""Classes for VeSync Switch Devices."""
import logging
import json
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from pyvesync.helpers import Helpers as helpers
from pyvesync.vesyncbasedevice import VeSyncBaseDevice
logger = logging.getLogger(__name__)
feature_dict: Dict[str, Dict[str, Union[list, str]]] = {
'ESWL01': {
'module': 'VeSyncWallSwitch',
'features': []
},
'ESWD16': {
'module': 'VeSyncDimmerSwitch',
'features': ['dimmable']
},
'ESWL03': {
'module': 'VeSyncWallSwitch',
'features': []
}
}
switch_modules: dict = {k: v['module']
for k, v in feature_dict.items()}
__all__: list = list(switch_modules.values()) + ['switch_modules']
class VeSyncSwitch(VeSyncBaseDevice):
"""Etekcity Switch Base Class."""
__metaclass__ = ABCMeta
def __init__(self, details, manager):
"""Initialize Switch Base Class."""
super().__init__(details, manager)
self.features = feature_dict.get(self.device_type, {}).get('features')
if self.features is None:
logger.error('%s device configuration not set', self.device_name)
raise Exception
self.details = {}
def is_dimmable(self) -> bool:
"""Return True if switch is dimmable."""
return bool('dimmable' in self.features)
@abstractmethod
def get_details(self) -> None:
"""Get Device Details."""
@abstractmethod
def turn_on(self) -> bool:
"""Turn Switch On."""
@abstractmethod
def turn_off(self) -> bool:
"""Turn switch off."""
@abstractmethod
def get_config(self) -> None:
"""Get configuration and firmware deatils."""
@property
def active_time(self) -> int:
"""Get active time of switch."""
return self.details.get('active_time', 0)
def update(self) -> None:
"""Update device details."""
self.get_details()
class VeSyncWallSwitch(VeSyncSwitch):
"""Etekcity standard wall switch class."""
def __init__(self, details, manager):
"""Initialize standard etekcity wall switch class."""
super().__init__(details, manager)
def get_details(self) -> None:
"""Get switch device details."""
body = helpers.req_body(self.manager, 'devicedetail')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicedetail', 'post',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = r.get('deviceStatus', self.device_status)
self.details['active_time'] = r.get('activeTime', 0)
self.connection_status = r.get(
'connectionStatus', self.connection_status)
else:
logger.debug('Error getting %s details', self.device_name)
def get_config(self) -> None:
"""Get switch device configuration info."""
body = helpers.req_body(self.manager, 'devicedetail')
body['method'] = 'configurations'
body['uuid'] = self.uuid
r, _ = helpers.call_api(
'/inwallswitch/v1/device/configurations',
'post',
headers=helpers.req_headers(self.manager),
json=body,
)
if helpers.code_check(r):
self.config = helpers.build_config_dict(r)
else:
logger.warning('Unable to get %s config info',
self.device_name)
def turn_off(self) -> bool:
"""Turn off switch device."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = 'off'
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicestatus', 'put',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = 'off'
return True
logger.warning('Error turning %s off', self.device_name)
return False
def turn_on(self) -> bool:
"""Turn on switch device."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = 'on'
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/inwallswitch/v1/device/devicestatus', 'put',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = 'on'
return True
logger.warning('Error turning %s on', self.device_name)
return False
class VeSyncDimmerSwitch(VeSyncSwitch):
"""Vesync Dimmer Switch Class with RGB Faceplate."""
def __init__(self, details, manager):
"""Initilize dimmer switch class."""
super().__init__(details, manager)
self._brightness = 0
self._rgb_value = {'red': 0, 'blue': 0, 'green': 0}
self._rgb_status = 'unknown'
self._indicator_light = 'unknown'
def get_details(self) -> None:
"""Get dimmer switch details."""
body = helpers.req_body(self.manager, 'devicedetail')
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/devicedetail', 'post', headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = r.get('deviceStatus', self.device_status)
self.details['active_time'] = r.get('activeTime', 0)
self.connection_status = r.get(
'connectionStatus', self.connection_status)
self._brightness = r.get('brightness')
self._rgb_status = r.get('rgbStatus')
self._rgb_value = r.get('rgbValue')
self._indicator_light = r.get('indicatorlightStatus')
else:
logger.debug('Error getting %s details', self.device_name)
@property
def brightness(self) -> float:
"""Return brightness in percent."""
return self._brightness
@property
def indicator_light_status(self) -> str:
"""Faceplate brightness light status."""
return self._indicator_light
@property
def rgb_light_status(self) -> str:
"""RGB Faceplate light status."""
return self._rgb_status
@property
def rgb_light_value(self) -> dict:
"""RGB Light Values."""
return self._rgb_value
def switch_toggle(self, status: str) -> bool:
"""Toggle switch status."""
if status not in ['on', 'off']:
logger.debug('Invalid status passed to wall switch')
return False
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/devicestatus', 'put',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = status
return True
logger.warning('Error turning %s %s',
self.device_name, status)
return False
def turn_on(self) -> bool:
"""Turn switch on."""
return self.switch_toggle('on')
def turn_off(self) -> bool:
"""Turn switch off."""
return self.switch_toggle('off')
def indicator_light_toggle(self, status: str) -> bool:
"""Toggle indicator light."""
if status not in ['on', 'off']:
logger.debug('Invalid status for wall switch')
return False
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/indicatorlightstatus',
'put', headers=head, json=body
)
if r is not None and helpers.code_check(r):
self.device_status = status
return True
logger.warning('Error turning %s indicator light %s',
self.device_name, status)
return False
def indicator_light_on(self) -> bool:
"""Turn Indicator light on."""
return self.indicator_light_toggle('on')
def indicator_light_off(self) -> bool:
"""Turn indicator light off."""
return self.indicator_light_toggle('off')
def rgb_color_status(
self, status: str,
red: int = None, blue: int = None, green: int = None
) -> bool:
"""Set faceplate RGB color."""
body = helpers.req_body(self.manager, 'devicestatus')
body['status'] = status
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
if red is not None and blue is not None and green is not None:
body['rgbValue'] = {'red': red, 'blue': blue, 'green': green}
r, _ = helpers.call_api(
'/dimmer/v1/device/devicergbstatus', 'put',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self._rgb_status = status
if body.get('rgbValue') is not None:
self._rgb_value = {'red': red, 'blue': blue, 'green': green}
return True
logger.warning('Error setting %s RGB status to %s', self.device_name, status)
return False
def rgb_color_off(self) -> bool:
"""Turn RGB Color Off."""
return self.rgb_color_status('off')
def rgb_color_on(self) -> bool:
"""Turn RGB Color Off."""
return self.rgb_color_status('on')
def rgb_color_set(self, red: int, green: int, blue: int) -> bool:
"""Set RGB color of faceplate."""
if isinstance(red, int) and isinstance(green, int) and isinstance(blue, int):
for color in [red, green, blue]:
if color < 0 or color > 255:
logger.warning('Invalid RGB value')
return False
return bool(self.rgb_color_status('on', red, green, blue))
return False
def set_brightness(self, brightness: int) -> bool:
"""Set brightness of dimmer - 1 - 100."""
if isinstance(brightness, int) and 0 < brightness <= 100:
body = helpers.req_body(self.manager, 'devicestatus')
body['brightness'] = brightness
body['uuid'] = self.uuid
head = helpers.req_headers(self.manager)
r, _ = helpers.call_api(
'/dimmer/v1/device/updatebrightness', 'put',
headers=head, json=body
)
if r is not None and helpers.code_check(r):
self._brightness = brightness
return True
logger.warning('Error setting %s brightness', self.device_name)
else:
logger.warning('Invalid brightness')
return False
def displayJSON(self) -> str:
"""JSON API for dimmer switch."""
sup_val = json.loads(super().displayJSON())
if self.is_dimmable():
sup_val.update(
{
'Indicator Light': str(self._indicator_light),
'Brightness': str(self._brightness),
'RGB Light': str(self._rgb_status),
}
)
return sup_val
def get_config(self) -> None:
"""Get dimmable switch device configuration info."""
body = helpers.req_body(self.manager, 'devicedetail')
body['method'] = 'configurations'
body['uuid'] = self.uuid
r, _ = helpers.call_api(
'/dimmer/v1/device/configurations',
'post',
headers=helpers.req_headers(self.manager),
json=body,
)
if helpers.code_check(r):
self.config = helpers.build_config_dict(r)
else:
logger.warning('Unable to get %s config info', self.device_name)
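# Illustrative usage sketch (an assumption, not part of this module): these
# switch classes are normally reached through the pyvesync manager rather than
# instantiated directly. The credentials below are placeholders.
def _example_dim_lights():
    from pyvesync import VeSync
    manager = VeSync("user@example.com", "password", time_zone="America/New_York")
    manager.login()
    manager.update()
    for switch in manager.switches:
        switch.turn_on()
        if switch.is_dimmable():
            switch.set_brightness(40)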
|
the-stack_106_18560
|
import glob
import os
from datetime import datetime
import logging
from libs import BNB_DATE_FORMAT
logger = logging.getLogger("utils")
def list_statement_files(dir, file_extension):
statement_files = []
if not os.path.exists(dir):
raise Exception(f"Statement directory[{dir}] doesn't exists.")
expresion = os.path.join(dir, "**", f"*.{file_extension}")
for file in glob.glob(expresion, recursive=True):
if not os.path.isfile(file):
continue
statement_files.append(file)
return statement_files
def humanize_date(list_object):
result = []
for elements in list_object:
item = {}
for key, value in elements.items():
if isinstance(value, datetime):
item[key] = value.strftime(BNB_DATE_FORMAT)
continue
item[key] = value
result.append(item)
return result
def get_parsers(supported_parsers, parsers, input_dir=None):
if not parsers:
parsers = ["revolut"]
if len(parsers) == 1:
if parsers[0] not in supported_parsers:
return [], [parsers[0]]
if input_dir is None:
logger.error(f"No input direcory provided. Please, use -i argument.")
raise SystemExit(1)
return [(parsers[0], supported_parsers[parsers[0]].Parser(input_dir))], []
resolved_parsers = []
unsupported_parsers = []
for parser in parsers:
parser_name, parser_input_dir = parser.split(":")
if parser_name not in supported_parsers:
unsupported_parsers.append(parser_name)
continue
resolved_parsers.append((parser_name, supported_parsers[parser_name].Parser(parser_input_dir)))
return resolved_parsers, unsupported_parsers
def get_unsupported_activity_types(supported_parsers, parser_statements):
unsupported_activity_types = {}
for parser_name, statements in parser_statements.items():
parser_unsupported_activity_types = supported_parsers[parser_name].get_unsupported_activity_types(statements)
if parser_unsupported_activity_types:
unsupported_activity_types[parser_name] = parser_unsupported_activity_types
return unsupported_activity_types
def merge_dict_of_dicts(parser_statements):
merged_dict = {}
for _, statements in parser_statements.items():
for dict_key, dict_list in statements.items():
if dict_key in merged_dict:
merged_dict[dict_key].extend(dict_list)
continue
merged_dict[dict_key] = dict_list
return merged_dict
def merge_dict_of_lists(parser_statements):
merged_list = []
for _, statements in parser_statements.items():
merged_list.extend(statements)
return merged_list
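# Illustrative example (an assumption, not part of this module): expected
# behaviour of merge_dict_of_dicts for two parsers' statements keyed by year.
def _example_merge():
    parser_statements = {
        "revolut": {"2021": [{"symbol": "AAPL"}]},
        "trading212": {"2021": [{"symbol": "MSFT"}], "2022": [{"symbol": "TSLA"}]},
    }
    # -> {"2021": [{"symbol": "AAPL"}, {"symbol": "MSFT"}], "2022": [{"symbol": "TSLA"}]}
    return merge_dict_of_dicts(parser_statements)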
|
the-stack_106_18561
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 10 19:00:23 2020
@author: ahmad
"""
# Cyclic rotation
# param A, array
# param K, intereger number of rotation
def solution(A, K):
try:
for n in range(K,0,-1):
temp = A[len(A)-1]
for j in range(len(A)-1,0,-1):
A[j] = A[j-1]
A[0] = temp
return A
except Exception as e:
print(str(e))
return -1
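# Alternative sketch (an assumption, not part of the original solution): the
# same rotation in O(len(A)) using slicing instead of K element-by-element shifts.
def solution_slicing(A, K):
    if not A:
        return A
    K = K % len(A)
    return A[-K:] + A[:-K]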
def main():
array_1 = []
print(solution(array_1, 1))
A = [3, 8, 9, 7, 6]
K = 3
print(solution(A, K))
if __name__ == '__main__':
main()
|
the-stack_106_18562
|
#! /usr/bin/python3
import nltk
import pdb
import pickle
import pandas as pd
import numpy as np
import json
stemmer = nltk.stem.porter.PorterStemmer()
stop_words = set(nltk.corpus.stopwords.words('english'))
def is_alphanumeric(character):
to_ord = ord(character)
is_alpha = (to_ord >= ord('A') and to_ord <= ord('Z')) or (to_ord >= ord('a') and to_ord <= ord('z'))
is_numeric = to_ord >= ord('0') and to_ord <= ord('9')
return is_alpha or is_numeric
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def reduce_stem(stem):
# Remove unwanted characters such as punctuations
reduced = []
for character in stem:
if not is_alphanumeric(character):
continue
reduced.append(character)
return ''.join(reduced)
def tokenize(text):
text = text.decode('utf-8').lower()
# Replace periods with spaces. This fixes cases
# where there's no space after a period. Punctuation
# will be dropped later in processing, anyway.
text = text.replace('.', ' ')
tokens = nltk.word_tokenize(text)
stems = stem_tokens(tokens, stemmer)
# Remove punctuations and stop words
stems_reduced = []
for ix, stem in enumerate(stems):
if stem in stop_words:
continue
reduced_stem = reduce_stem(stem)
if len(reduced_stem) > 0:
stems_reduced.append(reduced_stem)
return stems_reduced
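# Illustrative example (an assumption, not part of the original script): the
# tokenizer above lowercases the text, drops stop words and punctuation, and
# Porter-stems what remains, e.g. b"Running the tests." -> ['run', 'test'].
def _example_tokenize():
    return tokenize(b"Running the tests.")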
articles = []
article_id = 0
def cache_for_analysis(url, title, stems, feed_id):
global article_id
article_id = article_id + 1
articles.append((article_id, url, title, stems, feed_id))
def dump_articles():
pickle.dump(articles, open("articles.pickle", "wb"))
def article_to_json(row, all_terms):
stems = row[3]
vec = [("1" if term in stems else "0") for term in all_terms]
vec_string = "".join(vec)
return {
"article_id": row[0],
"url": row[1],
"title": row[2],
"feed_id": row[4],
"vec": vec_string
}
def analyze_articles():
seen_stems = set()
# do some kind of clustering with tf/idf!
# build up a data frame with schema:
# terms (string) | a1_terms (bool) | a2_terms
for row in articles:
stems = row[3]
for stem in stems:
seen_stems.add(stem)
all_terms = list(seen_stems)
for row in articles:
json_article = article_to_json(row, all_terms)
print(json.dumps(json_article))
if __name__ == "__main__":
articles = pickle.load(open("articles.pickle", "rb"))
analyze_articles()
|
the-stack_106_18564
|
import os
import sys
import autograd.scipy.stats
import autograd.scipy as sp
import autograd.numpy as np
from autograd import grad, jacobian, elementwise_grad
base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
sys.path.append(base_dir)
EPS = 1e-8
from src.contextualmodelings.base import ContextualModelling
from utils.prepare import get_feature_map_by_name
class CounterfactualLossPredictor(ContextualModelling):
""" Counterfactual Loss Predictor contextual modelling
Inherits from the parent class ContextualModelling
"""
def __init__(self, *args):
"""Initializes the class
Attributes:
hyperparams (dict): dictionnary parameters
name (str): name of the distribution
K (int): number of action anchor points
feature_map (FeatureMap): see src.kernels.feature_map
"""
super(CounterfactualLossPredictor, self).__init__(*args)
self.name = 'clp'
self.K = self.hyperparams['nb_quantile']
self.feature_map = get_feature_map_by_name(self.hyperparams)
def get_starting_parameter(self, dataset):
""" Creates starting parameter
Args:
dataset (dataset)
"""
m, v = self._prepare_starting_parameter(dataset)
ctxt_size = self.feature_map.contextual_feature_map_size(self.d)
action_size = self.feature_map.action_feature_map_size(dataset)
return np.concatenate([self.rng.normal(m, v, size=1),
self.rng.normal(scale=self.scale, size=((action_size + 2) * ctxt_size + 1))])
def get_parameters(self, parameter, features, actions, reinitialize):
""" Updates the parameters of the distribution
Args:
parameter (np.array): parameter of the distribution
features (np.array): observation features
actions (np.array): observation actions
reinitialize (bool): for bucketizer to be applied on new features
"""
m, v = parameter[:-1], parameter[-1]
if not self.feature_map.anchor_points_initialized:
self.feature_map.initialize_anchor_points(features, actions)
self.action_anchors = self.feature_map.action_anchor_points
self._set_feature_map_all_actions(features)
if reinitialize:
self._set_feature_map_all_actions(features)
predicted_m = self.prediction(m, features)
return predicted_m, v
def _set_feature_map_all_actions(self, features):
""" Builds feature map for all actions in action set, uses method self.feature_map
Args:
features (np.array): observation features
"""
self._feature_map_all_actions = np.real(self.feature_map.joint_feature_map_for_all_action_anchor_points(features))
def soft_argmax(self, parameter, feature_map):
""" Compute soft argmax for action prediction
Args:
parameter (np.array): parameter to be minimized
feature_map (np.array): feature_map on which to make prediction
"""
intercept = parameter[0]
exp = np.exp(self.hyperparams['gamma'] * (np.dot(feature_map, parameter[1:])+intercept))
return np.sum(np.einsum('ij,j->ij', exp / (np.sum(exp, axis=1, keepdims=True)+EPS), self.action_anchors), axis=1)
def discrete_prediction(self, parameter, feature_map):
""" Makes discrete prediction
Args:
parameter (np.array): parameter to be minimized
feature_map (np.array): feature_map on which to make prediction
"""
intercept = parameter[0]
preds = np.dot(feature_map, parameter[1:])+intercept
return self.action_anchors[np.argmax(preds, axis=1)]
def prediction(self, parameter, features):
""" Makes prediction
Args:
parameter (np.array): parameter to be minimized
features (np.array): observation features
"""
return self.soft_argmax(parameter, self._feature_map_all_actions)
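# Standalone sketch (an assumption, not part of this class): the soft argmax
# used above is, per observation, a softmax over the anchor scores followed by
# an expectation over the action anchor points.
def _soft_argmax_demo(scores, anchors, gamma=1.0):
    # scores: (n_obs, K) predicted score for each action anchor point
    # anchors: (K,) action anchor points
    exp = np.exp(gamma * scores)
    weights = exp / (np.sum(exp, axis=1, keepdims=True) + EPS)
    return np.sum(weights * anchors, axis=1)  # (n_obs,) soft action predictions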
|
the-stack_106_18565
|
import pyaudio
import wave
filename = "/home/pi/mytest.wav"
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 8000
CHUNK = 1024
RECORD_SECONDS=10
audio = pyaudio.PyAudio()
# start Recording
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
# stop Recording
stream.stop_stream()
stream.close()
audio.terminate()
waveFile = wave.open(filename, 'wb')
waveFile.setnchannels(CHANNELS)
waveFile.setsampwidth(audio.get_sample_size(FORMAT))
waveFile.setframerate(RATE)
waveFile.writeframes(b''.join(frames))
waveFile.close()
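# Illustrative read-back sketch (an assumption, not part of the original
# script): re-open the WAV file written above and report its duration.
def print_wav_duration(path=filename):
    with wave.open(path, 'rb') as wf:
        frames = wf.getnframes()
        rate = wf.getframerate()
        print('{:.1f} seconds recorded at {} Hz'.format(frames / float(rate), rate))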
|
the-stack_106_18566
|
#!/usr/bin/env python
#
# display live code coverage of DACMAN Colecovision game running in MAME
# drawn as Hilbert curve
#
# $ mame -v coleco -video soft -cart /path/to/DACMAN.ROM -window -nomax -resolution 560x432 -debugger gdbstub -debug
# $ ./dacman_live_hilbert /path/to/DACMAN.ROM
#
import os
import sys
import math
import time
import struct
import platform
from collections import defaultdict
import binaryninja
from binaryninja.binaryview import BinaryViewType
from PIL import Image, ImageDraw
# globals
n = None
draw = None
#------------------------------------------------------------------------------
# Hilbert curve mapping algorithms from:
# https://en.wikipedia.org/wiki/Hilbert_curve
#------------------------------------------------------------------------------
def rot(n, x, y, rx, ry):
if ry == 0:
if rx == 1:
x = n-1 - x;
y = n-1 - y;
(y,x) = (x,y)
return (x,y)
def d2xy(n, d):
(x,y,t) = (0,0,d)
level = 1
while level<n:
rx = 1 & (t//2)
ry = 1 & (t ^ rx)
(x, y) = rot(level, x, y, rx, ry)
x += level * rx
y += level * ry
t //= 4
level *= 2
return (x,y)
def xy2d(n, x, y):
(rx,ry,s,d)=(0,0,0,0)
s = n//2
while s > 0:
rx = int((x & s) > 0)
ry = int((y & s) > 0)
d += s * s * ((3 * rx) ^ ry)
(x, y) = rot(n, x, y, rx, ry)
s //= 2
return d
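# Quick self-check sketch (an assumption, not part of the original script):
# d2xy and xy2d should be inverses for a curve of a given side length.
def _check_hilbert_roundtrip(side=16):
    return all(xy2d(side, *d2xy(side, d)) == d for d in range(side * side))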
#------------------------------------------------------------------------------
# Hilbert curve drawing helpers
#------------------------------------------------------------------------------
# trace a Hilbert region by "wall following"
def wall_follower(d0, d1):
global n
def ok(x, y):
if x<0 or y<0: return False
d = xy2d(n**2, x, y)
#print('is %d within %d,%d' % (d, d0, d1))
return d>=0 and d>=d0 and d<d1
# move left until stop
(x,y) = d2xy(n**2, d0)
while 1:
if x == 0: break
if not ok(x-1,y): break
x = x-1
start = (x,y)
trace = [start]
direction = 'down'
tendencies = ['right', 'down', 'left', 'up']
while 1:
#print('at (%d,%d) heading %s' % (x,y,direction))
tendency = tendencies[(tendencies.index(direction)+1) % 4]
xmod = {'right':1, 'down':0, 'left':-1, 'up':0}
ymod = {'right':0, 'down':-1, 'left':0, 'up':1}
moved = False
# case A: we can turn right
x_try = x+xmod[tendency]
y_try = y+ymod[tendency]
if ok(x_try, y_try):
direction = tendency
(x,y) = (x_try, y_try)
moved = True
else:
# case B: we can continue in current direction
x_try = x+xmod[direction]
y_try = y+ymod[direction]
if ok(x_try, y_try):
(x,y) = (x_try, y_try)
moved = True
else:
# case C: we can't continue! ah!
direction = tendencies[(tendencies.index(direction)-1)%4]
if moved:
trace.append((x,y))
if (x,y) == start:
break
return trace
# [start, stop)
def draw_hilbert(start, stop, color='#ffffff'):
global n
global draw
pts = [d2xy(n, x) for x in range(start, stop)]
lines = zip(pts[:-1], pts[1:])
for line in lines:
((x1,y1),(x2,y2)) = line
#print('drawing line (%d,%d) -> (%d,%d)' % (x1,y1,x2,y2))
draw.line((x1,y1,x2,y2), width=1, fill=color)
def draw_region(start, stop, color1='#00ff00', color2=None):
global draw
trace = wall_follower(start, stop)
draw.polygon(trace, outline=color1, fill=color2)
#------------------------------------------------------------------------------
# main()
#------------------------------------------------------------------------------
if __name__ == '__main__':
# analyze functions
fpath = sys.argv[1]
bv = BinaryViewType.get_view_of_file(fpath)
bv.update_analysis_and_wait()
lowest = None
highest = None
addr2func = {}
for f in bv.functions:
addr_start = f.start
addr_end = f.start + f.total_bytes
if lowest==None or addr_start < lowest:
lowest = addr_start
if highest==None or addr_end >= highest:
highest = addr_end
addr2func[addr_start] = f
print('lowest address: 0x%04X' % lowest)
print('highest address: 0x%04X' % highest)
# launch debugger, set breakpoints
from debugger import DebugAdapter, gdblike
adapter = gdblike.connect_sense('localhost', 23946)
for addr in addr2func:
print('setting breakpoint at %04X: %s' % (addr, addr2func[addr].symbol.full_name))
adapter.breakpoint_set(addr)
# calculate image size
pixels = 1
while pixels < (highest-lowest):
pixels *= 4
n = int(math.sqrt(pixels))
print('n:', n)
img = Image.new('RGB', (n,n))
draw = ImageDraw.Draw(img)
# intialize pygame
import pygame
from pygame.locals import *
pygame.init()
surface = pygame.display.set_mode((4*n, 4*n), RESIZABLE)
pygame.display.set_caption('DACMAN code coverage')
# palette is "tab20" from matplotlib
palette_i = 0
palette = [
'#1F77B4', '#AEC7E8', '#FF7F0E', '#FFBB78', '#2CA02C', '#98DF8A', '#D62728', '#FF9896',
'#9467BD', '#C5B0D5', '#8C564B', '#C49C94', '#E377C2', '#F7B6D2', '#7F7F7F', '#C7C7C7',
'#BCBD22', '#DBDB8D', '#17BECF', '#9EDAE5'
]
print('reading to rock, press any key!')
input()
while 1:
# process pygame events
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
# wait for breakpoint, clear it
(reason, data) = adapter.go()
assert reason in [DebugAdapter.STOP_REASON.BREAKPOINT, DebugAdapter.STOP_REASON.SINGLE_STEP]
pc = adapter.reg_read('pc')
f = addr2func[pc]
print('%s()' % f.symbol.full_name)
adapter.breakpoint_clear(pc)
# draw function
addr_start = f.start
addr_end = f.start + f.total_bytes
if addr_end - addr_start < 4:
continue
print('drawing %s [0x%04X, 0x%04X)' % (f.symbol.full_name, addr_start, addr_end))
draw_region(addr_start - lowest, addr_end - lowest, None, palette[palette_i])
palette_i = (palette_i+1) % len(palette)
# drawing to pygame
raw_str = img.tobytes('raw', 'RGB')
img_surface = pygame.image.fromstring(raw_str, (n, n), 'RGB')
img_surface = pygame.transform.scale(img_surface, (4*n, 4*n))
surface.blit(img_surface, (0,0))
pygame.display.update()
#time.sleep(.1)
|
the-stack_106_18567
|
from __future__ import (division, absolute_import, print_function, unicode_literals)
import os
import numpy as np
import gzip
import os.path
import nltk
import logging
from nltk import FreqDist
from .WordEmbeddings import wordNormalize
from .CoNLL import readCoNLL
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
from io import open
def perpareDataset(embeddingsPath, datasets, frequencyThresholdUnknownTokens=50, reducePretrainedEmbeddings=False, valTransformations=None, padOneTokenSentence=True):
"""
Reads in the pre-trained embeddings (in text format) from embeddingsPath and prepares those to be used with the LSTM network.
Unknown words in the trainDataPath-file are added, if they appear at least frequencyThresholdUnknownTokens times
# Arguments:
embeddingsPath: Full path to the pre-trained embeddings file. File must be in text format.
datasetFiles: Full path to the [train,dev,test]-file
frequencyThresholdUnknownTokens: Unknown words are added if they occur more than frequencyThresholdUnknownTokens times in the train set
reducePretrainedEmbeddings: Set to true, then only the embeddings needed for training will be loaded
valTransformations: Column specific value transformations
padOneTokenSentence: True to pad one sentence tokens (needed for CRF classifier)
"""
embeddingsName = os.path.splitext(embeddingsPath)[0]
pklName = "_".join(sorted(datasets.keys()) + [embeddingsName])
outputPath = 'pkl/' + pklName + '.pkl'
if os.path.isfile(outputPath):
logging.info("Using existent pickle file: %s" % outputPath)
return outputPath
casing2Idx = getCasingVocab()
embeddings, word2Idx = readEmbeddings(embeddingsPath, datasets, frequencyThresholdUnknownTokens, reducePretrainedEmbeddings)
mappings = {'tokens': word2Idx, 'casing': casing2Idx}
pklObjects = {'embeddings': embeddings, 'mappings': mappings, 'datasets': datasets, 'data': {}}
for datasetName, dataset in datasets.items():
datasetColumns = dataset['columns']
commentSymbol = dataset['commentSymbol']
trainData = 'data/%s/train.txt' % datasetName
devData = 'data/%s/dev.txt' % datasetName
testData = 'data/%s/test.txt' % datasetName
paths = [trainData, devData, testData]
logging.info(":: Transform "+datasetName+" dataset ::")
pklObjects['data'][datasetName] = createPklFiles(paths, mappings, datasetColumns, commentSymbol, valTransformations, padOneTokenSentence)
f = open(outputPath, 'wb')
pkl.dump(pklObjects, f, -1)
f.close()
logging.info("DONE - Embeddings file saved: %s" % outputPath)
return outputPath
def loadDatasetPickle(embeddingsPickle):
""" Loads the cPickle file, that contains the word embeddings and the datasets """
f = open(embeddingsPickle, 'rb')
pklObjects = pkl.load(f)
f.close()
return pklObjects['embeddings'], pklObjects['mappings'], pklObjects['data']
def readEmbeddings(embeddingsPath, datasetFiles, frequencyThresholdUnknownTokens, reducePretrainedEmbeddings):
"""
Reads the embeddingsPath.
:param embeddingsPath: File path to pretrained embeddings
    :param datasetFiles: Dict that maps each dataset name to its configuration ('columns', 'commentSymbol')
    :param frequencyThresholdUnknownTokens: Minimum train-set frequency at which an unknown token is added to the vocabulary
    :param reducePretrainedEmbeddings: If True, only embeddings for tokens that appear in the datasets are loaded
    :return: (embeddings, word2Idx)
"""
# Check that the embeddings file exists
if not os.path.isfile(embeddingsPath):
if embeddingsPath in ['komninos_english_embeddings.gz', 'levy_english_dependency_embeddings.gz', 'reimers_german_embeddings.gz']:
getEmbeddings(embeddingsPath)
else:
print("The embeddings file %s was not found" % embeddingsPath)
exit()
logging.info("Generate new embeddings files for a dataset")
neededVocab = {}
if reducePretrainedEmbeddings:
logging.info("Compute which tokens are required for the experiment")
def createDict(filename, tokenPos, vocab):
for line in open(filename):
if line.startswith('#'):
continue
splits = line.strip().split()
if len(splits) > 1:
word = splits[tokenPos]
wordLower = word.lower()
wordNormalized = wordNormalize(wordLower)
vocab[word] = True
vocab[wordLower] = True
vocab[wordNormalized] = True
for dataset_name, dataset in datasetFiles.items():
dataColumnsIdx = {y: x for x, y in dataset['columns'].items()}
tokenIdx = dataColumnsIdx['tokens']
datasetPath = 'data/%s/' % dataset_name
for dataset_file_name in ['train.txt', 'dev.txt', 'test.txt']:
createDict(datasetPath + dataset_file_name, tokenIdx, neededVocab)
# :: Read in word embeddings ::
logging.info("Read file: %s" % embeddingsPath)
word2Idx = {}
embeddings = []
    embeddingsIn = gzip.open(embeddingsPath, "rt") if embeddingsPath.endswith('.gz') else open(embeddingsPath, encoding="utf8")
embeddingsDimension = None
for line in embeddingsIn:
split = line.rstrip().split(" ")
word = split[0]
        if embeddingsDimension is None:
embeddingsDimension = len(split) - 1
        if (len(split) - 1) != embeddingsDimension:  # Ensure all lines in the embeddings file have the same dimensionality
            print("ERROR: A line in the embeddings file had a different number of dimensions than expected. Skipping token.")
continue
if len(word2Idx) == 0: # Add padding+unknown
word2Idx["PADDING_TOKEN"] = len(word2Idx)
vector = np.zeros(embeddingsDimension)
embeddings.append(vector)
word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, embeddingsDimension)  # Alternative: -sqrt(3/dim) ... sqrt(3/dim)
embeddings.append(vector)
vector = np.array([float(num) for num in split[1:]])
if len(neededVocab) == 0 or word in neededVocab:
if word not in word2Idx:
embeddings.append(vector)
word2Idx[word] = len(word2Idx)
# Extend embeddings file with new tokens
def createFD(filename, tokenIndex, fd, word2Idx):
for line in open(filename):
if line.startswith('#'):
continue
splits = line.strip().split()
if len(splits) > 1:
word = splits[tokenIndex]
wordLower = word.lower()
wordNormalized = wordNormalize(wordLower)
if word not in word2Idx and wordLower not in word2Idx and wordNormalized not in word2Idx:
fd[wordNormalized] += 1
    if frequencyThresholdUnknownTokens is not None and frequencyThresholdUnknownTokens >= 0:
fd = nltk.FreqDist()
for datasetName, datasetFile in datasetFiles.items():
dataColumnsIdx = {y: x for x, y in datasetFile['columns'].items()}
tokenIdx = dataColumnsIdx['tokens']
datasetPath = 'data/%s/' % datasetName
createFD(datasetPath + 'train.txt', tokenIdx, fd, word2Idx)
addedWords = 0
for word, freq in fd.most_common(10000):
if freq < frequencyThresholdUnknownTokens:
break
addedWords += 1
word2Idx[word] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, embeddingsDimension)  # Alternative: -sqrt(3/dim) ... sqrt(3/dim)
embeddings.append(vector)
assert (len(word2Idx) == len(embeddings))
logging.info("Added words: %d" % addedWords)
embeddings = np.array(embeddings)
return embeddings, word2Idx
def addCharInformation(sentences):
"""Breaks every token into the characters"""
for sentenceIdx in range(len(sentences)):
sentences[sentenceIdx]['characters'] = []
for tokenIdx in range(len(sentences[sentenceIdx]['tokens'])):
token = sentences[sentenceIdx]['tokens'][tokenIdx]
chars = [c for c in token]
sentences[sentenceIdx]['characters'].append(chars)
def addCasingInformation(sentences):
"""Adds information of the casing of words"""
for sentenceIdx in range(len(sentences)):
sentences[sentenceIdx]['casing'] = []
for tokenIdx in range(len(sentences[sentenceIdx]['tokens'])):
token = sentences[sentenceIdx]['tokens'][tokenIdx]
sentences[sentenceIdx]['casing'].append(getCasing(token))
def getCasing(word):
"""Returns the casing for a word"""
casing = 'other'
numDigits = 0
for char in word:
if char.isdigit():
numDigits += 1
digitFraction = numDigits / float(len(word))
if word.isdigit(): #Is a digit
casing = 'numeric'
elif digitFraction > 0.5:
casing = 'mainly_numeric'
elif word.islower(): #All lower case
casing = 'allLower'
elif word.isupper(): #All upper case
casing = 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
casing = 'initialUpper'
elif numDigits > 0:
casing = 'contains_digit'
return casing
def getCasingVocab():
entries = ['PADDING', 'other', 'numeric', 'mainly_numeric', 'allLower', 'allUpper', 'initialUpper', 'contains_digit']
return {entries[idx]:idx for idx in range(len(entries))}
def createMatrices(sentences, mappings, padOneTokenSentence):
data = []
numTokens = 0
numUnknownTokens = 0
missingTokens = FreqDist()
paddedSentences = 0
for sentence in sentences:
row = {name: [] for name in list(mappings.keys())+['raw_tokens']}
for mapping, str2Idx in mappings.items():
if mapping not in sentence:
continue
for entry in sentence[mapping]:
if mapping.lower() == 'tokens':
numTokens += 1
idx = str2Idx['UNKNOWN_TOKEN']
if entry in str2Idx:
idx = str2Idx[entry]
elif entry.lower() in str2Idx:
idx = str2Idx[entry.lower()]
elif wordNormalize(entry) in str2Idx:
idx = str2Idx[wordNormalize(entry)]
else:
numUnknownTokens += 1
missingTokens[wordNormalize(entry)] += 1
row['raw_tokens'].append(entry)
elif mapping.lower() == 'characters':
idx = []
for c in entry:
if c in str2Idx:
idx.append(str2Idx[c])
else:
idx.append(str2Idx['UNKNOWN'])
else:
idx = str2Idx[entry]
row[mapping].append(idx)
if len(row['tokens']) == 1 and padOneTokenSentence:
paddedSentences += 1
for mapping, str2Idx in mappings.items():
if mapping.lower() == 'tokens':
row['tokens'].append(mappings['tokens']['PADDING_TOKEN'])
row['raw_tokens'].append('PADDING_TOKEN')
elif mapping.lower() == 'characters':
row['characters'].append([0])
else:
row[mapping].append(0)
data.append(row)
if numTokens > 0:
logging.info("Unknown-Tokens: %.2f%%" % (numUnknownTokens/float(numTokens)*100))
return data
def createPklFiles(datasetFiles, mappings, cols, commentSymbol, valTransformation, padOneTokenSentence):
trainSentences = readCoNLL(datasetFiles[0], cols, commentSymbol, valTransformation)
devSentences = readCoNLL(datasetFiles[1], cols, commentSymbol, valTransformation)
testSentences = readCoNLL(datasetFiles[2], cols, commentSymbol, valTransformation)
extendMappings(mappings, trainSentences+devSentences+testSentences)
charset = {"PADDING":0, "UNKNOWN":1}
for c in " 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,-_()[]{}!?:;#'\"/\\%$`&=*+@^~|":
charset[c] = len(charset)
mappings['characters'] = charset
addCharInformation(trainSentences)
addCasingInformation(trainSentences)
addCharInformation(devSentences)
addCasingInformation(devSentences)
addCharInformation(testSentences)
addCasingInformation(testSentences)
logging.info(":: Create Train Matrix ::")
trainMatrix = createMatrices(trainSentences, mappings, padOneTokenSentence)
logging.info(":: Create Dev Matrix ::")
devMatrix = createMatrices(devSentences, mappings, padOneTokenSentence)
logging.info(":: Create Test Matrix ::")
testMatrix = createMatrices(testSentences, mappings, padOneTokenSentence)
data = {
'trainMatrix': trainMatrix,
'devMatrix': devMatrix,
'testMatrix': testMatrix
}
return data
def extendMappings(mappings, sentences):
sentenceKeys = list(sentences[0].keys())
sentenceKeys.remove('tokens') #No need to map tokens
for sentence in sentences:
for name in sentenceKeys:
if name not in mappings:
mappings[name] = {'O':0} #'O' is also used for padding
for item in sentence[name]:
if item not in mappings[name]:
mappings[name][item] = len(mappings[name])
def getEmbeddings(name):
if not os.path.isfile(name):
download("https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/"+name)
def getLevyDependencyEmbeddings():
"""
Downloads from https://levyomer.wordpress.com/2014/04/25/dependency-based-word-embeddings/
the dependency based word embeddings and unzips them
"""
if not os.path.isfile("levy_deps.words.bz2"):
print("Start downloading word embeddings from Levy et al. ...")
os.system("wget -O levy_deps.words.bz2 http://u.cs.biu.ac.il/~yogo/data/syntemb/deps.words.bz2")
print("Start unzip word embeddings ...")
os.system("bzip2 -d levy_deps.words.bz2")
def getReimersEmbeddings():
"""
Downloads from https://www.ukp.tu-darmstadt.de/research/ukp-in-challenges/germeval-2014/
embeddings for German
"""
if not os.path.isfile("2014_tudarmstadt_german_50mincount.vocab.gz"):
print("Start downloading word embeddings from Reimers et al. ...")
os.system("wget https://public.ukp.informatik.tu-darmstadt.de/reimers/2014_german_embeddings/2014_tudarmstadt_german_50mincount.vocab.gz")
if sys.version_info >= (3,):
import urllib.request as urllib2
import urllib.parse as urlparse
from urllib.request import urlretrieve
else:
import urllib2
import urlparse
from urllib import urlretrieve
def download(url, destination=os.curdir, silent=False):
filename = os.path.basename(urlparse.urlparse(url).path) or 'downloaded.file'
def get_size():
meta = urllib2.urlopen(url).info()
meta_func = meta.getheaders if hasattr(
meta, 'getheaders') else meta.get_all
meta_length = meta_func('Content-Length')
try:
return int(meta_length[0])
except:
return 0
def kb_to_mb(kb):
return kb / 1024.0 / 1024.0
def callback(blocks, block_size, total_size):
current = blocks * block_size
percent = 100.0 * current / total_size
line = '[{0}{1}]'.format(
'=' * int(percent / 2), ' ' * (50 - int(percent / 2)))
status = '\r{0:3.0f}%{1} {2:3.1f}/{3:3.1f} MB'
sys.stdout.write(
status.format(
percent, line, kb_to_mb(current), kb_to_mb(total_size)))
path = os.path.join(destination, filename)
logging.info(
'Downloading: {0} ({1:3.1f} MB)'.format(url, kb_to_mb(get_size())))
try:
(path, headers) = urlretrieve(url, path, None if silent else callback)
except:
os.remove(path)
raise Exception("Can't download {0}".format(path))
else:
print()
logging.info('Downloaded to: {0}'.format(path))
return path
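# Usage sketch (illustrative only): the dataset layout below is an assumption inferred from
# how `datasets` is consumed above -- files are expected at data/<name>/{train,dev,test}.txt
# in CoNLL format, the 'columns' dict maps column index to name (one column must be 'tokens';
# the label column name 'POS' here is made up), and the pkl/ output directory must exist.
#
#   datasets = {
#       'unidep_pos': {
#           'columns': {1: 'tokens', 3: 'POS'},
#           'commentSymbol': None,
#       }
#   }
#   pickleFile = perpareDataset('komninos_english_embeddings.gz', datasets)
#   embeddings, mappings, data = loadDatasetPickle(pickleFile)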
|
the-stack_106_18568
|
#!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and other.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.scenarios.compute.cyclictest.Cyclictest
from __future__ import absolute_import
import unittest
import mock
from oslo_serialization import jsonutils
from yardstick.benchmark.scenarios.compute import cyclictest
@mock.patch('yardstick.benchmark.scenarios.compute.cyclictest.ssh')
class CyclictestTestCase(unittest.TestCase):
def setUp(self):
self.scenario_cfg = {
"host": "kvm.LF",
"setup_options": {
"rpm_dir": "/opt/rpm",
"host_setup_seqs": [
"host-setup0.sh",
"host-setup1.sh",
"host-run-qemu.sh"
],
"script_dir": "/opt/scripts",
"image_dir": "/opt/image",
"guest_setup_seqs": [
"guest-setup0.sh",
"guest-setup1.sh"
]
},
"sla": {
"action": "monitor",
"max_min_latency": 50,
"max_avg_latency": 100,
"max_max_latency": 1000
},
"options": {
"priority": 99,
"threads": 1,
"loops": 1000,
"affinity": 1,
"interval": 1000,
"histogram": 90
}
}
self.context_cfg = {
"host": {
"ip": "10.229.43.154",
"key_filename": "/yardstick/resources/files/yardstick_key",
"role": "BareMetal",
"name": "kvm.LF",
"user": "root"
}
}
def test_cyclictest_successful_setup(self, mock_ssh):
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
self.assertIsNotNone(c.guest)
self.assertIsNotNone(c.host)
self.assertEqual(c.setup_done, True)
def test_cyclictest_successful_no_sla(self, mock_ssh):
result = {}
self.scenario_cfg.pop("sla", None)
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
c.run(result)
expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_cyclictest_successful_sla(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {
"action": "monitor",
"max_min_latency": 100,
"max_avg_latency": 500,
"max_max_latency": 1000
}
})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
c.run(result)
expected_result = jsonutils.loads(sample_output)
self.assertEqual(result, expected_result)
def test_cyclictest_unsuccessful_sla_min_latency(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {"max_min_latency": 10}})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, c.run, result)
def test_cyclictest_unsuccessful_sla_avg_latency(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {"max_avg_latency": 10}})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, c.run, result)
def test_cyclictest_unsuccessful_sla_max_latency(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
sample_output = '{"min": 100, "avg": 500, "max": 1000}'
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, c.run, result)
def test_cyclictest_unsuccessful_script_error(self, mock_ssh):
result = {}
self.scenario_cfg.update({"sla": {"max_max_latency": 10}})
c = cyclictest.Cyclictest(self.scenario_cfg, self.context_cfg)
mock_ssh.SSH().execute.return_value = (0, '', '')
c.setup()
c.guest = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, c.run, result)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
the-stack_106_18570
|
# Import my solver Code
from solver import *
#Creating a list to hold the levels and answers of Sudoku board
list_lvl = ['easy','normal','hard','answer']
for level in list_lvl:
print("Im starting with " + level + " boards")
#Open file stream of the respective level sudoku
filename = level.capitalize() + "Sudoku.txt"
sudokuFile = open(filename, "a")
#Generate 5 Sudokus for the current level and store the board and solution
#in its file
for i in range(0,5):
result = init_board(lvl = level, generating = True)
game = result[0].copy()
sol = result[1].copy()
sudokuFile.write(toString_sudoku(game))
sudokuFile.write(toString_sudoku(sol))
sudokuFile.write("\n")
sudokuFile.close()
print("Im done with " + level + " boards\n")
|
the-stack_106_18571
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from metadata.config.common import ConfigModel
from metadata.ingestion.api.common import Record, WorkflowContext
from metadata.ingestion.api.sink import Sink, SinkStatus
from metadata.ingestion.models.user import MetadataUser
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
logger = logging.getLogger(__name__)
class LDAPSourceConfig(ConfigModel):
api_end_point: str
class LdapRestUsersSink(Sink):
config: LDAPSourceConfig
status: SinkStatus
def __init__(
self,
ctx: WorkflowContext,
config: LDAPSourceConfig,
metadata_config: MetadataServerConfig,
):
super().__init__(ctx)
self.config = config
self.metadata_config = metadata_config
self.status = SinkStatus()
self.api_users = "/users"
self.rest = OpenMetadata(metadata_config).client
@classmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
):
config = LDAPSourceConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(ctx, config, metadata_config)
def write_record(self, record: Record) -> None:
self._create_user(record)
def _create_user(self, record: MetadataUser) -> None:
metadata_user = MetadataUser(
name=record.github_username[0],
display_name=record.name[0],
email=record.email[0],
)
self.rest.post(self.api_users, data=metadata_user.to_json())
self.status.records_written(record.name[0])
def get_status(self):
return self.status
def close(self):
pass
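# Usage sketch (illustrative only; the exact keys expected by MetadataServerConfig are not
# shown in this file and are an assumption -- check the OpenMetadata documentation):
#
#   sink = LdapRestUsersSink.create(
#       config_dict={"api_end_point": "http://localhost:8585/api"},
#       metadata_config_dict=metadata_server_config_dict,
#       ctx=workflow_context,
#   )
#   sink.write_record(ldap_user_record)   # a record exposing name, email, github_username
#   print(sink.get_status())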
|
the-stack_106_18572
|
#!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_load_balancer_network_security_groups_update
short_description: Manage a NetworkSecurityGroupsUpdate resource in Oracle Cloud Infrastructure
description:
- This module allows the user to update a NetworkSecurityGroupsUpdate resource in Oracle Cloud Infrastructure
version_added: "2.9.0"
author: Oracle (@oracle)
options:
network_load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
type: str
aliases: ["id"]
required: true
network_security_group_ids:
description:
- An array of network security group L(OCIDs,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) associated with the network
load
balancer.
- During the creation of the network load balancer, the service adds the new network load balancer to the specified network security groups.
- "The benefits of associating the network load balancer with network security groups include:"
- "* Network security groups define network security rules to govern ingress and egress traffic for the network load balancer."
- "* The network security rules of other resources can reference the network security groups associated with the network load balancer
to ensure access."
- This parameter is updatable.
type: list
elements: str
state:
description:
- The state of the NetworkSecurityGroupsUpdate.
- Use I(state=present) to update an existing a NetworkSecurityGroupsUpdate.
type: str
required: false
default: 'present'
choices: ["present"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Update network_security_groups_update
oci_network_load_balancer_network_security_groups_update:
# required
network_load_balancer_id: "ocid1.networkloadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
network_security_group_ids: [ "network_security_group_ids_example" ]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.network_load_balancer import NetworkLoadBalancerClient
from oci.network_load_balancer.models import UpdateNetworkSecurityGroupsDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperGen(OCIResourceHelperBase):
"""Supported operations: update"""
def get_possible_entity_types(self):
return super(
NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperGen, self
).get_possible_entity_types() + [
"networksecuritygroupsupdate",
"networksecuritygroupsupdates",
"networkLoadBalancernetworksecuritygroupsupdate",
"networkLoadBalancernetworksecuritygroupsupdates",
"networksecuritygroupsupdateresource",
"networksecuritygroupsupdatesresource",
"networksecuritygroup",
"networksecuritygroups",
"networkLoadBalancernetworksecuritygroup",
"networkLoadBalancernetworksecuritygroups",
"networksecuritygroupresource",
"networksecuritygroupsresource",
"networkloadbalancer",
]
def get_module_resource_id_param(self):
return "network_load_balancer_id"
def get_module_resource_id(self):
return self.module.params.get("network_load_balancer_id")
def get_update_model_class(self):
return UpdateNetworkSecurityGroupsDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_network_security_groups,
call_fn_args=(),
call_fn_kwargs=dict(
network_load_balancer_id=self.module.params.get(
"network_load_balancer_id"
),
update_network_security_groups_details=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperCustom = get_custom_class(
"NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperCustom"
)
class ResourceHelper(
NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperCustom,
NetworkLoadBalancerNetworkSecurityGroupsUpdateHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
network_load_balancer_id=dict(aliases=["id"], type="str", required=True),
network_security_group_ids=dict(type="list", elements="str"),
state=dict(type="str", default="present", choices=["present"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="network_security_groups_update",
service_client_class=NetworkLoadBalancerClient,
namespace="network_load_balancer",
)
result = dict(changed=False)
if resource_helper.is_update():
result = resource_helper.update()
module.exit_json(**result)
if __name__ == "__main__":
main()
|
the-stack_106_18573
|
"""
This module has functions to help processing the data from
PacketIn and PacketOut.
"""
from pyof.foundation.basic_types import BinaryData
from libs.tcpiplib.packet import Ethernet, VLAN, IP, TCP, LLDP, ARP, OessFvd
def dissect_data(data, start=0):
"""
This function aims to dissect PacketIn and PacketOut data
It assumes it is
Ethernet [qinq] [vlan] (BDDP|LLDP|ARP|IP) [TCP|UDP]
Args:
data: BinaryData
start: offset
Returns:
payload: array with all classes
"""
if len(data.value) == 0:
# No Data
return 0
packet = data.value
payload = []
# Ethernet
eth = Ethernet()
eth.parse(packet[start:start + 14], 1)
payload.append(eth)
    # QinQ (0x88a8) or VLAN (0x8100 / 33024) tagging - determine the inner EtherType
etype = '0x0000'
start += 14
if eth.protocol in [34984]: # 0x88a8
# Frame has QinQ
vlan = VLAN()
vlan.parse(packet[start:start + 4])
payload.append(vlan)
etype = vlan.protocol
start += 4
else:
etype = eth.protocol
# if there is no content, return
if len(packet[start:]) == 0:
return payload
if not start: # In case there was a QinQ header.
start += 14
if etype in [33024] or eth.protocol in [33024]:
# Frame has VLAN
vlan = VLAN()
vlan.parse(packet[start:start + 4])
payload.append(vlan)
etype = vlan.protocol
start += 4
else:
if not etype:
etype = eth.protocol
# if there is no content, return
if len(packet[start:]) == 0:
return payload
# OESS FVD
if etype in [34998]:
fvd = OessFvd()
try:
fvd.parse(packet[start:])
except Exception as error:
print(error)
payload.append(fvd)
return payload
# LLDP - ETYPE 0x88CC or 35020 or
    # BDDP - ETYPE 0x8942 or 35138
if etype in [35020, 35138]:
lldp = LLDP()
try:
lldp.parse(packet[start:])
except:
pass
if not isinstance(lldp, LLDP):
lldp.c_id = 0
else:
payload.append(lldp)
return payload
# IP - ETYPE 0x800 or 2048
if etype in [2048]:
ip_addr = IP()
ip_addr.parse(packet, start)
payload.append(ip_addr)
        if ip_addr.protocol == 6:  # TCP
tcp = TCP()
tcp.parse(packet, start + ip_addr.length)
payload.append(tcp)
return payload
# ARP - ETYPE 0x806 or 2054
if etype in [2054]:
arp = ARP()
arp.parse(packet[start:])
payload.append(arp)
return payload
return payload
def is_protocol(data, lldp=False, oess=False, arp=False):
"""
Check if Data is protocol provided
Args:
data: PacketOut/PacketIn/OESS data
lldp: check for lldp
oess: check for oess
arp: check for arp
Returns:
protocol class if True
False if it is not
"""
protocol = []
return_protocol = False
if lldp:
protocol.append(35020) # LLDP
protocol.append(35138) # BDDP
elif oess:
protocol.append(34998) # Private
elif arp:
protocol.append(2054) # ARP 0x806
else:
return_protocol = True
if isinstance(data, BinaryData):
data = dissect_data(data)
try:
eth = data.pop(0)
next_protocol = eth.protocol
if next_protocol in [33024, 34984]:
vlan = data.pop(0)
if return_protocol:
return vlan.protocol
next_protocol = vlan.protocol
if next_protocol in protocol:
return True
return False
except Exception as error:
print(error)
return False
def get_protocol(data, lldp=False, oess=False, arp=False):
"""
Get protocol from data
Args:
data: PacketOut/PacketIn/OESS data
lldp: check for lldp
oess: check for oess
arp: check for arp
Returns:
protocol class if True
False if it is not
"""
protocol = []
if lldp:
protocol.append(35020) # LLDP
protocol.append(35138) # BDDP
elif oess:
protocol.append(34998) # Private
elif arp:
protocol.append(2054) # ARP 0x806
else:
return False
if isinstance(data, BinaryData):
data = dissect_data(data)
if isinstance(data, int):
return False
try:
eth = data.pop(0)
next_protocol = eth.protocol
if next_protocol in [33024]:
vlan = data.pop(0)
next_protocol = vlan.protocol
if next_protocol in protocol:
return data.pop(0)
return False
except Exception as error:
print(error)
return False
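# Usage sketch (illustrative only; `packet_in` stands for a parsed OpenFlow PacketIn message
# whose `data` field is a pyof BinaryData -- the variable name is an assumption):
#
#   layers = dissect_data(packet_in.data)            # [Ethernet, (VLAN), LLDP/ARP/IP, ...], or 0 if empty
#   if is_protocol(packet_in.data, lldp=True):
#       lldp = get_protocol(packet_in.data, lldp=True)
#       print(lldp.c_id)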
|
the-stack_106_18574
|
"""
Don't see the standings during the contest!!! you will lose motivation.
"""
# ---------------------------------------------------Import Libraries---------------------------------------------------
import sys
import time
import os
from math import sqrt, log, log2, ceil, log10, gcd, floor, pow, sin, cos, tan, pi, inf, factorial
from copy import copy, deepcopy
from sys import exit, stdin, stdout
from collections import Counter, defaultdict, deque
from itertools import permutations
import heapq
from bisect import bisect_left as bl
# If the element is already present in the list,
# the left most position where element has to be inserted is returned.
from bisect import bisect_right as br
from bisect import bisect
# If the element is already present in the list,
# the rightmost position where the element has to be inserted is returned.
# ---------------------------------------------------Global Variables---------------------------------------------------
# sys.setrecursionlimit(100000000)
mod = 1000000007
# ---------------------------------------------------Helper Functions---------------------------------------------------
iinp = lambda: int(sys.stdin.readline())
inp = lambda: sys.stdin.readline().strip()
strl = lambda: list(inp().strip().split(" "))
intl = lambda: list(map(int, inp().split(" ")))
mint = lambda: map(int, inp().split())
flol = lambda: list(map(float, inp().split(" ")))
flush = lambda: stdout.flush()
def permute(nums):
def fun(arr, nums, cur, v):
if len(cur) == len(nums):
arr.append(cur.copy())
i = 0
while i < len(nums):
if v[i]:
i += 1
continue
else:
cur.append(nums[i])
v[i] = 1
fun(arr, nums, cur, v)
cur.pop()
v[i] = 0
i += 1
# while i<len(nums) and nums[i]==nums[i-1]:i+=1 # Uncomment for unique permutations
return arr
res = []
nums.sort()
v = [0] * len(nums)
return fun(res, nums, [], v)
def subsets(res, index, arr, cur):
res.append(cur.copy())
for i in range(index, len(arr)):
cur.append(arr[i])
subsets(res, i + 1, arr, cur)
cur.pop()
return res
def sieve(N):
root = int(sqrt(N))
primes = [1] * (N + 1)
primes[0], primes[1] = 0, 0
for i in range(2, root + 1):
if primes[i]:
for j in range(i * i, N + 1, i):
primes[j] = 0
return primes
def bs(arr, l, r, x):
if x < arr[0] or x > arr[len(arr) - 1]:
return -1
while l <= r:
mid = l + (r - l) // 2
if arr[mid] == x:
return mid
elif arr[mid] < x:
l = mid + 1
else:
r = mid - 1
return -1
def isPrime(n):
if n <= 1: return False
if n <= 3: return True
if n % 2 == 0 or n % 3 == 0: return False
p = int(sqrt(n))
for i in range(5, p + 1, 6):
if n % i == 0 or n % (i + 2) == 0:
return False
return True
# -------------------------------------------------------Functions------------------------------------------------------
arr=[2]
while arr[-1]<=10**9:
arr.append(arr[-1]*2)
arr=set(arr)
def solve():
n=iinp()
if n%2==0 and sqrt(n//2)==int(sqrt(n//2)):
print("YES")
elif n%4==0 and sqrt(n//4)==int(sqrt(n//4)):
print("YES")
else:
print("NO")
# -------------------------------------------------------Main Code------------------------------------------------------
start_time = time.time()
for _ in range(iinp()):
solve()
# print("--- %s seconds ---" % (time.time() - start_time))
|
the-stack_106_18575
|
# coding=utf-8
# MIT License
# Copyright (c) 2020 Carnegie Mellon University, Auton Lab
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module is a wrapper around torch implementations and
provides a convenient API to train Deep Survival Machines.
"""
from dsm.dsm_torch import DeepSurvivalMachinesTorch
from dsm.dsm_torch import DeepRecurrentSurvivalMachinesTorch
from dsm.dsm_torch import DeepConvolutionalSurvivalMachinesTorch
from dsm.dsm_torch import DeepCNNRNNSurvivalMachinesTorch
import dsm.losses as losses
from dsm.utilities import train_dsm
from dsm.utilities import _get_padded_features, _get_padded_targets
from dsm.utilities import _reshape_tensor_with_nans
import torch
import numpy as np
__pdoc__ = {}
__pdoc__["DeepSurvivalMachines.fit"] = True
__pdoc__["DeepSurvivalMachines._eval_nll"] = True
__pdoc__["DeepConvolutionalSurvivalMachines._eval_nll"] = True
__pdoc__["DSMBase"] = False
class DSMBase:
"""Base Class for all DSM models"""
def __init__(
self,
k=3,
layers=None,
distribution="Weibull",
temp=1.0,
discount=1.0,
device="cpu",
):
self.k = k
self.layers = layers
self.dist = distribution
self.temp = temp
self.discount = discount
self.fitted = False
self.device = device
def _gen_torch_model(self, inputdim, optimizer, risks):
"""Helper function to return a torch model."""
return DeepSurvivalMachinesTorch(
inputdim,
k=self.k,
layers=self.layers,
dist=self.dist,
temp=self.temp,
discount=self.discount,
optimizer=optimizer,
risks=risks,
).to(self.device)
def fit(
self,
x,
t,
e,
vsize=0.15,
val_data=None,
iters=1,
learning_rate=1e-3,
weight_decay=1e-5,
batch_size=128,
elbo=True,
optimizer="Adam",
random_state=100,
):
r"""This method is used to train an instance of the DSM model.
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: np.ndarray
A numpy array of the event/censoring times, \( t \).
e: np.ndarray
A numpy array of the event/censoring indicators, \( \delta \).
\( \delta = 1 \) means the event took place.
vsize: float
Amount of data to set aside as the validation set.
val_data: tuple
A tuple of the validation dataset. If passed vsize is ignored.
iters: int
The maximum number of training iterations on the training dataset.
learning_rate: float
The learning rate for the `Adam` optimizer.
batch_size: int
learning is performed on mini-batches of input data. this parameter
specifies the size of each mini-batch.
elbo: bool
Whether to use the Evidence Lower Bound for optimization.
Default is True.
optimizer: str
The choice of the gradient based optimization method. One of
'Adam', 'RMSProp' or 'SGD'.
random_state: float
random seed that determines how the validation set is chosen.
"""
processed_data = self._preprocess_training_data(
x, t, e, vsize, val_data, random_state
)
x_train, t_train, e_train, x_val, t_val, e_val = processed_data
self.lr = learning_rate
self.weight_decay = weight_decay
self.batch_size = batch_size
# Todo: Change this somehow. The base design shouldn't depend on child
if type(self).__name__ in [
"DeepConvolutionalSurvivalMachines",
"DeepCNNRNNSurvivalMachines",
]:
inputdim = tuple(x_train.shape)[-2:]
else:
inputdim = x_train.shape[-1]
maxrisk = int(np.nanmax(e_train.cpu().numpy()))
model = self._gen_torch_model(inputdim, optimizer, risks=maxrisk)
model, self.min_loss = train_dsm(
model,
x_train,
t_train,
e_train,
x_val,
t_val,
e_val,
n_iter=iters,
lr=self.lr,
weight_decay=self.weight_decay,
elbo=elbo,
bs=self.batch_size,
device=self.device,
)
self.torch_model = model.eval()
self.fitted = True
return self
def compute_nll(self, x, t, e):
r"""This function computes the negative log likelihood of the given data.
In case of competing risks, the negative log likelihoods are summed over
        the different event types.
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: np.ndarray
A numpy array of the event/censoring times, \( t \).
e: np.ndarray
A numpy array of the event/censoring indicators, \( \delta \).
\( \delta = r \) means the event r took place.
Returns:
float: Negative log likelihood.
"""
if not self.fitted:
raise Exception(
"The model has not been fitted yet. Please fit the "
+ "model using the `fit` method on some training data "
+ "before calling `_eval_nll`."
)
processed_data = self._preprocess_training_data(x, t, e, 0, None, 0)
_, _, _, x_val, t_val, e_val = processed_data
x_val, t_val, e_val = (
x_val,
_reshape_tensor_with_nans(t_val),
_reshape_tensor_with_nans(e_val),
)
loss = 0
for r in range(self.torch_model.risks):
loss += float(
losses.conditional_loss(
self.torch_model, x_val, t_val, e_val, elbo=False, risk=str(r + 1)
)
.detach()
.cpu()
.numpy()
)
return loss
def _preprocess_test_data(self, x):
return torch.from_numpy(x).float().to(self.device)
def _preprocess_training_data(self, x, t, e, vsize, val_data, random_state):
idx = list(range(x.shape[0]))
np.random.seed(random_state)
np.random.shuffle(idx)
x_train, t_train, e_train = x[idx], t[idx], e[idx]
x_train = torch.from_numpy(x_train).float().to(self.device)
t_train = torch.from_numpy(t_train).float().to(self.device)
e_train = torch.from_numpy(e_train).float().to(self.device)
if val_data is None:
vsize = int(vsize * x_train.shape[0])
x_val, t_val, e_val = x_train[-vsize:], t_train[-vsize:], e_train[-vsize:]
x_train = x_train[:-vsize]
t_train = t_train[:-vsize]
e_train = e_train[:-vsize]
else:
x_val, t_val, e_val = val_data
x_val = torch.from_numpy(x_val).float().to(self.device)
t_val = torch.from_numpy(t_val).float().to(self.device)
e_val = torch.from_numpy(e_val).float().to(self.device)
return (x_train, t_train, e_train, x_val, t_val, e_val)
def predict_mean(self, x, risk=1):
r"""Returns the mean Time-to-Event \( t \)
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
Returns:
np.array: numpy array of the mean time to event.
"""
if self.fitted:
x = self._preprocess_test_data(x)
scores = losses.predict_mean(self.torch_model, x, risk=str(risk))
return scores
else:
raise Exception(
"The model has not been fitted yet. Please fit the "
+ "model using the `fit` method on some training data "
+ "before calling `predict_mean`."
)
def predict_risk(self, x, t, risk=1):
r"""Returns the estimated risk of an event occuring before time \( t \)
\( \widehat{\mathbb{P}}(T\leq t|X) \) for some input data \( x \).
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: list or float
a list or float of the times at which survival probability is
to be computed
Returns:
np.array: numpy array of the risks at each time in t.
"""
if self.fitted:
return 1 - self.predict_survival(x, t, risk=str(risk))[0]
else:
raise Exception(
"The model has not been fitted yet. Please fit the "
+ "model using the `fit` method on some training data "
+ "before calling `predict_risk`."
)
def predict_survival(self, x, t, risk=1):
r"""Returns the estimated survival probability at time \( t \),
\( \widehat{\mathbb{P}}(T > t|X) \) for some input data \( x \).
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: list or float
a list or float of the times at which survival probability is
to be computed
Returns:
np.array: numpy array of the survival probabilites at each time in t.
"""
x = self._preprocess_test_data(x)
if not isinstance(t, list):
t = [t]
if self.fitted:
scores, std = losses.predict_cdf(
self.torch_model, x, t, risk=str(risk), device=self.device
)
return np.array(scores).T, np.array(std).T
else:
raise Exception(
"The model has not been fitted yet. Please fit the "
+ "model using the `fit` method on some training data "
+ "before calling `predict_survival`."
)
def predict_pdf(self, x, t, risk=1):
r"""Returns the estimated pdf at time \( t \),
\( \widehat{\mathbb{P}}(T = t|X) \) for some input data \( x \).
Parameters
----------
x: np.ndarray
A numpy array of the input features, \( x \).
t: list or float
a list or float of the times at which pdf is
to be computed
Returns:
np.array: numpy array of the estimated pdf at each time in t.
"""
x = self._preprocess_test_data(x)
if not isinstance(t, list):
t = [t]
if self.fitted:
scores = losses.predict_pdf(self.torch_model, x, t, risk=str(risk))
return np.exp(np.array(scores)).T
else:
raise Exception(
"The model has not been fitted yet. Please fit the "
+ "model using the `fit` method on some training data "
+ "before calling `predict_survival`."
)
class DeepSurvivalMachines(DSMBase):
"""A Deep Survival Machines model.
This is the main interface to a Deep Survival Machines model.
    A model is instantiated with an appropriate set of hyperparameters and
fit on numpy arrays consisting of the features, event/censoring times
and the event/censoring indicators.
For full details on Deep Survival Machines, refer to our paper [1].
References
----------
[1] <a href="https://arxiv.org/abs/2003.01176">Deep Survival Machines:
Fully Parametric Survival Regression and
Representation Learning for Censored Data with Competing Risks."
arXiv preprint arXiv:2003.01176 (2020)</a>
Parameters
----------
k: int
The number of underlying parametric distributions.
layers: list
A list of integers consisting of the number of neurons in each
hidden layer.
distribution: str
Choice of the underlying survival distributions.
One of 'Weibull', 'LogNormal'.
Default is 'Weibull'.
temp: float
The logits for the gate are rescaled with this value.
Default is 1000.
discount: float
a float in [0,1] that determines how to discount the tail bias
from the uncensored instances.
Default is 1.
Example
-------
>>> from dsm import DeepSurvivalMachines
>>> model = DeepSurvivalMachines()
>>> model.fit(x, t, e)
"""
def __call__(self):
if self.fitted:
print("A fitted instance of the Deep Survival Machines model")
else:
print("An unfitted instance of the Deep Survival Machines model")
print("Number of underlying distributions (k):", self.k)
print("Hidden Layers:", self.layers)
print("Distribution Choice:", self.dist)
class DeepRecurrentSurvivalMachines(DSMBase):
"""The Deep Recurrent Survival Machines model to handle data with
time-dependent covariates.
For full details on Deep Recurrent Survival Machines, refer to our paper [1].
References
----------
[1] <a href="http://proceedings.mlr.press/v146/nagpal21a.html">
Deep Parametric Time-to-Event Regression with Time-Varying Covariates
AAAI Spring Symposium on Survival Prediction</a>
"""
def __init__(
self,
k=3,
layers=None,
hidden=None,
distribution="Weibull",
temp=1.0,
discount=1.0,
typ="LSTM",
device="cpu",
):
super(DeepRecurrentSurvivalMachines, self).__init__(
k=k,
layers=layers,
distribution=distribution,
temp=temp,
discount=discount,
device=device,
)
self.hidden = hidden
self.typ = typ
self.device = device
def _gen_torch_model(self, inputdim, optimizer, risks):
"""Helper function to return a torch model."""
return DeepRecurrentSurvivalMachinesTorch(
inputdim,
k=self.k,
layers=self.layers,
hidden=self.hidden,
dist=self.dist,
temp=self.temp,
discount=self.discount,
optimizer=optimizer,
typ=self.typ,
risks=risks,
).to(self.device)
def _preprocess_test_data(self, x):
return torch.from_numpy(_get_padded_features(x)).float().to(self.device)
def _preprocess_training_data(self, x, t, e, vsize, val_data, random_state):
"""RNNs require different preprocessing for variable length sequences"""
idx = list(range(x.shape[0]))
np.random.seed(random_state)
np.random.shuffle(idx)
x = _get_padded_features(x)
t = _get_padded_targets(t)
e = _get_padded_targets(e)
x_train, t_train, e_train = x[idx], t[idx], e[idx]
x_train = torch.from_numpy(x_train).float().to(self.device)
t_train = torch.from_numpy(t_train).float().to(self.device)
e_train = torch.from_numpy(e_train).float().to(self.device)
if val_data is None:
vsize = int(vsize * x_train.shape[0])
x_val, t_val, e_val = x_train[-vsize:], t_train[-vsize:], e_train[-vsize:]
x_train = x_train[:-vsize]
t_train = t_train[:-vsize]
e_train = e_train[:-vsize]
else:
x_val, t_val, e_val = val_data
x_val = _get_padded_features(x_val)
t_val = _get_padded_features(t_val)
e_val = _get_padded_features(e_val)
x_val = torch.from_numpy(x_val).float().to(self.device)
t_val = torch.from_numpy(t_val).float().to(self.device)
e_val = torch.from_numpy(e_val).float().to(self.device)
return (x_train, t_train, e_train, x_val, t_val, e_val)
class DeepConvolutionalSurvivalMachines(DSMBase):
"""The Deep Convolutional Survival Machines model to handle data with
image-based covariates.
"""
def __init__(
self,
k=3,
layers=None,
hidden=None,
distribution="Weibull",
temp=1.0,
discount=1.0,
typ="ConvNet",
):
super(DeepConvolutionalSurvivalMachines, self).__init__(
k=k, distribution=distribution, temp=temp, discount=discount
)
self.hidden = hidden
self.typ = typ
def _gen_torch_model(self, inputdim, optimizer, risks):
"""Helper function to return a torch model."""
return DeepConvolutionalSurvivalMachinesTorch(
inputdim,
k=self.k,
hidden=self.hidden,
dist=self.dist,
temp=self.temp,
discount=self.discount,
optimizer=optimizer,
typ=self.typ,
risks=risks,
)
class DeepCNNRNNSurvivalMachines(DeepRecurrentSurvivalMachines):
"""The Deep CNN-RNN Survival Machines model to handle data with
moving image streams.
"""
def __init__(
self,
k=3,
layers=None,
hidden=None,
distribution="Weibull",
temp=1.0,
discount=1.0,
typ="LSTM",
):
super(DeepCNNRNNSurvivalMachines, self).__init__(
k=k, layers=layers, distribution=distribution, temp=temp, discount=discount
)
self.hidden = hidden
self.typ = typ
def _gen_torch_model(self, inputdim, optimizer, risks):
"""Helper function to return a torch model."""
return DeepCNNRNNSurvivalMachinesTorch(
inputdim,
k=self.k,
layers=self.layers,
hidden=self.hidden,
dist=self.dist,
temp=self.temp,
discount=self.discount,
optimizer=optimizer,
typ=self.typ,
risks=risks,
)
|
the-stack_106_18577
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
import warnings
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params, opt_level=3):
"""Helper to lower VTA properly."""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
if hasattr(target, "device_name") and target.device_name == "vta":
import vta
with vta.build_config(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(mod, mod["main"])
return
# Alter op layout code has been written expecting that tuning is applied
# without it, so we disable AlterOpLayout to maintain that behavior.
with tvm.transform.PassContext(opt_level=opt_level, disabled_pass={"AlterOpLayout"}):
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
"""Extract tuning tasks from a relay program.
This function is the single program version of extract_from_multiple_program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
if target_host is not None:
warnings.warn(
"target_host parameter is going to be deprecated. "
"Please pass in tvm.target.Target(target, host=target_host) instead."
)
target, target_host = Target.check_and_update_host_consist(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
"""Extract tuning tasks from multiple relay programs.
This function collects tuning tasks by building a list of programs
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
mods: List[tvm.IRModule] or List[relay.function.Function]
The list of modules or functions to tune
params: List of dict of str to numpy array
The associated parameters of the programs
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm import topi
env = TaskExtractEnv.get()
# merge target and target host
target, target_host = Target.check_and_update_host_consist(target, target_host)
# run compiler to collect all TOPI calls during compilation
env.reset(ops)
with env:
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
for mod, param in zip(mods, params):
if isinstance(mod, relay.function.Function):
mod = tvm.IRModule.from_expr(mod)
assert isinstance(
mod, tvm.IRModule
), "only support relay Module or Function to be tuned"
relay.backend.te_compiler.get().clear()
# wrap build call in thread to avoid multiprocessing problems
build_thread = threading.Thread(target=_lower, args=(mod, target, param))
build_thread.start()
build_thread.join()
relay.backend.te_compiler.get().clear()
# Clear the warning message cache in FallbackContext
if isinstance(DispatchContext.current, FallbackContext):
DispatchContext.current.memory = {}
DispatchContext.warning_messages = set()
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
try:
tsk = create(task_name, args, target=target)
tasks.append(tsk)
except topi.InvalidShapeError:
logger.warning("Invalid shape during AutoTVM task creation")
return tasks
|
the-stack_106_18578
|
"""A time estimator by running TensorFlow operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import tensorflow as tf
import numpy as np
from six.moves import range
from paleo.profilers.base import BaseProfiler, TimeMeasure
#from pynvml import * #Ermao
import os, os.path, subprocess, signal #Ermao
class TensorFlowProfiler(BaseProfiler):
def __init__(self, options, device='/gpu:0'):
super(TensorFlowProfiler, self).__init__('TensorFlowProfiler', options)
self._device = device
self._logger.info('TensorFlow version: %s' % tf.__version__)
def profile(self, layer):
graph = tf.Graph()
ops, bwd_ops = None, None
#print(layer.inputs) #Ermao
#print(type(layer))
if layer.name != 'data':
print("\n%s Input: %s" % (layer, layer.inputs), end=' ') #Ermao
if layer.layertype == 'conv2d':
ops, bwd_ops = self._ops_conv2d(layer, graph)
elif layer.layertype == 'innerproduct':
ops, bwd_ops = self._ops_innerproduct(layer, graph)
elif layer.layertype == 'pool2d':
ops, bwd_ops = self._ops_pool2d(layer, graph)
elif layer.layertype == 'dropout':
ops, bwd_ops = self._ops_dropout(layer, graph)
elif layer.layertype == 'concat':
ops, bwd_ops = self._ops_concat(layer, graph)
elif layer.layertype == 'reshape':
ops, bwd_ops = self._ops_reshape(layer, graph)
# added by Ermao
elif layer.layertype == 'generic_LRN':
ops, bwd_ops = self._ops_lrn(layer, graph)
elif layer.layertype == 'generic_BatchNorm':
ops, bwd_ops = self._ops_batchnorm(layer, graph)
elif layer.layertype == 'elementwise':
ops, bwd_ops = self._ops_elementwise(layer, graph)
else:
self._logger.warning('Unimplemented \'%s\'' % layer.layertype)
return self._execute(ops, bwd_ops, graph)
def profile_full_pass(self, layers):
graph, end_points, variables = self._compose_full_graph(layers)
# Forward pass.
if layers[-1].layertype in ['softmax', 'sigmoid']:
last_op = end_points[layers[-2].name]
loss_op = end_points[layers[-1].name]
else:
last_op = end_points[layers[-1].name]
loss_op = None
print("FullForward", end=' ') # Ermao
print(end_points)
forward_time = self._execute(last_op, None, graph)
# Backward pass.
softmax_time = TimeMeasure()
backward_time = TimeMeasure()
loss_op = None #Ermao
if loss_op is not None:
softmax_time = self._execute(loss_op, None, graph)
with graph.as_default():
grad_op = tf.gradients(loss_op, variables)
backward_time = self._execute(grad_op, None, graph)
backward_time = backward_time - softmax_time
softmax_time = softmax_time - forward_time
return forward_time, softmax_time, backward_time
def _compose_full_graph(self, layers):
graph = tf.Graph()
end_points = dict() # collects out tensors for each layer
variables = [None] # collects trainable variables
for layer in layers:
if layer.layertype == 'conv2d':
ops, _ = self._ops_conv2d(layer, graph, end_points, variables)
elif layer.layertype == 'deconv2d':
ops, _ = self._ops_deconv2d(layer, graph, end_points,
variables)
elif layer.layertype == 'innerproduct':
ops, _ = self._ops_innerproduct(layer, graph, end_points,
variables)
elif layer.layertype == 'pool2d':
ops, _ = self._ops_pool2d(layer, graph, end_points)
elif layer.layertype == 'upsampling2d':
ops, _ = self._ops_upsampling2d(layer, graph, end_points)
elif layer.layertype == 'dropout':
ops, _ = self._ops_dropout(layer, graph, end_points)
elif layer.layertype == 'concat':
ops, _ = self._ops_concat(layer, graph, end_points)
elif layer.layertype == 'reshape':
ops, _ = self._ops_reshape(layer, graph, end_points)
elif layer.layertype == 'softmax':
ops, _ = self._ops_softmax(layer, graph, end_points)
elif layer.layertype == 'sigmoid':
ops, _ = self._ops_sigmoid(layer, graph, end_points)
# added by Ermao
elif layer.layertype == 'generic_LRN':
ops, bwd_ops = self._ops_lrn(layer, graph, end_points)
elif layer.layertype == 'generic_BatchNorm':
ops, bwd_ops = self._ops_batchnorm(layer, graph, end_points)
elif layer.layertype == 'elementwise':
ops, bwd_ops = self._ops_elementwise(layer, graph)
elif layer.layertype == 'input':
# skip data/input layer.
continue
else:
raise NotImplementedError('Cannot create ops for layer %s [%s]'
% (layer.name, layer.layertype))
end_points[layer.name] = ops
return graph, end_points, variables[1:]
def _get_inputs(self, layer, end_points=None):
if end_points is None or layer.parents[0] == 'data':
# Isolation mode: inputs for the layer are random constants.
inputs = tf.constant(
2 * np.random.random_sample(layer.inputs) - 1,
dtype=tf.float32,
name="fake_inputs")
return inputs
else:
# Chain mode: get inputs from parent layer outputs.
inputs = [end_points[p] for p in layer.parents]
if len(inputs) == 1:
return inputs[0]
return inputs
def _get_variable(self, shape, name='constant'):
return tf.Variable(
tf.truncated_normal(
shape, dtype=tf.float32, stddev=1e-1),
name='rand_{}'.format(name))
def _get_fake_targets(self, batch_size, num_classes):
labels = np.random.randint(0, num_classes, batch_size)
return tf.constant(labels, dtype=tf.int32, name='fake_targets')
def _ops_conv2d(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
filters = self._get_variable(layer.filters, name='filters')
if variables:
variables.append(filters)
conv = None
if self.options.direction == 'forward':
conv = tf.nn.conv2d(
inputs, filters, layer.strides, padding=layer.padding)
bwd_inputs_op, bwd_filter_op = None, None
if self.options.direction == 'backward':
if self.options.gradient_wrt == 'data' and layer.backprop:
bwd_inputs_op = tf.nn.conv2d_backprop_input(
layer.inputs,
filters,
self._get_variable(
layer.outputs, name='outputs'),
layer.strides,
layer.padding)
elif self.options.gradient_wrt == 'filter':
bwd_filter_op = tf.nn.conv2d_backprop_filter(
inputs, layer.filters,
self._get_variable(layer.outputs, 'outputs'),
layer.strides, layer.padding)
return conv, [bwd_inputs_op, bwd_filter_op]
def _ops_deconv2d(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
filters = self._get_variable(layer.filters, name='filters')
if variables:
variables.append(filters)
deconv = tf.nn.conv2d_transpose(
inputs,
filters,
output_shape=layer.outputs,
strides=layer.strides)
return deconv, None
def _ops_innerproduct_backup(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
weights = self._get_variable(layer.weights, name='weights')
if variables:
variables.append(weights)
                if len(inputs.get_shape().as_list()) > 2:
                    # Flatten everything but the batch dimension before the matmul.
                    inputs = tf.reshape(inputs, [tf.shape(inputs)[0], -1])
innerprod = tf.matmul(inputs, weights)
return innerprod, None
def _ops_innerproduct(self, layer, graph, end_points=None, variables=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
weights = self._get_variable(layer.weights, name='weights')
if variables:
variables.append(weights)
innerprod = tf.matmul(inputs, weights)
return innerprod, None
def _ops_pool2d(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
if layer.pool_type == 'max':
pool_op = tf.nn.max_pool
elif layer.pool_type == 'avg':
pool_op = tf.nn.avg_pool
else:
raise NotImplementedError('Invalid pool type: %s' %
layer.pool_type)
pool = pool_op(
inputs, layer.kernel, layer.strides, padding=layer.padding)
return pool, None
def _ops_upsampling2d(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
upsampling = tf.image.resize_nearest_neighbor(
inputs, layer.outputs[1:3])
return upsampling, None
def _ops_dropout(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
dropout = tf.nn.dropout(inputs, layer.keep_prob)
return dropout, None
def _ops_concat(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
if end_points:
inputs = self._get_inputs(layer, end_points)
else:
inputs = [tf.Variable(tf.random_normal(inp))
for inp in layer.inputs]
concat = tf.concat(axis=layer.dim, values=inputs)
return concat, None
def _ops_reshape(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
reshape = tf.reshape(inputs, layer.outputs)
return reshape, None
# Ermao
def _ops_lrn(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
                lrn = tf.nn.lrn(inputs)
                return lrn, None
# Ermao
def _ops_batchnorm(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
                # Note: batch normalization is approximated with an LRN op here,
                # the same stand-in used by _ops_lrn above.
                batchnorm = tf.nn.lrn(inputs)
                return batchnorm, None
# Ermao
def _ops_elementwise(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
# inputs = self._get_inputs(layer, end_points)
if end_points:
inputs = self._get_inputs(layer, end_points)
else:
inputs = [tf.Variable(tf.random_normal(inp))
for inp in layer.inputs]
eltwise = tf.add(inputs[0], inputs[1], name='eltwise')
return eltwise, None
def _ops_softmax(self, layer, graph, end_points=None):
# For simplicity, here combine softmax and loss
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
print(type(inputs))
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.squeeze(inputs), labels=self._get_fake_targets(
layer.outputs[0], layer.outputs[1])))
return loss, None
def _ops_sigmoid(self, layer, graph, end_points=None):
with graph.as_default():
with tf.device(self._device):
inputs = self._get_inputs(layer, end_points)
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=inputs, labels=tf.zeros(
layer.outputs)))
return loss, None
def _execute(self, layer_ops, bwd_ops, graph):
with graph.as_default():
with tf.device(self._device):
config = tf.ConfigProto(
allow_soft_placement=False,
log_device_placement=(
self._logger.getEffectiveLevel() == logging.DEBUG),
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0)))
ops_to_run = None
if self.options.direction == 'forward':
if layer_ops is None:
return TimeMeasure()
if isinstance(layer_ops, list):
target_fwd_op = [tf.group(op) for op in layer_ops]
else:
# shape = tf.shape(layer_ops) #Ermao
#print(layer_ops.shape)
target_fwd_op = tf.group(layer_ops) #Ermao
ops_to_run = target_fwd_op
elif self.options.direction == 'backward':
if bwd_ops is None:
return TimeMeasure()
else:
if self.options.gradient_wrt == 'data':
target = bwd_ops[0]
elif self.options.gradient_wrt == 'filter':
target = bwd_ops[1]
else:
                            self._logger.warning(
                                'TensorFlowProfiler cannot run two '
                                'backward ops for now.')
return TimeMeasure()
if target is None:
return TimeMeasure()
target_bwd_op = tf.group(target) #Ermao
# target_bwd_op = tf.group(tf.shape(target)) #Ermao
ops_to_run = target_bwd_op
init = tf.global_variables_initializer()
# Create a session and initialize variables.
with tf.Session(config=config) as sess:
# writer = tf.train.SummaryWriter('logs/', sess.graph)
sess.run(init)
# Ermao initiate nvml
# nvmlInit()
# handle = nvmlDeviceGetHandleByIndex(1)
# Run the ops.
durations = []
# durations_nvml = []
durations_smi = []
# power_vals_nvml = []
power_all = []
for i in range(self.options.num_warmup +
self.options.num_iter):
start_time = time.time()
sess.run(ops_to_run)
duration = time.time() - start_time
if i >= self.options.num_warmup:
                            # Measure time in milliseconds.
durations.append(duration * (10**3))
# pow_val = 100.0
# proc = subprocess.Popen("nvidia-smi -i 1", stdout=subprocess.PIPE, shell=True)
#print(time.localtime())
#measure Power Ermao
proc = subprocess.Popen(["nvidia-smi", "--query-gpu=power.draw", "--format=csv", "-lms", "1", "-i", "1"], stdout=subprocess.PIPE)
for i in range(self.options.num_iter):
start_time = time.time()
sess.run(ops_to_run)
duration = time.time() - start_time
durations_smi.append(duration * (10 ** 3))
os.kill(proc.pid, signal.SIGTERM)
                    for line in proc.stdout:
                        # nvidia-smi output may be bytes under Python 3.
                        if isinstance(line, bytes):
                            line = line.decode()
                        if "power" not in line:
                            power_all.append(float(line.split()[-2]))
mean_time = np.mean(durations)
max_power = np.max(power_all)
mean_power_smi = np.mean(power_all)
# print('Runtime: %.3f Power: %.3f' % (mean_time,mean_power_smi))
print('Runtime: %.3f Power: %.3f %.3f' % (mean_time, max_power, mean_power_smi))
# print('%.3f %.3f %.3f' % (mean_time, mean_time_nvml, mean_time_smi))
# print('%.3f %.3f %d' % (mean_power_nvml, mean_power_smi, len(power_all)))
#print(power_vals_nvml)
#print(power_all)
#print('The average power is: %.2f' % mean_power)
tf.reset_default_graph()
return TimeMeasure(total_time=mean_time)
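
# Usage sketch (illustrative; `profiler` is assumed to be an instance of the
# profiler class defined above, constructed elsewhere with its options, target
# device and logger; TimeMeasure is assumed to expose a total_time attribute):
#
#   fwd_time, softmax_time, bwd_time = profiler.profile_full_pass(layers)
#   print(fwd_time.total_time, bwd_time.total_time)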
|
the-stack_106_18579
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobConstraints(Model):
"""
Specifies the execution constraints for jobs created on a schedule.
:param max_wall_clock_time: The maximum elapsed time that the job may
run, measured from the time the job starts. If the job does not complete
within the time limit, the Batch service terminates it and any tasks
that are still running.
:type max_wall_clock_time: timedelta
:param max_task_retry_count: The maximum number of times each task may be
retried. The Batch service retries a task if its exit code is nonzero.
:type max_task_retry_count: int
"""
_attribute_map = {
'max_wall_clock_time': {'key': 'maxWallClockTime', 'type': 'duration'},
'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'},
}
def __init__(self, max_wall_clock_time=None, max_task_retry_count=None):
self.max_wall_clock_time = max_wall_clock_time
self.max_task_retry_count = max_task_retry_count
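
# Usage sketch (illustrative values only; these are not Batch service defaults).
if __name__ == "__main__":
    from datetime import timedelta

    constraints = JobConstraints(
        max_wall_clock_time=timedelta(hours=2),
        max_task_retry_count=3,
    )
    print(constraints.max_wall_clock_time, constraints.max_task_retry_count)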
|
the-stack_106_18580
|
from setuptools import setup
with open('requirements.txt') as fp:
requirements = fp.read().splitlines()
setup(
name='mrkd',
description='Write man pages using Markdown, and convert them to Roff or HTML',
version='0.1.3',
author='Ryan Gonzalez',
author_email='[email protected]',
license='BSD',
url='https://github.com/kirbyfan64/mrkd',
py_modules=['mrkd'],
entry_points={
'console_scripts': [
'mrkd=mrkd:main',
],
},
package_data={
'': ['template.html'],
},
install_requires=requirements,
classifiers=[
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: BSD License',
],
)
|
the-stack_106_18581
|
import lvgl as lv
def set_value(bar, v):
bar.set_value(v, lv.ANIM.OFF)
def event_cb(e):
dsc = lv.obj_draw_part_dsc_t.__cast__(e.get_param())
if dsc.part != lv.PART.INDICATOR:
return
obj= e.get_target()
label_dsc = lv.draw_label_dsc_t()
label_dsc.init()
# label_dsc.font = LV_FONT_DEFAULT;
value_txt = str(obj.get_value())
txt_size = lv.point_t()
lv.txt_get_size(txt_size, value_txt, label_dsc.font, label_dsc.letter_space, label_dsc.line_space, lv.COORD.MAX, label_dsc.flag)
txt_area = lv.area_t()
# If the indicator is long enough put the text inside on the right
if dsc.draw_area.get_width() > txt_size.x + 20:
txt_area.x2 = dsc.draw_area.x2 - 5
txt_area.x1 = txt_area.x2 - txt_size.x + 1
label_dsc.color = lv.color_white()
    # If the indicator is still short, put the text outside of it, on the right
else:
txt_area.x1 = dsc.draw_area.x2 + 5
txt_area.x2 = txt_area.x1 + txt_size.x - 1
label_dsc.color = lv.color_black()
txt_area.y1 = dsc.draw_area.y1 + (dsc.draw_area.get_height() - txt_size.y) // 2
txt_area.y2 = txt_area.y1 + txt_size.y - 1
lv.draw_label(txt_area, dsc.clip_area, label_dsc, value_txt, None)
#
# Custom drawer on the bar to display the current value
#
bar = lv.bar(lv.scr_act())
bar.add_event_cb(event_cb, lv.EVENT.DRAW_PART_END, None)
bar.set_size(200, 20)
bar.center()
a = lv.anim_t()
a.init()
a.set_var(bar)
a.set_values(0, 100)
a.set_custom_exec_cb(lambda a,val: set_value(bar,val))
a.set_time(2000)
a.set_playback_time(2000)
a.set_repeat_count(lv.ANIM_REPEAT.INFINITE)
lv.anim_t.start(a)
|
the-stack_106_18583
|
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Integer,
NoneSet,
Set,
Float,
Bool,
DateTime,
String,
Alias,
Bool,
Sequence,
)
from openpyxl.descriptors.excel import ExtensionList, Relation
from openpyxl.descriptors.nested import NestedInteger
from openpyxl.descriptors.sequence import NestedSequence
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.xml.functions import tostring
from openpyxl.packaging.relationship import (
RelationshipList,
Relationship,
get_rels_path
)
from openpyxl.worksheet.filters import (
AutoFilter,
CellRange,
ColorFilter,
CustomFilter,
CustomFilters,
DateGroupItem,
DynamicFilter,
FilterColumn,
Filters,
IconFilter,
SortCondition,
SortState,
Top10,
)
class HierarchyUsage(Serialisable):
tagname = "hierarchyUsage"
hierarchyUsage = Integer()
def __init__(self,
hierarchyUsage=None,
):
self.hierarchyUsage = hierarchyUsage
class ColHierarchiesUsage(Serialisable):
tagname = "colHierarchiesUsage"
colHierarchyUsage = Sequence(expected_type=HierarchyUsage, )
__elements__ = ('colHierarchyUsage',)
__attrs__ = ('count', )
def __init__(self,
count=None,
colHierarchyUsage=(),
):
self.colHierarchyUsage = colHierarchyUsage
@property
def count(self):
return len(self.colHierarchyUsage)
class RowHierarchiesUsage(Serialisable):
tagname = "rowHierarchiesUsage"
rowHierarchyUsage = Sequence(expected_type=HierarchyUsage, )
__elements__ = ('rowHierarchyUsage',)
__attrs__ = ('count', )
def __init__(self,
count=None,
rowHierarchyUsage=(),
):
self.rowHierarchyUsage = rowHierarchyUsage
@property
def count(self):
return len(self.rowHierarchyUsage)
class PivotFilter(Serialisable):
fld = Integer()
mpFld = Integer(allow_none=True)
type = Set(values=(['unknown', 'count', 'percent', 'sum', 'captionEqual',
'captionNotEqual', 'captionBeginsWith', 'captionNotBeginsWith',
'captionEndsWith', 'captionNotEndsWith', 'captionContains',
'captionNotContains', 'captionGreaterThan', 'captionGreaterThanOrEqual',
'captionLessThan', 'captionLessThanOrEqual', 'captionBetween',
'captionNotBetween', 'valueEqual', 'valueNotEqual', 'valueGreaterThan',
'valueGreaterThanOrEqual', 'valueLessThan', 'valueLessThanOrEqual',
'valueBetween', 'valueNotBetween', 'dateEqual', 'dateNotEqual',
'dateOlderThan', 'dateOlderThanOrEqual', 'dateNewerThan',
'dateNewerThanOrEqual', 'dateBetween', 'dateNotBetween', 'tomorrow',
'today', 'yesterday', 'nextWeek', 'thisWeek', 'lastWeek', 'nextMonth',
'thisMonth', 'lastMonth', 'nextQuarter', 'thisQuarter', 'lastQuarter',
'nextYear', 'thisYear', 'lastYear', 'yearToDate', 'Q1', 'Q2', 'Q3', 'Q4',
'M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9', 'M10', 'M11',
'M12']))
evalOrder = Integer(allow_none=True)
id = Integer()
iMeasureHier = Integer(allow_none=True)
iMeasureFld = Integer(allow_none=True)
name = String(allow_none=True)
description = String()
stringValue1 = String()
stringValue2 = String()
autoFilter = Typed(expected_type=AutoFilter, )
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('autoFilter',)
def __init__(self,
fld=None,
mpFld=None,
type=None,
evalOrder=None,
id=None,
iMeasureHier=None,
iMeasureFld=None,
name=None,
description=None,
stringValue1=None,
stringValue2=None,
autoFilter=None,
extLst=None,
):
self.fld = fld
self.mpFld = mpFld
self.type = type
self.evalOrder = evalOrder
self.id = id
self.iMeasureHier = iMeasureHier
self.iMeasureFld = iMeasureFld
self.name = name
self.description = description
self.stringValue1 = stringValue1
self.stringValue2 = stringValue2
self.autoFilter = autoFilter
self.extLst = extLst
class PivotFilters(Serialisable):
count = Integer()
filter = Typed(expected_type=PivotFilter, allow_none=True)
__elements__ = ('filter',)
def __init__(self,
count=None,
filter=None,
):
self.filter = filter
class PivotTableStyle(Serialisable):
tagname = "pivotTableStyleInfo"
name = String(allow_none=True)
showRowHeaders = Bool()
showColHeaders = Bool()
showRowStripes = Bool()
showColStripes = Bool()
showLastColumn = Bool()
def __init__(self,
name=None,
showRowHeaders=None,
showColHeaders=None,
showRowStripes=None,
showColStripes=None,
showLastColumn=None,
):
self.name = name
self.showRowHeaders = showRowHeaders
self.showColHeaders = showColHeaders
self.showRowStripes = showRowStripes
self.showColStripes = showColStripes
self.showLastColumn = showLastColumn
class MemberList(Serialisable):
tagname = "members"
level = Integer(allow_none=True)
member = NestedSequence(expected_type=String, attribute="name")
__elements__ = ('member',)
def __init__(self,
count=None,
level=None,
member=(),
):
self.level = level
self.member = member
@property
def count(self):
return len(self.member)
class MemberProperty(Serialisable):
tagname = "mps"
name = String(allow_none=True)
showCell = Bool(allow_none=True)
showTip = Bool(allow_none=True)
showAsCaption = Bool(allow_none=True)
nameLen = Integer(allow_none=True)
pPos = Integer(allow_none=True)
pLen = Integer(allow_none=True)
level = Integer(allow_none=True)
field = Integer()
def __init__(self,
name=None,
showCell=None,
showTip=None,
showAsCaption=None,
nameLen=None,
pPos=None,
pLen=None,
level=None,
field=None,
):
self.name = name
self.showCell = showCell
self.showTip = showTip
self.showAsCaption = showAsCaption
self.nameLen = nameLen
self.pPos = pPos
self.pLen = pLen
self.level = level
self.field = field
class PivotHierarchy(Serialisable):
tagname = "pivotHierarchy"
outline = Bool()
multipleItemSelectionAllowed = Bool()
subtotalTop = Bool()
showInFieldList = Bool()
dragToRow = Bool()
dragToCol = Bool()
dragToPage = Bool()
dragToData = Bool()
dragOff = Bool()
includeNewItemsInFilter = Bool()
caption = String(allow_none=True)
mps = NestedSequence(expected_type=MemberProperty, count=True)
members = Typed(expected_type=MemberList, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('mps', 'members',)
def __init__(self,
outline=None,
multipleItemSelectionAllowed=None,
subtotalTop=None,
showInFieldList=None,
dragToRow=None,
dragToCol=None,
dragToPage=None,
dragToData=None,
dragOff=None,
includeNewItemsInFilter=None,
caption=None,
mps=(),
members=None,
extLst=None,
):
self.outline = outline
self.multipleItemSelectionAllowed = multipleItemSelectionAllowed
self.subtotalTop = subtotalTop
self.showInFieldList = showInFieldList
self.dragToRow = dragToRow
self.dragToCol = dragToCol
self.dragToPage = dragToPage
self.dragToData = dragToData
self.dragOff = dragOff
self.includeNewItemsInFilter = includeNewItemsInFilter
self.caption = caption
self.mps = mps
self.members = members
self.extLst = extLst
class Reference(Serialisable):
tagname = "reference"
field = Integer(allow_none=True)
selected = Bool(allow_none=True)
byPosition = Bool(allow_none=True)
relative = Bool(allow_none=True)
defaultSubtotal = Bool(allow_none=True)
sumSubtotal = Bool(allow_none=True)
countASubtotal = Bool(allow_none=True)
avgSubtotal = Bool(allow_none=True)
maxSubtotal = Bool(allow_none=True)
minSubtotal = Bool(allow_none=True)
productSubtotal = Bool(allow_none=True)
countSubtotal = Bool(allow_none=True)
stdDevSubtotal = Bool(allow_none=True)
stdDevPSubtotal = Bool(allow_none=True)
varSubtotal = Bool(allow_none=True)
varPSubtotal = Bool(allow_none=True)
x = NestedInteger(allow_none=True, attribute="v")
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('x',)
def __init__(self,
field=None,
count=None,
selected=None,
byPosition=None,
relative=None,
defaultSubtotal=None,
sumSubtotal=None,
countASubtotal=None,
avgSubtotal=None,
maxSubtotal=None,
minSubtotal=None,
productSubtotal=None,
countSubtotal=None,
stdDevSubtotal=None,
stdDevPSubtotal=None,
varSubtotal=None,
varPSubtotal=None,
x=None,
extLst=None,
):
self.field = field
self.selected = selected
self.byPosition = byPosition
self.relative = relative
self.defaultSubtotal = defaultSubtotal
self.sumSubtotal = sumSubtotal
self.countASubtotal = countASubtotal
self.avgSubtotal = avgSubtotal
self.maxSubtotal = maxSubtotal
self.minSubtotal = minSubtotal
self.productSubtotal = productSubtotal
self.countSubtotal = countSubtotal
self.stdDevSubtotal = stdDevSubtotal
self.stdDevPSubtotal = stdDevPSubtotal
self.varSubtotal = varSubtotal
self.varPSubtotal = varPSubtotal
self.x = x
@property
def count(self):
return len(self.field)
class PivotArea(Serialisable):
tagname = "pivotArea"
references = NestedSequence(expected_type=Reference, count=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
field = Integer(allow_none=True)
type = NoneSet(values=(['normal', 'data', 'all', 'origin', 'button',
'topEnd', 'topRight']))
dataOnly = Bool(allow_none=True)
labelOnly = Bool(allow_none=True)
grandRow = Bool(allow_none=True)
grandCol = Bool(allow_none=True)
cacheIndex = Bool(allow_none=True)
outline = Bool(allow_none=True)
offset = String(allow_none=True)
collapsedLevelsAreSubtotals = Bool(allow_none=True)
axis = NoneSet(values=(['axisRow', 'axisCol', 'axisPage', 'axisValues']))
fieldPosition = Integer(allow_none=True)
__elements__ = ('references',)
def __init__(self,
references=(),
extLst=None,
field=None,
type="normal",
dataOnly=True,
labelOnly=None,
grandRow=None,
grandCol=None,
cacheIndex=None,
outline=True,
offset=None,
collapsedLevelsAreSubtotals=None,
axis=None,
fieldPosition=None,
):
self.references = references
self.extLst = extLst
self.field = field
self.type = type
self.dataOnly = dataOnly
self.labelOnly = labelOnly
self.grandRow = grandRow
self.grandCol = grandCol
self.cacheIndex = cacheIndex
self.outline = outline
self.offset = offset
self.collapsedLevelsAreSubtotals = collapsedLevelsAreSubtotals
self.axis = axis
self.fieldPosition = fieldPosition
class ChartFormat(Serialisable):
tagname = "chartFormat"
chart = Integer()
format = Integer()
series = Bool()
pivotArea = Typed(expected_type=PivotArea, )
__elements__ = ('pivotArea',)
def __init__(self,
chart=None,
format=None,
series=None,
pivotArea=None,
):
self.chart = chart
self.format = format
self.series = series
self.pivotArea = pivotArea
class ConditionalFormat(Serialisable):
tagname = "conditionalFormat"
scope = Set(values=(['selection', 'data', 'field']))
type = NoneSet(values=(['all', 'row', 'column']))
priority = Integer()
pivotAreas = NestedSequence(expected_type=PivotArea)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('pivotAreas',)
def __init__(self,
scope=None,
type=None,
priority=None,
pivotAreas=(),
extLst=None,
):
self.scope = scope
self.type = type
self.priority = priority
self.pivotAreas = pivotAreas
self.extLst = extLst
class Format(Serialisable):
tagname = "format"
action = NoneSet(values=(['blank', 'formatting', 'drill', 'formula']))
dxfId = Integer()
pivotArea = Typed(expected_type=PivotArea, )
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('pivotArea',)
def __init__(self,
action=None,
dxfId=None,
pivotArea=None,
extLst=None,
):
self.action = action
self.dxfId = dxfId
self.pivotArea = pivotArea
self.extLst = extLst
class DataField(Serialisable):
tagname = "dataField"
name = String(allow_none=True)
fld = Integer()
subtotal = Set(values=(['average', 'count', 'countNums', 'max', 'min',
'product', 'stdDev', 'stdDevp', 'sum', 'var', 'varp']))
showDataAs = Set(values=(['normal', 'difference', 'percent',
'percentDiff', 'runTotal', 'percentOfRow', 'percentOfCol',
'percentOfTotal', 'index']))
baseField = Integer()
baseItem = Integer()
numFmtId = Integer(allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ()
def __init__(self,
name=None,
fld=None,
subtotal="sum",
showDataAs="normal",
baseField=-1,
baseItem=1048832,
numFmtId=None,
extLst=None,
):
self.name = name
self.fld = fld
self.subtotal = subtotal
self.showDataAs = showDataAs
self.baseField = baseField
self.baseItem = baseItem
self.numFmtId = numFmtId
self.extLst = extLst
class PageField(Serialisable):
tagname = "pageField"
fld = Integer()
item = Integer(allow_none=True)
hier = Integer()
name = String(allow_none=True)
cap = String(allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ()
def __init__(self,
fld=None,
item=None,
hier=None,
name=None,
cap=None,
extLst=None,
):
self.fld = fld
self.item = item
self.hier = hier
self.name = name
self.cap = cap
self.extLst = extLst
class RowColItem(Serialisable):
tagname = "i"
t = Set(values=(['data', 'default', 'sum', 'countA', 'avg', 'max', 'min',
'product', 'count', 'stdDev', 'stdDevP', 'var', 'varP', 'grand',
'blank']))
r = Integer()
i = Integer()
x = NestedInteger(allow_none=True, attribute="v")
__elements__ = ('x',)
def __init__(self,
t="data",
r=0,
i=0,
x=None,
):
self.t = t
self.r = r
self.i = i
self.x = x
class RowColField(Serialisable):
tagname = "field"
x = Integer()
def __init__(self,
x=None,
):
self.x = x
class AutoSortScope(Serialisable):
pivotArea = Typed(expected_type=PivotArea, )
__elements__ = ('pivotArea',)
def __init__(self,
pivotArea=None,
):
self.pivotArea = pivotArea
class FieldItem(Serialisable):
tagname = "item"
n = String(allow_none=True)
t = Set(values=(['data', 'default', 'sum', 'countA', 'avg', 'max', 'min',
'product', 'count', 'stdDev', 'stdDevP', 'var', 'varP', 'grand',
'blank']))
h = Bool(allow_none=True)
s = Bool(allow_none=True)
sd = Bool(allow_none=True)
f = Bool(allow_none=True)
m = Bool(allow_none=True)
c = Bool(allow_none=True)
x = Integer(allow_none=True)
d = Bool(allow_none=True)
e = Bool(allow_none=True)
def __init__(self,
n=None,
t="data",
h=None,
s=None,
sd=True,
f=None,
m=None,
c=None,
x=None,
d=None,
e=None,
):
self.n = n
self.t = t
self.h = h
self.s = s
self.sd = sd
self.f = f
self.m = m
self.c = c
self.x = x
self.d = d
self.e = e
class PivotField(Serialisable):
tagname = "pivotField"
items = NestedSequence(expected_type=FieldItem, count=True)
autoSortScope = Typed(expected_type=AutoSortScope, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
name = String(allow_none=True)
axis = NoneSet(values=(['axisRow', 'axisCol', 'axisPage', 'axisValues']))
dataField = Bool(allow_none=True)
subtotalCaption = String(allow_none=True)
showDropDowns = Bool(allow_none=True)
hiddenLevel = Bool(allow_none=True)
uniqueMemberProperty = String(allow_none=True)
compact = Bool(allow_none=True)
allDrilled = Bool(allow_none=True)
numFmtId = Integer(allow_none=True)
outline = Bool(allow_none=True)
subtotalTop = Bool(allow_none=True)
dragToRow = Bool(allow_none=True)
dragToCol = Bool(allow_none=True)
multipleItemSelectionAllowed = Bool(allow_none=True)
dragToPage = Bool(allow_none=True)
dragToData = Bool(allow_none=True)
dragOff = Bool(allow_none=True)
showAll = Bool(allow_none=True)
insertBlankRow = Bool(allow_none=True)
serverField = Bool(allow_none=True)
insertPageBreak = Bool(allow_none=True)
autoShow = Bool(allow_none=True)
topAutoShow = Bool(allow_none=True)
hideNewItems = Bool(allow_none=True)
measureFilter = Bool(allow_none=True)
includeNewItemsInFilter = Bool(allow_none=True)
itemPageCount = Integer(allow_none=True)
sortType = Set(values=(['manual', 'ascending', 'descending']))
dataSourceSort = Bool(allow_none=True)
nonAutoSortDefault = Bool(allow_none=True)
rankBy = Integer(allow_none=True)
defaultSubtotal = Bool(allow_none=True)
sumSubtotal = Bool(allow_none=True)
countASubtotal = Bool(allow_none=True)
avgSubtotal = Bool(allow_none=True)
maxSubtotal = Bool(allow_none=True)
minSubtotal = Bool(allow_none=True)
productSubtotal = Bool(allow_none=True)
countSubtotal = Bool(allow_none=True)
stdDevSubtotal = Bool(allow_none=True)
stdDevPSubtotal = Bool(allow_none=True)
varSubtotal = Bool(allow_none=True)
varPSubtotal = Bool(allow_none=True)
showPropCell = Bool(allow_none=True)
showPropTip = Bool(allow_none=True)
showPropAsCaption = Bool(allow_none=True)
defaultAttributeDrillState = Bool(allow_none=True)
__elements__ = ('items', 'autoSortScope',)
def __init__(self,
items=(),
autoSortScope=None,
name=None,
axis=None,
dataField=None,
subtotalCaption=None,
showDropDowns=True,
hiddenLevel=None,
uniqueMemberProperty=None,
compact=True,
allDrilled=None,
numFmtId=None,
outline=True,
subtotalTop=True,
dragToRow=True,
dragToCol=True,
multipleItemSelectionAllowed=None,
dragToPage=True,
dragToData=True,
dragOff=True,
showAll=True,
insertBlankRow=None,
serverField=None,
insertPageBreak=None,
autoShow=None,
topAutoShow=True,
hideNewItems=None,
measureFilter=None,
includeNewItemsInFilter=None,
itemPageCount=10,
sortType="manual",
dataSourceSort=None,
nonAutoSortDefault=None,
rankBy=None,
defaultSubtotal=True,
sumSubtotal=None,
countASubtotal=None,
avgSubtotal=None,
maxSubtotal=None,
minSubtotal=None,
productSubtotal=None,
countSubtotal=None,
stdDevSubtotal=None,
stdDevPSubtotal=None,
varSubtotal=None,
varPSubtotal=None,
showPropCell=None,
showPropTip=None,
showPropAsCaption=None,
defaultAttributeDrillState=None,
extLst=None,
):
self.items = items
self.autoSortScope = autoSortScope
self.name = name
self.axis = axis
self.dataField = dataField
self.subtotalCaption = subtotalCaption
self.showDropDowns = showDropDowns
self.hiddenLevel = hiddenLevel
self.uniqueMemberProperty = uniqueMemberProperty
self.compact = compact
self.allDrilled = allDrilled
self.numFmtId = numFmtId
self.outline = outline
self.subtotalTop = subtotalTop
self.dragToRow = dragToRow
self.dragToCol = dragToCol
self.multipleItemSelectionAllowed = multipleItemSelectionAllowed
self.dragToPage = dragToPage
self.dragToData = dragToData
self.dragOff = dragOff
self.showAll = showAll
self.insertBlankRow = insertBlankRow
self.serverField = serverField
self.insertPageBreak = insertPageBreak
self.autoShow = autoShow
self.topAutoShow = topAutoShow
self.hideNewItems = hideNewItems
self.measureFilter = measureFilter
self.includeNewItemsInFilter = includeNewItemsInFilter
self.itemPageCount = itemPageCount
self.sortType = sortType
self.dataSourceSort = dataSourceSort
self.nonAutoSortDefault = nonAutoSortDefault
self.rankBy = rankBy
self.defaultSubtotal = defaultSubtotal
self.sumSubtotal = sumSubtotal
self.countASubtotal = countASubtotal
self.avgSubtotal = avgSubtotal
self.maxSubtotal = maxSubtotal
self.minSubtotal = minSubtotal
self.productSubtotal = productSubtotal
self.countSubtotal = countSubtotal
self.stdDevSubtotal = stdDevSubtotal
self.stdDevPSubtotal = stdDevPSubtotal
self.varSubtotal = varSubtotal
self.varPSubtotal = varPSubtotal
self.showPropCell = showPropCell
self.showPropTip = showPropTip
self.showPropAsCaption = showPropAsCaption
self.defaultAttributeDrillState = defaultAttributeDrillState
class Location(Serialisable):
tagname = "location"
ref = String()
firstHeaderRow = Integer()
firstDataRow = Integer()
firstDataCol = Integer()
rowPageCount = Integer(allow_none=True)
colPageCount = Integer(allow_none=True)
def __init__(self,
ref=None,
firstHeaderRow=None,
firstDataRow=None,
firstDataCol=None,
rowPageCount=None,
colPageCount=None,
):
self.ref = ref
self.firstHeaderRow = firstHeaderRow
self.firstDataRow = firstDataRow
self.firstDataCol = firstDataCol
self.rowPageCount = rowPageCount
self.colPageCount = colPageCount
class TableDefinition(Serialisable):
mime_type = "application/vnd.openxmlformats-officedocument.spreadsheetml.pivotTable+xml"
rel_type = "http://schemas.openxmlformats.org/officeDocument/2006/relationships/pivotTable"
_id = 1
_path = "/xl/pivotTables/pivotTable{0}.xml"
tagname = "pivotTableDefinition"
cache = None
name = String()
cacheId = Integer()
dataOnRows = Bool()
dataPosition = Integer(allow_none=True)
dataCaption = String()
grandTotalCaption = String(allow_none=True)
errorCaption = String(allow_none=True)
showError = Bool()
missingCaption = String(allow_none=True)
showMissing = Bool()
pageStyle = String(allow_none=True)
pivotTableStyle = String(allow_none=True)
vacatedStyle = String(allow_none=True)
tag = String(allow_none=True)
updatedVersion = Integer()
minRefreshableVersion = Integer()
asteriskTotals = Bool()
showItems = Bool()
editData = Bool()
disableFieldList = Bool()
showCalcMbrs = Bool()
visualTotals = Bool()
showMultipleLabel = Bool()
showDataDropDown = Bool()
showDrill = Bool()
printDrill = Bool()
showMemberPropertyTips = Bool()
showDataTips = Bool()
enableWizard = Bool()
enableDrill = Bool()
enableFieldProperties = Bool()
preserveFormatting = Bool()
useAutoFormatting = Bool()
pageWrap = Integer()
pageOverThenDown = Bool()
subtotalHiddenItems = Bool()
rowGrandTotals = Bool()
colGrandTotals = Bool()
fieldPrintTitles = Bool()
itemPrintTitles = Bool()
mergeItem = Bool()
showDropZones = Bool()
createdVersion = Integer()
indent = Integer()
showEmptyRow = Bool()
showEmptyCol = Bool()
showHeaders = Bool()
compact = Bool()
outline = Bool()
outlineData = Bool()
compactData = Bool()
published = Bool()
gridDropZones = Bool()
immersive = Bool()
multipleFieldFilters = Bool()
chartFormat = Integer()
rowHeaderCaption = String(allow_none=True)
colHeaderCaption = String(allow_none=True)
fieldListSortAscending = Bool()
mdxSubqueries = Bool()
customListSort = Bool(allow_none=True)
autoFormatId = Integer(allow_none=True)
applyNumberFormats = Bool()
applyBorderFormats = Bool()
applyFontFormats = Bool()
applyPatternFormats = Bool()
applyAlignmentFormats = Bool()
applyWidthHeightFormats = Bool()
location = Typed(expected_type=Location, )
pivotFields = NestedSequence(expected_type=PivotField, count=True)
rowFields = NestedSequence(expected_type=RowColField, count=True)
rowItems = NestedSequence(expected_type=RowColItem, count=True)
colFields = NestedSequence(expected_type=RowColField, count=True)
colItems = NestedSequence(expected_type=RowColItem, count=True)
pageFields = NestedSequence(expected_type=PageField, count=True)
dataFields = NestedSequence(expected_type=DataField, count=True)
formats = NestedSequence(expected_type=Format, count=True)
conditionalFormats = NestedSequence(expected_type=ConditionalFormat, count=True)
chartFormats = NestedSequence(expected_type=ChartFormat, count=True)
pivotHierarchies = NestedSequence(expected_type=PivotHierarchy, count=True)
pivotTableStyleInfo = Typed(expected_type=PivotTableStyle, allow_none=True)
filters = NestedSequence(expected_type=PivotFilter, count=True)
rowHierarchiesUsage = Typed(expected_type=RowHierarchiesUsage, allow_none=True)
colHierarchiesUsage = Typed(expected_type=ColHierarchiesUsage, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
id = Relation()
__elements__ = ('location', 'pivotFields', 'rowFields', 'rowItems',
'colFields', 'colItems', 'pageFields', 'dataFields', 'formats',
'conditionalFormats', 'chartFormats', 'pivotHierarchies',
'pivotTableStyleInfo', 'filters', 'rowHierarchiesUsage',
'colHierarchiesUsage',)
def __init__(self,
name=None,
cacheId=None,
dataOnRows=False,
dataPosition=None,
dataCaption=None,
grandTotalCaption=None,
errorCaption=None,
showError=False,
missingCaption=None,
showMissing=True,
pageStyle=None,
pivotTableStyle=None,
vacatedStyle=None,
tag=None,
updatedVersion=0,
minRefreshableVersion=0,
asteriskTotals=False,
showItems=True,
editData=False,
disableFieldList=False,
showCalcMbrs=True,
visualTotals=True,
showMultipleLabel=True,
showDataDropDown=True,
showDrill=True,
printDrill=False,
showMemberPropertyTips=True,
showDataTips=True,
enableWizard=True,
enableDrill=True,
enableFieldProperties=True,
preserveFormatting=True,
useAutoFormatting=False,
pageWrap=0,
pageOverThenDown=False,
subtotalHiddenItems=False,
rowGrandTotals=True,
colGrandTotals=True,
fieldPrintTitles=False,
itemPrintTitles=False,
mergeItem=False,
showDropZones=True,
createdVersion=0,
indent=1,
showEmptyRow=False,
showEmptyCol=False,
showHeaders=True,
compact=True,
outline=False,
outlineData=False,
compactData=True,
published=False,
gridDropZones=False,
immersive=True,
multipleFieldFilters=None,
chartFormat=0,
rowHeaderCaption=None,
colHeaderCaption=None,
fieldListSortAscending=None,
mdxSubqueries=None,
customListSort=None,
autoFormatId=None,
applyNumberFormats=False,
applyBorderFormats=False,
applyFontFormats=False,
applyPatternFormats=False,
applyAlignmentFormats=False,
applyWidthHeightFormats=False,
location=None,
pivotFields=(),
rowFields=(),
rowItems=(),
colFields=(),
colItems=(),
pageFields=(),
dataFields=(),
formats=(),
conditionalFormats=(),
chartFormats=(),
pivotHierarchies=(),
pivotTableStyleInfo=None,
filters=(),
rowHierarchiesUsage=None,
colHierarchiesUsage=None,
extLst=None,
id=None,
):
self.name = name
self.cacheId = cacheId
self.dataOnRows = dataOnRows
self.dataPosition = dataPosition
self.dataCaption = dataCaption
self.grandTotalCaption = grandTotalCaption
self.errorCaption = errorCaption
self.showError = showError
self.missingCaption = missingCaption
self.showMissing = showMissing
self.pageStyle = pageStyle
self.pivotTableStyle = pivotTableStyle
self.vacatedStyle = vacatedStyle
self.tag = tag
self.updatedVersion = updatedVersion
self.minRefreshableVersion = minRefreshableVersion
self.asteriskTotals = asteriskTotals
self.showItems = showItems
self.editData = editData
self.disableFieldList = disableFieldList
self.showCalcMbrs = showCalcMbrs
self.visualTotals = visualTotals
self.showMultipleLabel = showMultipleLabel
self.showDataDropDown = showDataDropDown
self.showDrill = showDrill
self.printDrill = printDrill
self.showMemberPropertyTips = showMemberPropertyTips
self.showDataTips = showDataTips
self.enableWizard = enableWizard
self.enableDrill = enableDrill
self.enableFieldProperties = enableFieldProperties
self.preserveFormatting = preserveFormatting
self.useAutoFormatting = useAutoFormatting
self.pageWrap = pageWrap
self.pageOverThenDown = pageOverThenDown
self.subtotalHiddenItems = subtotalHiddenItems
self.rowGrandTotals = rowGrandTotals
self.colGrandTotals = colGrandTotals
self.fieldPrintTitles = fieldPrintTitles
self.itemPrintTitles = itemPrintTitles
self.mergeItem = mergeItem
self.showDropZones = showDropZones
self.createdVersion = createdVersion
self.indent = indent
self.showEmptyRow = showEmptyRow
self.showEmptyCol = showEmptyCol
self.showHeaders = showHeaders
self.compact = compact
self.outline = outline
self.outlineData = outlineData
self.compactData = compactData
self.published = published
self.gridDropZones = gridDropZones
self.immersive = immersive
self.multipleFieldFilters = multipleFieldFilters
self.chartFormat = chartFormat
self.rowHeaderCaption = rowHeaderCaption
self.colHeaderCaption = colHeaderCaption
self.fieldListSortAscending = fieldListSortAscending
self.mdxSubqueries = mdxSubqueries
self.customListSort = customListSort
self.autoFormatId = autoFormatId
self.applyNumberFormats = applyNumberFormats
self.applyBorderFormats = applyBorderFormats
self.applyFontFormats = applyFontFormats
self.applyPatternFormats = applyPatternFormats
self.applyAlignmentFormats = applyAlignmentFormats
self.applyWidthHeightFormats = applyWidthHeightFormats
self.location = location
self.pivotFields = pivotFields
self.rowFields = rowFields
self.rowItems = rowItems
self.colFields = colFields
self.colItems = colItems
self.pageFields = pageFields
self.dataFields = dataFields
self.formats = formats
self.conditionalFormats = conditionalFormats
self.chartFormats = chartFormats
self.pivotHierarchies = pivotHierarchies
self.pivotTableStyleInfo = pivotTableStyleInfo
self.filters = filters
self.rowHierarchiesUsage = rowHierarchiesUsage
self.colHierarchiesUsage = colHierarchiesUsage
self.extLst = extLst
self.id = id
def to_tree(self):
tree = super(TableDefinition, self).to_tree()
tree.set("xmlns", SHEET_MAIN_NS)
return tree
@property
def path(self):
return self._path.format(self._id)
def _write(self, archive, manifest):
"""
Add to zipfile and update manifest
"""
self._write_rels(archive, manifest)
xml = tostring(self.to_tree())
archive.writestr(self.path[1:], xml)
manifest.append(self)
def _write_rels(self, archive, manifest):
"""
Write the relevant child objects and add links
"""
if self.cache is None:
return
rels = RelationshipList()
r = Relationship(Type=self.cache.rel_type, Target=self.cache.path)
rels.append(r)
self.id = r.id
if self.cache.path[1:] not in archive.namelist():
self.cache._write(archive, manifest)
path = get_rels_path(self.path)
xml = tostring(rels.to_tree())
archive.writestr(path[1:], xml)
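
# Usage sketch (illustrative only; pivot tables are normally created by Excel
# and round-tripped by openpyxl rather than built by hand, so treat the field
# values below as placeholders):
#
#   loc = Location(ref="A3:C20", firstHeaderRow=1, firstDataRow=2, firstDataCol=1)
#   pivot = TableDefinition(name="PivotTable1", cacheId=1, location=loc)
#   xml = tostring(pivot.to_tree())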
|
the-stack_106_18584
|
from django.core.management.base import BaseCommand, CommandError
from pybb import compat
from pybb.models import Forum
class Command(BaseCommand):
help = "Set and remove moderator to all forums"
args = "{add|del} username"
def handle(self, *args, **kwargs):
if len(args) != 2:
raise CommandError("Enter action {add|del} and username")
action, username = args
if action not in ("add", "del"):
raise AssertionError
user = compat.get_user_model().objects.get(
**{compat.get_username_field(): username}
)
forums = Forum.objects.all()
for forum in forums:
forum.moderators.remove(user)
if action == "add":
forum.moderators.add(user)
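
# Usage sketch (the command name is whatever filename this module is saved
# under in pybb/management/commands/, e.g. a hypothetical set_moderators.py):
#
#   python manage.py set_moderators add alice
#   python manage.py set_moderators del alice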
|
the-stack_106_18585
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An abstraction around the source and classfiles for a Java application."""
from builtins import object
import os
import os.path
import google
_SDKROOT = os.path.dirname(os.path.dirname(google.__file__))
class JavaApplication(object):
"""An abstraction around the compiled class files for a Java application."""
def __init__(self, module_configuration):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
"""
self._module_configuration = module_configuration
def get_environment(self):
"""Return the environment that should be used to run the Java executable."""
environ = {'SDKROOT': _SDKROOT,
'PWD': self._module_configuration.application_root,
'TZ': 'UTC',
'APPLICATION_ID': self._module_configuration.application}
for var in ('PATH', 'SYSTEMROOT', 'USER'):
if var in os.environ:
environ[var] = os.environ[var]
return environ
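
# Usage sketch (assumes a module_configuration object that exposes
# `application_root` and `application`, as referenced above):
#
#   app = JavaApplication(module_configuration)
#   env = app.get_environment()  # SDKROOT/PWD/TZ/APPLICATION_ID plus passthrough vars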
|
the-stack_106_18587
|
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
import pytest
import pandas as pd
import numpy as np
from dash.dependencies import Input, Output
from dash.testing.wait import until
@pytest.mark.parametrize(
"fmt", ("csv", "json", "html", "feather", "parquet", "stata", "pickle")
)
def test_dldf001_download_dataframe(fmt, dash_dcc):
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [2, 1, 5, 6], "c": ["x", "x", "y", "y"]})
reader = getattr(pd, "read_{}".format(fmt)) # e.g. read_csv
writer = getattr(df, "to_{}".format(fmt)) # e.g. to_csv
filename = "df.{}".format(fmt)
# Create app.
app = dash.Dash(__name__, prevent_initial_callbacks=True)
app.layout = html.Div(
[html.Button("Click me", id="btn"), dcc.Download(id="download")]
)
@app.callback(Output("download", "data"), Input("btn", "n_clicks"))
def download(_):
# For csv and html, the index must be removed to preserve the structure.
if fmt in ["csv", "html", "excel"]:
return dcc.send_data_frame(writer, filename, index=False)
        # For stata, the index must be removed via the write_index keyword.
if fmt in ["stata"]:
a = dcc.send_data_frame(writer, filename, write_index=False)
return a
# For other formats, no modifications are needed.
return dcc.send_data_frame(writer, filename)
dash_dcc.start_server(app)
# Check that there is nothing before clicking
fp = os.path.join(dash_dcc.download_path, filename)
assert not os.path.isfile(fp)
dash_dcc.find_element("#btn").click()
# Check that a file has been download, and that it's content matches the original data frame.
until(lambda: os.path.exists(fp), 10)
df_download = reader(fp)
if isinstance(df_download, list):
df_download = df_download[0]
# For stata data, pandas equals fails. Hence, a custom equals is used instead.
assert df.columns.equals(df_download.columns)
assert df.index.equals(df_download.index)
np.testing.assert_array_equal(df.values, df_download.values)
assert dash_dcc.get_logs() == []
|
the-stack_106_18589
|
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from bootcamp.activities.models import Notification
from bootcamp.decorators import ajax_required
@login_required
def notifications(request):
user = request.user
notifications = Notification.objects.filter(to_user=user)
unread = Notification.objects.filter(to_user=user, is_read=False)
for notification in unread:
notification.is_read = True
notification.save()
return render(request, 'activities/notifications.html',
{'notifications': notifications})
@login_required
@ajax_required
def last_notifications(request):
user = request.user
notifications = Notification.objects.filter(to_user=user,
is_read=False)[:5]
for notification in notifications:
notification.is_read = True
notification.save()
return render(request,
'activities/last_notifications.html',
{'notifications': notifications})
@login_required
@ajax_required
def check_notifications(request):
user = request.user
notifications = Notification.objects.filter(to_user=user,
is_read=False)[:5]
return HttpResponse(len(notifications))
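
# URL wiring sketch (hypothetical urls.py entries for these views; names and
# paths are illustrative, not taken from the project):
#
#   from django.urls import path
#   from bootcamp.activities import views
#
#   urlpatterns = [
#       path('notifications/', views.notifications, name='notifications'),
#       path('notifications/last/', views.last_notifications, name='last_notifications'),
#       path('notifications/check/', views.check_notifications, name='check_notifications'),
#   ]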
|
the-stack_106_18590
|
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib as mpl
import matplotlib.pylab as plt
from mypy.grid_toolkit import shiftgrid
from PySHTOOLS import SHExpandDH, MakeGrid2D
from __init__ import *
def test1():
mpl.rcParams['contour.negative_linestyle'] = 'solid'
data = np.loadtxt(join(test_dir, "dyntopoC2E5_l1-22.txt"))
cilm = SHExpandDH(data[:-1,:-1], 8)
grid = MakeGrid2D(cilm, 181, 361)
a, b = grid.shape
latitudes = np.linspace(90, -90, a)
centeredLons = np.linspace(-180, 180, b)
LONS, LATS = np.meshgrid(centeredLons, latitudes)
grid = shiftgrid(grid, b/2)
Figure = plt.figure(figsize=(22,10))
Map = Basemap(projection='robin', lon_0=0, resolution='l')
x, y = Map(LONS, LATS)
fcp = Map.contourf(x, y, grid, 30, interpolation="bicubic", alpha=1.0, cmap=mpl.cm.Spectral)
levels = fcp.get_array()
cp = Map.contour(x, y, grid, 30, interpolation="bicubic", linewidth=0.5, colors='k', alpha=0.5)
cb = Map.colorbar(fcp, "bottom", size="5%", pad='5%', extendrect=False)
cb.ax.tick_params(labelsize=18)
cb.solids.set_edgecolor("face")
cb.set_label("Km",fontsize=18)
cb.ax.set_aspect(0.047)
Map.drawcoastlines(linewidth=1)
Map.drawmapboundary(linewidth=1)
Map.drawmeridians([-150,-100,-50,0,50,100, 150],labels=[1,1,1,0],fontsize=18)
Map.drawparallels([-60,-30,0,30,60],labels=[1,1,1,1],fontsize=18)
def test2():
data = periodic2D(50, 100, 3, 4)
cilm = SHExpandDH(data, 5)
grid = MakeGrid2D(cilm, 51, 101)
a, b = grid.shape
plt.figure()
plt.imshow(grid)
# plt.show()
if __name__ =="__main__":
print("Test 1")
test1()
print("Test 2")
test2()
plt.show()
|
the-stack_106_18591
|
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_problem.py: Checks correctness of azure.quantum.optimization module.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import unittest
from unittest.mock import Mock, patch
from typing import TYPE_CHECKING
from azure.quantum.optimization import Problem, ProblemType, Term, GroupedTerm
import azure.quantum.optimization.problem
from common import expected_terms
import numpy
import os
class TestProblemClass(unittest.TestCase):
def setUp(self):
self.mock_ws = Mock()
self.mock_ws.get_container_uri = Mock(return_value = "mock_container_uri/foo/bar")
## QUBO problem
self.problem = Problem(name="test")
self.problem.terms = [
Term(c=3, indices=[1, 0]),
Term(c=5, indices=[2, 0]),
]
self.problem.uploaded_blob_uri = "mock_blob_uri"
# Create equivalent NPZ file for translation
self.problem.row = numpy.array([1, 2])
self.problem.col = numpy.array([0, 0])
self.problem.data = numpy.array([3, 5])
# If arguments are passed to savez with no keywords
# then default names are used (e.g. "arr_0", "arr_1", etc)
# otherwise it uses those supplied by user (e.g. "row", "col", etc)
self.default_qubo_filename = "default_qubo.npz"
numpy.savez(self.default_qubo_filename,
self.problem.row,
self.problem.col,
self.problem.data
)
self.with_keywords_qubo_filename = "with_keywords_qubo.npz"
numpy.savez(self.with_keywords_qubo_filename,
row=self.problem.row,
col=self.problem.col,
data=self.problem.data
)
## PUBO problem
self.pubo_problem = Problem(name="test")
self.pubo_problem.terms = [
Term(c=3, indices=[1, 0, 1]),
Term(c=5, indices=[2, 0, 0]),
Term(c=-1, indices=[1, 0, 0]),
Term(c=4, indices=[0, 2, 1])
]
# Create equivalent NPZ file for translation
self.pubo_problem.i = numpy.array([1, 2, 1, 0])
self.pubo_problem.j = numpy.array([0, 0, 0, 2])
self.pubo_problem.k = numpy.array([1, 0, 0, 1])
self.pubo_problem.c = numpy.array([3, 5, -1, 4])
self.default_pubo_filename = "default_pubo.npz"
numpy.savez(self.default_pubo_filename,
self.pubo_problem.i,
self.pubo_problem.j,
self.pubo_problem.k,
self.pubo_problem.c
)
self.with_keywords_pubo_filename = "with_keywords_pubo.npz"
numpy.savez(self.with_keywords_pubo_filename,
i=self.pubo_problem.i,
j=self.pubo_problem.j,
k=self.pubo_problem.k,
c=self.pubo_problem.c
)
def test_upload(self):
with patch("azure.quantum.optimization.problem.BlobClient") as mock_blob_client, \
patch("azure.quantum.optimization.problem.ContainerClient") as mock_container_client, \
patch("azure.quantum.job.base_job.upload_blob") as mock_upload:
mock_blob_client.from_blob_url.return_value = Mock()
mock_container_client.from_container_url.return_value = Mock()
assert(self.pubo_problem.uploaded_blob_uri == None)
actual_result = self.pubo_problem.upload(self.mock_ws)
mock_upload.get_blob_uri_with_sas_token = Mock()
azure.quantum.job.base_job.upload_blob.assert_called_once()
def test_download(self):
with patch("azure.quantum.optimization.problem.download_blob") as mock_download_blob,\
patch("azure.quantum.optimization.problem.BlobClient") as mock_blob_client,\
patch("azure.quantum.optimization.problem.ContainerClient") as mock_container_client:
mock_download_blob.return_value=expected_terms()
mock_blob_client.from_blob_url.return_value = Mock()
mock_container_client.from_container_url.return_value = Mock()
actual_result = self.problem.download(self.mock_ws)
assert actual_result.name == "test"
azure.quantum.optimization.problem.download_blob.assert_called_once()
def test_get_term(self):
terms = self.problem.get_terms(0)
assert len(terms) == 2
def test_get_term_raise_exception(self):
test_prob = Problem(name="random")
with self.assertRaises(Exception):
test_prob.get_terms(id=0)
def test_grouped_type(self):
problem = Problem(name="test_pubo_grouped", problem_type=ProblemType.pubo)
problem.terms = [
Term(c=3, indices=[1, 0, 1]),
Term(c=5, indices=[2, 0, 0]),
Term(c=-1, indices=[1, 0, 0]),
Term(c=4, indices=[0, 2, 1])
]
assert problem.problem_type is ProblemType.pubo
problem.add_slc_term([(3,0), (2,1), (-1,None)])
assert problem.problem_type is ProblemType.pubo_grouped
def test_create_npz_file_default(self):
# When no keywords are supplied, columns have default names
# e.g. "arr_0", "arr_1" etc
# QUBO
npz_file = numpy.load(self.default_qubo_filename)
num_columns = 3
self.assertEqual(len(npz_file.files), num_columns)
for i in range(num_columns):
self.assertEqual(npz_file.files[i], "arr_%s" % i)
# PUBO
npz_file = numpy.load(self.default_pubo_filename)
num_columns = 4
self.assertEqual(len(npz_file.files), num_columns)
for i in range(num_columns):
self.assertEqual(npz_file.files[i], "arr_%s" % i)
def test_create_npz_file_with_keywords(self):
# When keywords are supplied, columns use these names
# QUBO
npz_file = numpy.load(self.with_keywords_qubo_filename)
keywords = ["row", "col", "data"]
self.assertEqual(len(npz_file.files), len(keywords))
for i in range(len(keywords)):
self.assertEqual(npz_file.files[i], keywords[i])
# PUBO
npz_file = numpy.load(self.with_keywords_pubo_filename)
keywords = ["i", "j", "k", "c"]
self.assertEqual(len(npz_file.files), len(keywords))
for i in range(len(keywords)):
self.assertEqual(npz_file.files[i], keywords[i])
def test_valid_npz(self):
default_qubo = numpy.load(self.default_qubo_filename)
with_keywords_qubo = numpy.load(self.with_keywords_qubo_filename)
default_pubo = numpy.load(self.default_pubo_filename)
with_keywords_pubo = numpy.load(self.with_keywords_pubo_filename)
## Valid files
self.assertTrue(self.problem.is_valid_npz(default_qubo.files))
self.assertTrue(self.problem.is_valid_npz(
default_qubo.files,
["arr_0", "arr_1"],
"arr_2")
)
self.assertTrue(self.problem.is_valid_npz(
with_keywords_qubo.files,
["col", "row"],
"data")
)
self.assertTrue(self.pubo_problem.is_valid_npz(
default_pubo.files,
["arr_0", "arr_1", "arr_2"],
"arr_3")
)
self.assertTrue(self.pubo_problem.is_valid_npz(
with_keywords_pubo.files,
["i", "j", "k"],
"c")
)
## Invalid files
# Too many columns
self.assertFalse(self.problem.is_valid_npz(
default_qubo.files,
["arr_0", "arr_1", "arr_2"],
"arr_3")
)
self.assertFalse(self.pubo_problem.is_valid_npz(
default_pubo.files,
["arr_0", "arr_1", "arr_2", "arr_3"],
"arr_4")
)
# Wrong column names
self.assertFalse(self.problem.is_valid_npz(
with_keywords_qubo.files,
["i", "j"],
"k")
)
self.assertFalse(self.pubo_problem.is_valid_npz(
with_keywords_pubo.files,
["x", "y", "z"],
"c")
)
# No indices column names
self.assertFalse(self.problem.is_valid_npz(
with_keywords_qubo.files,
[],
"data")
)
# Wrong coefficient column name
self.assertFalse(self.problem.is_valid_npz(
with_keywords_qubo.files,
["row", "col"],
"")
)
def test_terms_from_npz_qubo(self):
# Exceptions are raised for invalid file paths or files with incorrect naming
self.assertRaises(Exception, self.problem.terms_from_npz, "invalid_file_path.npz")
self.assertRaises(
Exception,
self.problem.terms_from_npz,
self.default_qubo_filename,
["arr_0", "arr_1", "arr_2"],
"arr_3"
)
# Terms are produced for valid files
self.assertEqual(
self.problem.terms_from_npz(self.default_qubo_filename),
self.problem.terms
)
self.assertEqual(
self.problem.terms_from_npz(
self.with_keywords_qubo_filename,
["row", "col"],
"data"
),
self.problem.terms
)
def test_terms_from_npz_pubo(self):
# Exceptions are raised for invalid file paths or files with incorrect naming
self.assertRaises(Exception, self.pubo_problem.terms_from_npz, "invalid_file_path.npz")
self.assertRaises(
Exception,
self.pubo_problem.terms_from_npz,
self.default_pubo_filename,
["arr_0", "arr_1", "arr_2", "arr_3"],
"arr_4"
)
# Terms are produced for valid files
self.assertEqual(
self.pubo_problem.terms_from_npz(
self.default_pubo_filename,
["arr_0", "arr_1", "arr_2"],
"arr_3"
),
self.pubo_problem.terms
)
self.assertEqual(
self.pubo_problem.terms_from_npz(
self.with_keywords_pubo_filename,
["i", "j", "k"],
"c"
),
self.pubo_problem.terms
)
def tearDown(self):
test_files = [
self.default_qubo_filename,
self.with_keywords_qubo_filename,
self.default_pubo_filename,
self.with_keywords_pubo_filename
]
for test_file in test_files:
if os.path.isfile(test_file):
os.remove(test_file)
|
the-stack_106_18592
|
import torch.nn as nn
import torch
import torch.nn.functional as F
class CpyAndW(nn.Module):
def __init__(self, num_heads, seq_length, sample_length):
super(CpyAndW, self).__init__()
        self.num_heads = num_heads
        self.seq_length = seq_length
        self.sample_length = sample_length
        self.downsample = nn.Linear(seq_length, sample_length)
        self.ac = nn.LeakyReLU(inplace=True)
        for i in range(self.num_heads):
            # Each head operates on the downsampled sequence, so its linear
            # layer must be sized by sample_length (not seq_length).
            setattr(self, 'weight_%d' % i, self.model(self.sample_length))
def model(self, sample_length):
model = nn.Sequential(
nn.Linear(sample_length, sample_length),
nn.LeakyReLU(inplace=True),
)
return model
def forward(self, x):
down_activate=self.ac(self.downsample(x))
output = []
for i in range(self.num_heads):
output.append(getattr(self, 'weight_%d' % i)(down_activate).unsqueeze(1))
# (batch,head,seq)
output_value = torch.cat(output, dim=1)
return output_value
class DownSample(nn.Module):
def __init__(self,seq_length,channel):
        '''
        4x temporal downsampling of a (batch, seq_length) signal using a
        strided Conv1d over `channel` channels.
        '''
super(DownSample,self).__init__()
self.channel=channel
self.down_layer=nn.Conv1d(channel,channel,9,4,3)
self.norm_layer=nn.BatchNorm1d(1)
self.ac=nn.LeakyReLU(inplace=True)
    def forward(self, x):
        # (batch, seq) -> (batch, channel, seq) for Conv1d.
        x = x.view(x.size(0), self.channel, x.size(1)).contiguous()
        downsample_out = self.ac(self.norm_layer(self.down_layer(x)))
        # Drop the channel dimension again: (batch, channel, seq/4) -> (batch, seq/4).
        final_out = downsample_out.view(downsample_out.size(0), downsample_out.size(2)).contiguous()
        return final_out
class Attention(nn.Module):
'''
self attention
'''
def __init__(self, seq_length,dropout=True):
'''
sample_length
'''
super(Attention,self).__init__()
self.seq_length=seq_length
# self.sample_length=sample_length
self.dropout=nn.Dropout(0.3)
self.query=nn.Linear(seq_length,seq_length)
self.key=nn.Linear(seq_length,seq_length)
self.value=nn.Linear(seq_length,seq_length)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self,x):
# b,seq_length
b,seq=x.size()
q=self.query(x).view(b,-1,seq) # b,c,seq_length
k=self.key(x).view(b,-1,seq) # b,c,seq_length
v=self.value(x).view(b,-1,seq) # b,c,seq_length
attention=torch.bmm(q.permute(0,2,1).contiguous(),k) #b,seq,seq
attention=self.softmax(attention)
attention=self.dropout(attention)
self_attention=torch.bmm(v,attention) #b,c,seq
self_attention=self_attention.view(b,seq)
out=self_attention + x
return out
class MultiHeadAttention(nn.Module):
'''multi-head attention'''
def __init__(self, num_heads, seq_length, sample_length, dropout=True):
super(MultiHeadAttention, self).__init__()
self.seq_length = seq_length
self.copy_and_weight = CpyAndW(num_heads, seq_length, sample_length)
self.dropout = nn.Dropout(0.2)
self.query = nn.Linear(sample_length, sample_length)
self.key = nn.Linear(sample_length, sample_length)
self.value = nn.Linear(sample_length, sample_length)
self.final_layer = nn.Linear(num_heads, 1)
def transpose_the_sequence(self, x):
'''shape the sequence'''
# (batch,head,seq) -> (batch,head,seq,1)
new_x = x.unsqueeze(-1)
return new_x
def forward(self, x):
input_x = self.copy_and_weight(x)
q = self.query(input_x)
k = self.key(input_x)
v = self.value(input_x)
q = self.transpose_the_sequence(q)
k = self.transpose_the_sequence(k)
v = self.transpose_the_sequence(v)
attention_scores = torch.matmul(q, k.transpose(-1, -2))
attention_scores /= self.seq_length ** 0.5
attention_prob = F.softmax(attention_scores, dim=-1)
# attention_prob = self.dropout(attention_prob)
contex_layer = torch.matmul(attention_prob, v)
contex_layer = self.dropout(contex_layer)
        # (batch,head,seq,1) -> (batch,head,seq)
contex_layer = contex_layer.view(contex_layer.size(
0), contex_layer.size(1), contex_layer.size(2)).contiguous()
# out=self.final_layer(contex_layer)
return F.leaky_relu(contex_layer)
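# Minimal usage sketch (assumed shapes, not part of the original module): feed a
# (batch, seq_length) tensor through MultiHeadAttention; each head yields a
# sample_length-wide representation, giving an output of shape
# (batch, num_heads, sample_length). This assumes each head's linear layer in
# CpyAndW is sized to sample_length, as above.
if __name__ == "__main__":
    batch, seq_length, sample_length, num_heads = 4, 128, 32, 8
    mha = MultiHeadAttention(num_heads, seq_length, sample_length)
    x = torch.randn(batch, seq_length)
    out = mha(x)
    print(out.shape)  # expected: torch.Size([4, 8, 32])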
|
the-stack_106_18593
|
from numpy import log, outer, sqrt, zeros
from numpy.random import shuffle
from cogent3.format.table import formatted_cells, rich_html, simple_format
from cogent3.maths.stats import chisqprob
from cogent3.maths.stats.test import G_fit, G_ind
from cogent3.util.dict_array import DictArray, DictArrayTemplate
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.2.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Alpha"
def _get_bin(bins, value):
"""returns bin index corresponding to value"""
pass
# todo this should probably go into different module
def shuffled_matrix(matrix):
"""returns a randomly sampled matrix with same marginals"""
# SLOW algorithm
expanded_row = []
for i, row_total in enumerate(matrix.sum(axis=1)):
expanded_row.extend([i] * row_total)
expanded_col = []
for i, col_total in enumerate(matrix.sum(axis=0)):
expanded_col.extend([i] * col_total)
shuffled = zeros(matrix.shape, dtype=int)
shuffle(expanded_col)
for i, j in zip(expanded_row, expanded_col):
shuffled[i, j] += 1
return shuffled
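def _example_shuffled_matrix():
    """Illustrative sketch (not part of the library API): resampling a small 2x2
    table with shuffled_matrix preserves the row and column totals, which is what
    the permutation-based p-value below relies on."""
    from numpy import array
    obs = array([[10, 5], [2, 8]])
    resampled = shuffled_matrix(obs)
    assert (resampled.sum(axis=0) == obs.sum(axis=0)).all()
    assert (resampled.sum(axis=1) == obs.sum(axis=1)).all()
    return resampled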
# todo following functions should be moved into stats.test and replace
# or merge with the older implementations
def calc_expected(observed, pseudo_count=0):
"""returns the expected array from product of marginal frequencies"""
if pseudo_count and (observed == 0).any():
observed = observed.copy()
observed += pseudo_count
num_dim = len(observed.shape)
if num_dim == 2:
rsum = observed.sum(axis=1)
rfreq = rsum / rsum.sum()
csum = observed.sum(axis=0)
cfreq = csum / csum.sum()
expecteds = outer(rfreq, cfreq) * rsum.sum()
elif num_dim == 1:
expecteds = [observed.mean()] * observed.shape[0]
else:
raise NotImplementedError("too many dimensions")
return expecteds
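def _example_calc_expected():
    """Illustrative sketch (not part of the library API): for a 2x2 table the
    expected count of each cell is row_total * column_total / grand_total, e.g.
    for [[10, 5], [2, 8]] the top-left expectation is 15 * 12 / 25 = 7.2."""
    from numpy import array
    obs = array([[10, 5], [2, 8]])
    return calc_expected(obs)  # approximately [[7.2, 7.8], [4.8, 5.2]]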
def calc_chisq(observed, expected):
"""returns the chisq statistic for the two numpy arrays"""
stat = (observed - expected) ** 2
stat = (stat / expected).sum()
return stat
def calc_G(observed, expected, pseudo_count=0, williams=True):
"""returns the G statistic for the two numpy arrays"""
num_dim = len(observed.shape)
df = observed.shape[0] - 1
if num_dim == 2:
df *= observed.shape[1] - 1
non_zero = observed != 0
if not non_zero.all():
G = (
2
* (
observed[non_zero] * (log(observed[non_zero]) - log(expected[non_zero]))
).sum()
)
else:
G = 2 * (observed * (log(observed) - log(expected))).sum()
if williams and num_dim > 1:
total = observed.sum()
denom = 6 * total * df
q = (
1
+ ((total / observed.sum(axis=0)).sum() - 1)
* ((total / observed.sum(axis=1)).sum() - 1)
/ denom
)
G /= q
return G
def estimate_pval(observed, stat_func, num_reps=1000):
"""returns p-value from resampling of observed
valid for 2D categorical data only"""
expected = calc_expected(observed)
obs_stat = stat_func(observed, expected)
num_gt = 0
for i in range(num_reps):
resamp_obs = shuffled_matrix(observed)
resamp_exp = calc_expected(resamp_obs)
resamp_stat = stat_func(resamp_obs, resamp_exp)
if resamp_stat >= obs_stat:
num_gt += 1
return num_gt / num_reps
class _format_row_cell:
"""class for handling html formatting of rows"""
def __init__(self, row_labels):
self.row_labels = row_labels
def __call__(self, val, row, col):
if val in self.row_labels:
result = f"<td><b>{val}<b></td>"
else:
result = f'<td style="text-align:right">{val}</td>'
return result
class CategoryCounts:
"""CategoryCounts for performing contingency tests
Has attributes for observed, expected and residuals.
The latter is calculated using the G-test, for goodness-of-fit if expecteds
are provided, G-test of independence if not provided.
"""
def __init__(self, observed, expected=None):
"""Parameters
-------------
observed
a DictArray instance, or something that can be converted to one
expected
provide in the case where you know the prior proportions, otherwise
calculated from marginal frequencies
"""
if not isinstance(observed, DictArray):
observed = DictArray(observed)
if expected:
expected = observed.template.wrap(expected)
        if observed.array.min() < 0 or (expected and expected.array.min() < 0):
raise ValueError("negative values encountered")
self._observed = observed
self._expected = expected
self._residuals = None
self._df = None
self.shape = observed.shape
def _get_repr_(self, html=False):
obs = self.observed.array.tolist()
exp = self.expected.array.tolist()
res = self.residuals.array.tolist()
ndim = len(self.observed.shape)
if ndim == 1:
row_labels = "Observed", "Expected", "Residuals"
row_cell_func = _format_row_cell(row_labels)
col_labels = [str(c) for c in self.observed.template.names[0]]
rows = []
# format floats for expecteds and resid
for row_label, row in zip(row_labels, [obs, exp, res]):
if row_label == "Observed":
row = [row_label] + [f"{v:,}" for v in row]
else:
row = [row_label] + [f"{v:,.2f}" for v in row]
rows.append(row)
if html:
rows = rich_html(
rows,
header=[""] + col_labels,
row_cell_func=row_cell_func,
merge_identical=False,
)
else:
header, rows = formatted_cells(rows, header=[""] + col_labels)
rows = simple_format(header, rows)
else:
row_labels = self.observed.template.names[0]
col_labels = self.observed.template.names[1]
row_cell_func = _format_row_cell(row_labels)
result = []
for caption, table in zip(
("Observed", "Expected", "Residuals"), (obs, exp, res)
):
rows = []
for i, r in enumerate(table):
if caption == "Observed":
r = [f"{v:,}" for v in r]
else:
r = [f"{v:,.2f}" for v in r]
rows.append([row_labels[i]] + r)
if html:
result.append(
rich_html(
rows,
header=[""] + col_labels,
caption=f"<b>{caption}</b>",
row_cell_func=row_cell_func,
merge_identical=False,
)
)
else:
header, rows = formatted_cells(rows, header=[""] + col_labels)
result.append(simple_format(header, rows, title=caption))
joiner = "<br>" if html else "\n"
rows = joiner.join(result)
return rows
def _repr_html_(self):
result = self._get_repr_(html=True)
return result
def __repr__(self):
result = self._get_repr_(html=False)
return result
def __str__(self):
return self._get_repr_(html=False)
@property
def observed(self):
return self._observed
@property
def expected(self):
if not self._expected:
expecteds = calc_expected(self.observed.array)
expecteds = self.observed.template.wrap(expecteds)
self._expected = expecteds
return self._expected
@property
def residuals(self):
if not self._residuals:
r = self.observed.array - self.expected.array
r /= sqrt(self.expected.array)
self._residuals = self.observed.template.wrap(r)
return self._residuals
@property
def df(self):
if not self._df:
self._df = self.shape[0] - 1
if len(self.shape) == 2:
self._df *= self.shape[1] - 1
return self._df
def chisq_test(self, shuffled=0):
"""performs the chisq test
Parameters
----------
shuffled
pvalue is estimated via resampling from the observed data,
preserving the marginals
"""
stat = calc_chisq(self.observed.array, self.expected.array)
if not shuffled:
pval = chisqprob(stat, self.df)
else:
pval = estimate_pval(self.observed.array, calc_chisq, num_reps=shuffled)
title = "Chisq-test for independence"
result = TestResult(
self.observed,
self.expected,
self.residuals,
"chisq",
stat,
self.df,
pval,
test_name=title,
)
return result
def G_independence(self, pseudo_count=0, williams=True, shuffled=0):
"""performs the independence G test
Parameters
----------
pseudo_count : int
added to observed to avoid zero division
shuffled : int
pvalue is estimated via resampling shuffled times from the observed
data, preserving the marginals
"""
assert type(pseudo_count) == int, f"{pseudo_count} not an integer"
assert type(shuffled) == int, f"{shuffled} not an integer"
G = calc_G(
self.observed.array,
self.expected.array,
pseudo_count=pseudo_count,
williams=williams,
)
if not shuffled:
pval = chisqprob(G, self.df)
else:
pval = estimate_pval(self.observed.array, calc_G, num_reps=shuffled)
title = "G-test for independence"
if williams:
title = f"{title} (with Williams correction)"
result = TestResult(
self.observed,
self.expected,
self.residuals,
"G",
G,
self.df,
pval,
test_name=title,
)
return result
def G_fit(self, pseudo_count=0, williams=True):
"""performs the goodness-of-fit G test"""
assert type(pseudo_count) == int, f"{pseudo_count} not an integer"
obs = self.observed.array
if pseudo_count:
obs += pseudo_count
G, pval = G_fit(obs.flatten(), self.expected.array.flatten(), williams=williams)
title = "G-test goodness-of-fit"
if williams:
title = f"{title} (with Williams correction)"
result = TestResult(
self.observed,
self.expected,
self.residuals,
"G",
G,
self.df,
pval,
test_name=title,
)
return result
def to_dict(self):
result = dict(
observed=self.observed.to_dict(),
expected=self.expected.to_dict(),
residuals=self.residuals.to_dict(),
)
return result
class TestResult:
"""result of a contingency test"""
def __init__(
self, observed, expected, residuals, stat_name, stat, df, pvalue, test_name=""
):
"""
Parameters
----------
observed
observed counts as a DictArray
expected
expected counts as a DictArray
residuals
Pearson residuals between observed and expected as a DictArray
stat_name
Name of the statistic, e.g. G, chisq.
stat : float
value of the statistic
df : int
degrees of freedom for the hypothesis test
pvalue
pvalue from the hypothesis test
test_name
name of the statistical test
"""
self.pvalue = pvalue
self.df = df
self.stat = stat
self.test_name = test_name
self.stat_name = stat_name
self.residuals = residuals
self.expected = expected
self.observed = observed
setattr(self, stat_name, stat)
def _get_repr_(self):
header = [str(self.stat_name), "df", "pvalue"]
if self.pvalue > 1e-3:
pval = f"{self.pvalue:.4f}"
else:
pval = f"{self.pvalue:.4e}"
rows = [[f"{self.stat:.3f}", f"{self.df}", pval]]
return header, rows
def __repr__(self):
h, r = self._get_repr_()
h, r = formatted_cells(r, header=h)
result = simple_format(h, r, title=self.test_name)
components = CategoryCounts(
self.observed.to_dict(), expected=self.expected.to_dict()
)
result = [result, str(components)]
return "\n".join(result)
def __str__(self):
return repr(self)
def _repr_html_(self):
from cogent3.util.table import Table
h, r = self._get_repr_()
table = Table(h, r, title=self.test_name)
components = CategoryCounts(
self.observed.to_dict(), expected=self.expected.to_dict()
)
html = [table._repr_html_(include_shape=False), components._repr_html_()]
return "\n".join(html)
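def _example_category_counts():
    """Illustrative usage sketch (assumed data, not part of the library API):
    build a CategoryCounts from a 2x2 table of counts and run the tests of
    independence defined above."""
    data = DictArrayTemplate(["rest", "stim"], ["A", "B"]).wrap([[10, 5], [2, 8]])
    cc = CategoryCounts(data)
    chisq_result = cc.chisq_test()
    g_result = cc.G_independence(williams=True)
    return chisq_result.pvalue, g_result.pvalue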
|
the-stack_106_18594
|
import logging
import logging.handlers
def setup_logger(logger_name, log_file, level=logging.DEBUG):
logger = logging.getLogger(logger_name)
formatter = logging.Formatter("[%(levelname)s : %(filename)s - %(lineno)s : %(asctime)s ]: %(message)s")
fileHandler = logging.handlers.RotatingFileHandler(log_file, maxBytes=1*1024*1024*1024, backupCount=5)
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(fileHandler)
logger.addHandler(streamHandler)
return logger
__logger = None
def get_logger(logFileName=None):
global __logger
logFile = logFileName
if not __logger:
if not logFile:
logFile = "applog.log"
__logger = setup_logger('debug', "/opt/logs/alert_app/" + logFile)
return __logger
|
the-stack_106_18597
|
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Tensor quantizer for tf 2 keras """
import abc
import tensorflow as tf
from aimet_common.defs import MAP_QUANT_SCHEME_TO_PYMO, MAP_ROUND_MODE_TO_PYMO, QuantScheme
from aimet_common.quantsim import calculate_delta_offset
from aimet_common.utils import AimetLogger
from aimet_tensorflow.keras.quant_sim.quantsim_straight_through_grad import qc_straight_through_estimator_grad, \
quantsim_custom_grad_learned_grid
import libpymo # pylint: disable=import-error
_logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Quant)
def _load_ops():
"""
Function which loads the quantization op library. In order to load a graph with
custom quantization ops this must be called first as this provides tensorflow with
the required op definitions.
:return: Loaded library
"""
return tf.load_op_library('libaimet_tf_ops.so')
# Load the aimet ops
qcops = _load_ops()
class TensorQuantizer(tf.keras.layers.Layer, abc.ABC):
""" Tensor quantizer class containing cpp tensor quantizer and associated attributes """
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
def __init__(self, name: str, op_mode: libpymo.TensorQuantizerOpMode, quant_scheme: libpymo.QuantizationMode,
round_mode: libpymo.RoundingMode, bitwidth: int, is_symmetric: bool, use_strict_symmetric: bool,
use_unsigned_symmetric: bool, **kwargs):
super(TensorQuantizer, self).__init__(name=name)
self._quant_scheme = quant_scheme
self._tensor_quantizer = libpymo.TensorQuantizer(MAP_QUANT_SCHEME_TO_PYMO[quant_scheme],
MAP_ROUND_MODE_TO_PYMO[round_mode])
self._tensor_quantizer.setStrictSymmetric(use_strict_symmetric)
self._tensor_quantizer.setUnsignedSymmetric(use_unsigned_symmetric)
self._bitwidth = self.add_weight(name + '.bitwidth', dtype=tf.int8,
initializer=tf.constant_initializer(bitwidth), trainable=False)
self._is_symmetric = self.add_weight(name + '.is_symmetric', dtype=tf.bool,
initializer=tf.constant_initializer(is_symmetric), trainable=False)
self._encoding_min = self.add_weight(name + '.encoding_min', dtype=tf.float64, trainable=True,
initializer=tf.constant_initializer(0.))
self._encoding_max = self.add_weight(name + '.encoding_max', dtype=tf.float64, trainable=True,
initializer=tf.constant_initializer(0.))
self._quantizer_mode = self.add_weight(name + '.op_mode', dtype=tf.int32, trainable=False,
initializer=tf.constant_initializer(int(op_mode)))
# Use this flag to determine if encoding min and max values are fit to be used. Ex. Can be set to True after
# compute encodings has been called, or after encodings have been set by passing in a libpymo TfEncoding object.
# Can set to False upon changing things like quant scheme, bitwidth, is symmetric, etc.
self._is_encoding_valid = False
@property
def quant_scheme(self):
""" Quant scheme getter """
return self._quant_scheme
@quant_scheme.setter
def quant_scheme(self, quant_scheme: QuantScheme):
""" Quant scheme setter """
self._tensor_quantizer.setQuantScheme(MAP_QUANT_SCHEME_TO_PYMO[quant_scheme])
self._quant_scheme = quant_scheme
self.reset_quant_mode()
@property
def round_mode(self):
""" Quant scheme getter """
return self._tensor_quantizer.roundingMode
@round_mode.setter
def round_mode(self, round_mode: str):
""" Round mode setter """
self._tensor_quantizer.roundingMode = MAP_ROUND_MODE_TO_PYMO[round_mode]
self.reset_quant_mode()
@property
def bitwidth(self):
""" Bitwidth getter """
return tf.keras.backend.get_value(self._bitwidth)
@bitwidth.setter
def bitwidth(self, bitwidth: int):
""" Bitwidth setter """
self._bitwidth.assign(bitwidth)
self.reset_quant_mode()
@property
def is_symmetric(self):
""" Is symmetric getter """
return tf.keras.backend.get_value(self._is_symmetric)
@is_symmetric.setter
def is_symmetric(self, is_symmetric: bool):
""" Is symmetric setter """
self._is_symmetric.assign(is_symmetric)
self.reset_quant_mode()
@property
def use_strict_symmetric(self):
""" Use strict symmetric getter """
return self._tensor_quantizer.getStrictSymmetric()
@use_strict_symmetric.setter
def use_strict_symmetric(self, use_strict_symmetric: bool):
""" Use strict symmetric setter """
self._tensor_quantizer.setStrictSymmetric(use_strict_symmetric)
self.reset_quant_mode()
@property
def use_unsigned_symmetric(self):
""" Use unsigned symmetric getter """
return self._tensor_quantizer.getUnsignedSymmetric()
@use_unsigned_symmetric.setter
def use_unsigned_symmetric(self, use_unsigned_symmetric: bool):
""" Use unsigned symmetric setter """
self._tensor_quantizer.setUnsignedSymmetric(use_unsigned_symmetric)
self.reset_quant_mode()
@property
def encoding(self) -> libpymo.TfEncoding:
""" Get encodings in libpymo form """
if self._is_encoding_valid:
encodings = libpymo.TfEncoding()
# pylint: disable = protected-access
encodings.min = tf.keras.backend.get_value(self._encoding_min)
encodings.max = tf.keras.backend.get_value(self._encoding_max)
encodings.delta, encodings.offset = calculate_delta_offset(encodings.min, encodings.max, self.bitwidth)
encodings.bw = self.bitwidth
return encodings
return None
@encoding.setter
def encoding(self, encodings: libpymo.TfEncoding):
"""
Sets encoding parameter using values obtained from encodings
:param encodings: encodings value
"""
assert encodings is not None, "Encodings cannot be None if Quantizer is enabled"
assert isinstance(encodings, libpymo.TfEncoding), "Encodings should be a libpymo.TfEncoding() object"
self.bitwidth = encodings.bw
self._encoding_min.assign(encodings.min)
self._encoding_max.assign(encodings.max)
self._is_encoding_valid = True
@property
def quant_mode(self):
""" Get quant mode """
return tf.keras.backend.get_value(self._quantizer_mode)
@abc.abstractmethod
def enable(self):
""" Enable the tensor quantizer """
def disable(self):
""" Disable the tensor quantizer """
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.passThrough))
def is_enabled(self) -> bool:
""" Return True if the tensor quantizer is enabled, False otherwise """
return self.quant_mode != int(libpymo.TensorQuantizerOpMode.passThrough)
def compute_encoding(self):
""" Compute encoding for the tensor quantizer """
if self.quant_mode != int(libpymo.TensorQuantizerOpMode.passThrough):
# TODO: remove last two parameters after fixing PyModelOptimizations
encoding = self._tensor_quantizer.computeEncoding(self.bitwidth, self.is_symmetric, False, False)
if self._tensor_quantizer.isEncodingValid:
self._encoding_min.assign(encoding.min)
self._encoding_max.assign(encoding.max)
if self.quant_mode == int(libpymo.TensorQuantizerOpMode.updateStats):
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.quantizeDequantize))
self._is_encoding_valid = True
else:
_logger.info('Tensor quantizer %s did not have a valid encoding calculated, and has been set to '
'passThrough mode.', self.name)
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.passThrough))
def reset_quant_mode(self):
""" Reset quantizer mode if applicable """
if self.quant_mode == int(libpymo.TensorQuantizerOpMode.quantizeDequantize):
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.updateStats))
self._is_encoding_valid = False
# pylint: disable=arguments-differ
def call(self, tensor):
"""
Forward pass for the quantizer
"""
if self.quant_scheme in [QuantScheme.training_range_learning_with_tf_init,
QuantScheme.training_range_learning_with_tf_enhanced_init]:
return self.call_quantsim_custom_grad_learned_grid(tensor)
return self.call_quantize_straight_through_estimator_grad(tensor)
@tf.custom_gradient
def call_quantize_straight_through_estimator_grad(self, tensor):
"""
Quantizes tensor with straight through estimator grad
:param tensor: Tensor to quantize
"""
def grad(upstream, variables):
"""
Straight through estimator grad function
:param upstream: Gradient from child layers
:param variables: Variables used in forward pass to return gradients for
"""
assert len(variables) == 2, 'len variables is ' + str(len(variables))
assert 'encoding_min' in variables[0].name
return qc_straight_through_estimator_grad(tensor, self._encoding_min, self._encoding_max,
self._quantizer_mode, upstream)
return qcops.qc_quantize(name='qc_quantize_op', in_tensor=tensor,
op_mode=self._quantizer_mode,
tensor_quantizer_reference=libpymo.PtrToInt64(self._tensor_quantizer),
encoding_min=self._encoding_min,
encoding_max=self._encoding_max,
bit_width=self._bitwidth,
use_symmetric_encoding=self._is_symmetric), grad
@tf.custom_gradient
def call_quantsim_custom_grad_learned_grid(self, tensor):
"""
Quantizes tensor with range learning grad
:param tensor: Tensor to quantize
"""
def grad(upstream, variables):
"""
Range learning grad function
:param upstream: Gradient from child layers
:param variables: Variables used in forward pass to return gradients for
"""
assert len(variables) == 2, 'len variables is ' + str(len(variables))
assert 'encoding_min' in variables[0].name
return quantsim_custom_grad_learned_grid(tensor, self._encoding_min, self._encoding_max,
self._quantizer_mode, self._bitwidth, self._is_symmetric,
upstream)
return qcops.qc_quantize(name='qc_quantize_op', in_tensor=tensor,
op_mode=self._quantizer_mode,
tensor_quantizer_reference=libpymo.PtrToInt64(self._tensor_quantizer),
encoding_min=self._encoding_min,
encoding_max=self._encoding_max,
bit_width=self._bitwidth,
use_symmetric_encoding=self._is_symmetric), grad
# pylint: disable=too-many-ancestors
class ActivationTensorQuantizer(TensorQuantizer):
""" Activation tensor quantizer definition """
# pylint: disable=too-many-arguments
def __init__(self, name: str, quant_scheme: libpymo.QuantizationMode,
round_mode: libpymo.RoundingMode, bitwidth: int, is_symmetric: bool, use_strict_symmetric: bool,
use_unsigned_symmetric: bool, enabled: bool):
if enabled:
op_mode = libpymo.TensorQuantizerOpMode.updateStats
else:
op_mode = libpymo.TensorQuantizerOpMode.passThrough
super(ActivationTensorQuantizer, self).__init__(name, op_mode, quant_scheme, round_mode, bitwidth, is_symmetric,
use_strict_symmetric, use_unsigned_symmetric)
def enable(self):
""" Enable the activation tensor quantizer """
if self._is_encoding_valid:
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.quantizeDequantize))
else:
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.updateStats))
# pylint: disable=too-many-ancestors
class ParamTensorQuantizer(TensorQuantizer):
""" Parameter tensor quantizer definition """
# pylint: disable=too-many-arguments
def __init__(self, name: str, quant_scheme: libpymo.QuantizationMode,
round_mode: libpymo.RoundingMode, bitwidth: int, is_symmetric: bool, use_strict_symmetric: bool,
use_unsigned_symmetric: bool, enabled: bool):
if enabled:
op_mode = libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize
else:
op_mode = libpymo.TensorQuantizerOpMode.passThrough
super(ParamTensorQuantizer, self).__init__(name, op_mode, quant_scheme, round_mode, bitwidth, is_symmetric,
use_strict_symmetric, use_unsigned_symmetric)
def enable(self):
""" Enable the parameter tensor quantizer """
self._quantizer_mode.assign(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize))
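# Minimal usage sketch (assumed values, not part of AIMET itself): build an
# activation quantizer, run data through it to gather statistics, then freeze the
# encodings before quantize-dequantize inference. QuantScheme.post_training_tf and
# the 'nearest' round mode are assumed to be valid keys of the scheme/round maps.
def _example_activation_quantizer():
    quantizer = ActivationTensorQuantizer(name='relu_output',
                                          quant_scheme=QuantScheme.post_training_tf,
                                          round_mode='nearest',
                                          bitwidth=8,
                                          is_symmetric=False,
                                          use_strict_symmetric=False,
                                          use_unsigned_symmetric=False,
                                          enabled=True)
    dummy = tf.random.uniform((1, 16))
    _ = quantizer(dummy)          # updateStats mode collects min/max statistics
    quantizer.compute_encoding()  # switches to quantizeDequantize when stats are valid
    return quantizer(dummy)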
|
the-stack_106_18598
|
# -*- coding: utf-8 -*-
"""Reuters topic classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..utils.data_utils import get_file
from ..preprocessing.sequence import _remove_long_seq
import numpy as np
import json
import warnings
def load_data(path='reuters.npz', num_words=None, skip_top=0,
maxlen=None, test_split=0.2, seed=113,
start_char=1, oov_char=2, index_from=3, **kwargs):
"""Loads the Reuters newswire classification dataset.
# Arguments
path: where to cache the data (relative to `~/.keras/dataset`).
num_words: max number of words to include. Words are ranked
by how often they occur (in the training set) and only
the most frequent words are kept
skip_top: skip the top N most frequently occurring words
(which may not be informative).
maxlen: truncate sequences after this length.
test_split: Fraction of the dataset to be used as test data.
seed: random seed for sample shuffling.
start_char: The start of a sequence will be marked with this character.
Set to 1 because 0 is usually the padding character.
oov_char: words that were cut out because of the `num_words`
or `skip_top` limit will be replaced with this character.
index_from: index actual words with this index and higher.
# Returns
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
Note that the 'out of vocabulary' character is only used for
words that were present in the training set but are not included
because they're not making the `num_words` cut here.
Words that were not seen in the training set but are in the test set
have simply been skipped.
"""
# Legacy support
if 'nb_words' in kwargs:
warnings.warn('The `nb_words` argument in `load_data` '
'has been renamed `num_words`.')
num_words = kwargs.pop('nb_words')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
path = get_file(path,
origin='https://s3.amazonaws.com/text-datasets/reuters.npz',
file_hash='87aedbeb0cb229e378797a632c1997b6')
with np.load(path) as f:
xs, labels = f['x'], f['y']
rng = np.random.RandomState(seed)
indices = np.arange(len(xs))
rng.shuffle(indices)
xs = xs[indices]
labels = labels[indices]
if start_char is not None:
xs = [[start_char] + [w + index_from for w in x] for x in xs]
elif index_from:
xs = [[w + index_from for w in x] for x in xs]
if maxlen:
xs, labels = _remove_long_seq(maxlen, xs, labels)
if not num_words:
num_words = max([max(x) for x in xs])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters:
# 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
xs = [[w if skip_top <= w < num_words else oov_char for w in x] for x in xs]
else:
xs = [[w for w in x if skip_top <= w < num_words] for x in xs]
idx = int(len(xs) * (1 - test_split))
x_train, y_train = np.array(xs[:idx]), np.array(labels[:idx])
x_test, y_test = np.array(xs[idx:]), np.array(labels[idx:])
return (x_train, y_train), (x_test, y_test)
def get_word_index(path='reuters_word_index.json'):
"""Retrieves the dictionary mapping words to word indices.
# Arguments
path: where to cache the data (relative to `~/.keras/dataset`).
# Returns
The word index dictionary.
"""
path = get_file(
path,
origin='https://s3.amazonaws.com/text-datasets/reuters_word_index.json',
file_hash='4d44cc38712099c9e383dc6e5f11a921')
with open(path) as f:
return json.load(f)
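# Minimal usage sketch (network access or cached files assumed; not part of the
# Keras API itself): load the dataset with a 10,000-word vocabulary and decode the
# first training sequence back to words using get_word_index().
def _example_load_reuters():
    (x_train, y_train), (x_test, y_test) = load_data(num_words=10000)
    word_index = get_word_index()
    # shift by index_from (3) to undo the offset applied in load_data
    index_to_word = {index + 3: word for word, index in word_index.items()}
    decoded = ' '.join(index_to_word.get(i, '?') for i in x_train[0])
    return decoded, y_train[0]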
|
the-stack_106_18599
|
#
# 27. Remove Element
#
# Given an array nums and a value val, remove all instances of that value in-
# place and return the new length.
#
# Do not allocate extra space for another array; you must do this by modifying
# the input array in-place with O(1) extra memory.
#
# The order of elements can be changed. It doesn't matter what you leave beyond
# the new length.
#
# Example 1:
# Given nums = [3, 2, 2, 3], val = 3,
# Your function should return length = 2, with the first two elements of
# nums being 2.
# It doesn't matter what you leave beyond the returned length.
#
# Example 2:
# Given nums = [0,1,2,2,3,0,4,2], val = 2,
# Your function should return length = 5, with the first five elements of
# nums containing 0, 1, 3, 0, and 4.
# Note that the order of those five elements can be arbitrary.
# It doesn't matter what you leave beyond the returned length.
#
# Clarification:
#
# Confused why the returned value is an integer but your answer is an array?
#
# Note that the input array is passed in by reference, which means modification
# to the input array will be known to the caller as well.
#
# Internally you can think of it like this:
# // nums is passed in by reference. (i.e., without making a copy)
# int len = removeElement(nums, val);
#
# // any modification to nums in your function would be known by the caller
# // using the length returned by your function, it prints the first len elements
# for (int i = 0; i < len; i++) {
# print(nums[i]);
# }
class Solution(object):
    # Two-pointer approach: slow tracks the write position for kept elements.
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
slow, fast = 0, 0
while fast < len(nums):
if nums[fast] != val:
nums[slow] = nums[fast]
slow += 1
fast += 1
        return slow
    # Alternative approach: swap removed elements with the last element. Note this
    # second definition of removeElement overrides the two-pointer version above.
    def removeElement(self, nums, val):
i = 0
length = len(nums)
while i < length:
if nums[i] == val:
nums[i] = nums[length - 1]
length -= 1
else:
i += 1
return length
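if __name__ == "__main__":
    # Quick sanity check using the second example from the problem statement above;
    # note that the second definition of removeElement is the one bound to the class.
    nums = [0, 1, 2, 2, 3, 0, 4, 2]
    k = Solution().removeElement(nums, 2)
    print(k, sorted(nums[:k]))  # expected: 5 [0, 0, 1, 3, 4]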
|
the-stack_106_18601
|
from __future__ import unicode_literals
"""
Copyright OpenSearch Contributors
SPDX-License-Identifier: Apache-2.0
"""
import click
import re
import pyfiglet
import os
import json
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.shortcuts import PromptSession
from prompt_toolkit.filters import HasFocus, IsDone
from prompt_toolkit.lexers import PygmentsLexer
from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from pygments.lexers.sql import SqlLexer
from .config import get_config
from .opensearch_connection import OpenSearchConnection
from .opensearch_buffer import opensearch_is_multiline
from .opensearch_style import style_factory, style_factory_output
from .formatter import Formatter
from .utils import OutputSettings
from . import __version__
# Ref: https://stackoverflow.com/questions/30425105/filter-special-chars-such-as-color-codes-from-shell-output
COLOR_CODE_REGEX = re.compile(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))")
click.disable_unicode_literals_warning = True
class OpenSearchSqlCli:
"""OpenSearchSqlCli instance is used to build and run the OpenSearch SQL CLI."""
def __init__(self, clirc_file=None, always_use_pager=False, use_aws_authentication=False, query_language="sql"):
# Load conf file
config = self.config = get_config(clirc_file)
literal = self.literal = self._get_literals()
self.prompt_app = None
self.opensearch_executor = None
self.query_language = query_language
self.always_use_pager = always_use_pager
self.use_aws_authentication = use_aws_authentication
self.keywords_list = literal["keywords"]
self.functions_list = literal["functions"]
self.syntax_style = config["main"]["syntax_style"]
self.cli_style = config["colors"]
self.table_format = config["main"]["table_format"]
self.multiline_continuation_char = config["main"]["multiline_continuation_char"]
self.multi_line = config["main"].as_bool("multi_line")
self.multiline_mode = config["main"].get("multi_line_mode", "src")
self.null_string = config["main"].get("null_string", "null")
self.style_output = style_factory_output(self.syntax_style, self.cli_style)
def build_cli(self):
# TODO: Optimize index suggestion to serve indices options only at the needed position, such as 'from'
indices_list = self.opensearch_executor.indices_list
sql_completer = WordCompleter(self.keywords_list + self.functions_list + indices_list, ignore_case=True)
# https://stackoverflow.com/a/13726418 denote multiple unused arguments of callback in Python
def get_continuation(width, *_):
continuation = self.multiline_continuation_char * (width - 1) + " "
return [("class:continuation", continuation)]
prompt_app = PromptSession(
lexer=PygmentsLexer(SqlLexer),
completer=sql_completer,
complete_while_typing=True,
# TODO: add history, refer to pgcli approach
# history=history,
style=style_factory(self.syntax_style, self.cli_style),
prompt_continuation=get_continuation,
multiline=opensearch_is_multiline(self),
auto_suggest=AutoSuggestFromHistory(),
input_processors=[
ConditionalProcessor(
processor=HighlightMatchingBracketProcessor(chars="[](){}"),
filter=HasFocus(DEFAULT_BUFFER) & ~IsDone(),
)
],
tempfile_suffix=".sql",
)
return prompt_app
def run_cli(self):
"""
        Print the welcome banner, run the CLI, and keep listening to the user's
        input; print a goodbye message on exit.
"""
self.prompt_app = self.build_cli()
settings = OutputSettings(
max_width=self.prompt_app.output.get_size().columns,
style_output=self.style_output,
table_format=self.table_format,
missingval=self.null_string,
)
# print Banner
banner = pyfiglet.figlet_format("OpenSearch", font="slant")
print(banner)
# print info on the welcome page
print("Server: OpenSearch %s" % self.opensearch_executor.opensearch_version)
print("CLI Version: %s" % __version__)
print("Endpoint: %s" % self.opensearch_executor.endpoint)
print("Query Language: %s" % self.query_language)
while True:
try:
text = self.prompt_app.prompt(message="opensearchsql> ")
except KeyboardInterrupt:
continue # Control-C pressed. Try again.
except EOFError:
break # Control-D pressed.
try:
output = self.opensearch_executor.execute_query(text)
if output:
formatter = Formatter(settings)
formatted_output = formatter.format_output(output)
self.echo_via_pager("\n".join(formatted_output))
except Exception as e:
print(repr(e))
print("See you next search!")
def is_too_wide(self, line):
"""Will this line be too wide to fit into terminal?"""
if not self.prompt_app:
return False
return len(COLOR_CODE_REGEX.sub("", line)) > self.prompt_app.output.get_size().columns
def is_too_tall(self, lines):
"""Are there too many lines to fit into terminal?"""
if not self.prompt_app:
return False
return len(lines) >= (self.prompt_app.output.get_size().rows - 4)
def echo_via_pager(self, text, color=None):
lines = text.split("\n")
if self.always_use_pager:
click.echo_via_pager(text, color=color)
elif self.is_too_tall(lines) or any(self.is_too_wide(l) for l in lines):
click.echo_via_pager(text, color=color)
else:
click.echo(text, color=color)
def connect(self, endpoint, http_auth=None):
self.opensearch_executor = OpenSearchConnection(endpoint, http_auth, self.use_aws_authentication, self.query_language)
self.opensearch_executor.set_connection()
def _get_literals(self):
"""Parse "opensearch_literals.json" with literal type of SQL "keywords" and "functions", which
are SQL keywords and functions supported by OpenSearch SQL Plugin.
:return: a dict that is parsed from opensearch_literals.json
"""
from .opensearch_literals import __file__ as package_root
package_root = os.path.dirname(package_root)
literal_file = os.path.join(package_root, "opensearch_literals.json")
with open(literal_file) as f:
literals = json.load(f)
return literals
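# Minimal usage sketch (assumed endpoint, not part of the module): build the CLI
# against a local OpenSearch instance and start the interactive prompt.
def _example_run_cli(endpoint="http://localhost:9200"):
    cli = OpenSearchSqlCli(always_use_pager=False, query_language="sql")
    cli.connect(endpoint)
    cli.run_cli()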
|
the-stack_106_18603
|
#!/usr/bin/env python
import rospy
import cv2 as cv
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
import tf
from tf.broadcaster import TransformBroadcaster
import tf_conversions
from mycobot_communication.srv import (
GetCoords,
SetCoords,
GetAngles,
SetAngles,
GripperStatus,
)
class ImageConverter:
def __init__(self):
self.br = TransformBroadcaster()
self.bridge = CvBridge()
self.aruco_dict = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250)
self.aruo_params = cv.aruco.DetectorParameters_create()
calibrationParams = cv.FileStorage(
"calibrationFileName.xml", cv.FILE_STORAGE_READ
)
self.dist_coeffs = calibrationParams.getNode("distCoeffs").mat()
self.camera_matrix = None
# subscriber, listen wether has img come in.
self.image_sub = rospy.Subscriber("/camera/image", Image, self.callback)
def callback(self, data):
"""Callback function.
        Process the image with OpenCV and detect the ArUco marker to get its pose,
        then broadcast the corresponding transform.
"""
try:
# trans `rgb` to `gbr` for opencv.
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
size = cv_image.shape
focal_length = size[1]
center = [size[1] / 2, size[0] / 2]
if self.camera_matrix is None:
# calc the camera matrix, if don't have.
self.camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype=np.float32,
)
gray = cv.cvtColor(cv_image, cv.COLOR_BGR2GRAY)
# detect aruco marker.
ret = cv.aruco.detectMarkers(gray, self.aruco_dict, parameters=self.aruo_params)
corners, ids = ret[0], ret[1]
# process marker data.
if len(corners) > 0:
if ids is not None:
# print('corners:', corners, 'ids:', ids)
# detect marker pose.
# argument:
# marker corners
# marker size (meter)
ret = cv.aruco.estimatePoseSingleMarkers(
corners, 0.05, self.camera_matrix, self.dist_coeffs
)
(rvec, tvec) = (ret[0], ret[1])
(rvec - tvec).any()
print("rvec:", rvec, "tvec:", tvec)
# just select first one detected marker.
for i in range(rvec.shape[0]):
cv.aruco.drawDetectedMarkers(cv_image, corners)
cv.aruco.drawAxis(
cv_image,
self.camera_matrix,
self.dist_coeffs,
rvec[i, :, :],
tvec[i, :, :],
0.03,
)
xyz = tvec[0, 0, :]
xyz = [xyz[0] - 0.045, xyz[1], xyz[2] - 0.03]
# get quaternion for ros.
euler = rvec[0, 0, :]
tf_change = tf.transformations.quaternion_from_euler(
euler[0], euler[1], euler[2]
)
print("tf_change:", tf_change)
# trans pose according [joint1]
self.br.sendTransform(
xyz, tf_change, rospy.Time.now(), "basic_shapes", "joint6_flange"
)
# [x, y, z, -172, 3, -46.8]
cv.imshow("Image", cv_image)
cv.waitKey(3)
if __name__ == "__main__":
try:
rospy.init_node("detect_marker")
rospy.loginfo("Starting cv_bridge_test node")
ImageConverter()
rospy.spin()
except KeyboardInterrupt:
print("Shutting down cv_bridge_test node.")
cv.destroyAllWindows()
|
the-stack_106_18604
|
"""
Default statement functions.
"""
from muddery.statements.statement_func_set import BaseStatementFuncSet
import muddery.statements.action as action
import muddery.statements.condition as condition
import muddery.statements.attribute as attribute
import muddery.statements.rand as rand
import muddery.statements.skill as skill
class ActionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in actions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(attribute.FuncSetAttr)
self.add(attribute.FuncRemoveAttr)
self.add(action.FuncLearnSkill)
self.add(action.FuncGiveObject)
self.add(action.FuncRemoveObjects)
self.add(action.FuncTeleportTo)
self.add(action.FuncFightMob)
self.add(action.FuncFightTarget)
class ConditionFuncSet(BaseStatementFuncSet):
"""
Statement functions used in conditions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(condition.FuncIsQuestInProgress)
self.add(condition.FuncCanProvideQuest)
self.add(condition.FuncIsQuestCompleted)
self.add(condition.FuncHasObject)
self.add(attribute.FuncGetAttr)
self.add(attribute.FuncHasAttr)
self.add(attribute.FuncCheckAttr)
self.add(rand.FuncOdd)
self.add(rand.FuncRand)
self.add(rand.FuncRandInt)
class SkillFuncSet(BaseStatementFuncSet):
"""
Statement functions used in actions.
"""
def at_creation(self):
"""
Load statement functions here.
"""
self.add(skill.FuncEscape)
self.add(skill.FuncHeal)
self.add(skill.FuncHit)
self.add(skill.FuncIncreaseMaxHP)
|
the-stack_106_18606
|
from argparse import ArgumentParser
import os
report_name = "report.html"
survivor_graph = "log.png"
net_extension = ".svg"
def write_header(output_file):
html_code = f"""
<html>
<head>
<style>
table, th, td {{
border: 1px solid black;
border-collapse: collapse;
}}
</style>
</head>
<body>
<h1>Biosim report</h1>
<img
src="{survivor_graph}"
/>
"""
output_file.write(html_code)
def write_footer(output_file):
output_file.write('</body></html>')
def write_video(output_file,video_filename):
html_code = f"""
<video class="tab" controls
height="400"
width="400">
<source src="{video_filename}"/>
</video>
"""
output_file.write(html_code)
def write_net_cell(output_file,net_filename):
html_code = f"""
<td>
<img
src="{net_filename}"
height="400"
width="400"
    />
    </td>
    """
output_file.write(html_code)
def process_net_files(output_file,input_dir,generation_prefix):
for filename in os.listdir(input_dir):
if filename.endswith(net_extension) and filename.startswith(generation_prefix):
write_net_cell(output_file,filename)
def process_generation(output_file,input_dir,video_filename,video_extension):
generation_prefix = video_filename.replace(video_extension,'')
generation = generation_prefix.replace('gen-','')
#output_file.write("<tr><td>")
#output_file.write("</td></tr>")
output_file.write("<tr><td>")
output_file.write(f"<h2>Generation: {generation}</h2>")
write_video(output_file,video_filename)
output_file.write("</td>")
process_net_files(output_file,input_dir,generation_prefix)
output_file.write("</tr>")
def process_generations(output_file,input_dir,video_extension):
output_file.write("<table>")
files = []
for filename in os.listdir(input_dir):
if filename.endswith(video_extension):
files.append(filename)
for filename in sorted(files):
process_generation(output_file,input_dir,filename,video_extension)
output_file.write("</table>")
def make_report(input_dir,video_extension):
full_filename = input_dir + os.path.sep + report_name
output_file = open(full_filename, "w")
write_header(output_file)
    process_generations(output_file,input_dir,video_extension)
write_footer(output_file)
output_file.close()
if __name__ == "__main__":
parser = ArgumentParser(description="Convert a net txt file to a graph\n")
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("--dir", "-d", help="Directory with net txt files", type=str,required=True,default=None)
parser.add_argument("--videoextension", "-e", help="Extension of the video files", type=str,default=".ogv")
args = parser.parse_args()
video_extension =args.videoextension
dirname =args.dir
make_report(dirname,video_extension)
|
the-stack_106_18608
|
# coding: utf-8
# # Applying Neutrophil and Monocyte Signatures using Top Features
#
# **Gregory Way, 2018**
#
# Instead of focusing only on two compressed features, like we did in [notebook 3](https://github.com/greenelab/BioBombe/blob/master/8.gtex-interpret/3.apply-signatures.ipynb), we apply all of the signatures in the following plots to their external validation data.
#
# We previously demonstrated that the enrichment of specific genesets (like Neutrophils and Monocytes) differed across algorithms and k dimensions.
# This implies that certain biological features are best captured by different dimensions and algorithms.
# Here, we test whether our network projection scores are associated with the strength of signature separation across different groups of samples.
#
# ## Part 1:
#
# ### Enrichment of Neutrophil Signatures
#
# Publicly available dataset capturing neutrophil differentiation in two leukemia cell lines.
#
# 
#
# ## Part 2:
#
# ### Enrichment of Monocyte Signatures
#
# Publicly available dataset that captures various cell-types, including monocytes, undergoing hematopoiesis.
#
# 
#
# ## Output
#
# We output scores for all of the top compressed features for both validation data sets.
# There are a total of 5 * 28 = 140 scores per dataset
# In[1]:
import os
import sys
import numpy as np
import pandas as pd
from scipy.stats import ttest_ind, ttest_rel
import matplotlib.pyplot as plt
import seaborn as sns
from scripts.utils import (
load_weight_matrix,
apply_signature,
load_enrichment_results,
extract_feature,
)
sys.path.append('../scripts')
from latent import parse_gmt
# In[2]:
genesets = ['Neutrophils_HPCA_2', 'Monocytes_FANTOM_2']
# In[3]:
# Get top scores across algorithms and z dimensions
score_dir = os.path.join("..", "6.biobombe-projection", "results", "gtex",
"gpxcell", "signal")
full_list = []
for score_file in os.listdir(score_dir):
score_file = os.path.join(score_dir, score_file)
top_df = pd.read_table(score_file)
top_df = (
top_df
.query("variable in @genesets")
.assign(abs_z_score=top_df.z_score.abs())
.sort_values(by='abs_z_score', ascending=False)
.groupby(['z', 'algorithm', 'variable'])
.first()
.reset_index()
)
full_list.append(top_df)
# In[4]:
# Confirm extraction worked (note different colors)
full_top_df = pd.concat(full_list)
sns.relplot(x="z", y="abs_z_score", hue="algorithm", data=full_top_df,
col='variable', kind='line')
# In[5]:
# Compile feature matrix for each geneset
neutrophil_list = []
monocyte_list = []
for idx, feature in full_top_df.iterrows():
z_dim = feature.z
seed = feature.seed
algorithm = feature.algorithm
feature_num = feature.feature
geneset = feature.variable
weight_df = load_weight_matrix(dataset='GTEX',
z_dim=z_dim,
seed=seed)
feature_df = extract_feature(weight_df=weight_df,
algorithm=algorithm,
feature=feature_num)
rename_feature = '{}_zdim_{}_seed_{}'.format(feature_df.name, z_dim, seed)
feature_df = feature_df.rename(rename_feature)
if geneset == "Neutrophils_HPCA_2":
neutrophil_list.append(feature_df)
else:
monocyte_list.append(feature_df)
# In[6]:
neutrophil_df = pd.concat(neutrophil_list, axis='columns')
monocyte_df = pd.concat(monocyte_list, axis='columns')
# ## 1.0. Load External Neutrophil Dataset
# In[7]:
file = os.path.join('data', 'GSE103706_processed_matrix.tsv.gz')
geo_scaled_zeroone_df = pd.read_table(file, index_col=0)
print(geo_scaled_zeroone_df.shape)
geo_scaled_zeroone_df.head(2)
# ## 1.1. Apply Signature from All Top Features
# In[8]:
neutrophil_result_df, missing_genes = (
apply_signature(weight_df=neutrophil_df,
other_df=geo_scaled_zeroone_df,
align=True)
)
top_compressed_features = neutrophil_result_df.columns.tolist()
len(missing_genes)
# ## 1.2. Combine Data and Add Phenotype Information
# In[9]:
# Process phenotype data
cell_line = [x[0] for x in neutrophil_result_df.index.str.split(',')]
treatment = [x[1] for x in neutrophil_result_df.index.str.split(',')]
day = [x[2].strip(' ') if 'replicate' not in x[2] else 'day 0'
for x in neutrophil_result_df.index.str.split(',')]
# In[10]:
neutrophil_result_df = (
neutrophil_result_df
.assign(cell_line=cell_line,
treatment=treatment,
day=day)
.reset_index()
.rename(columns={'index': 'full_id'})
)
recode_labels = {' not differentiated': 'control',
' DMSO': 'treated',
' DMSO+Nutridoma': 'treated'}
neutrophil_result_df.treatment = neutrophil_result_df.treatment.replace(recode_labels)
neutrophil_result_df.head(2)
# ## 1.3. Perform t-test on Treatment vs. Control
# In[11]:
ttest_results = []
for compressed_feature in top_compressed_features:
signature_df = neutrophil_result_df.loc[:, [compressed_feature, 'treatment']]
treatment_values = signature_df.query("treatment == 'treated'").iloc[:, 0].values
control_values = signature_df.query("treatment == 'control'").iloc[:, 0].values
t_stat, t_p = ttest_ind(treatment_values, control_values)
ttest_results.append(pd.Series([compressed_feature, t_stat, t_p]))
# In[12]:
t_results_df = pd.concat(ttest_results, axis='columns').transpose()
t_results_df.columns = ['feature', 't_stat', 't_p']
t_results_df = t_results_df.assign(neg_log_p = -np.log10(t_results_df.t_p.astype(np.float64)))
t_results_df.head()
# In[13]:
neutrophils_top_df = full_top_df.query("variable == 'Neutrophils_HPCA_2'")
neutrophils_top_df.head(2)
# ## 1.4. Compile and Output Plotting Data
# In[14]:
final_neutrophil_results_df = (
pd.DataFrame(t_results_df
.feature
.str
.split('_')
.values
.tolist(),
columns=['algorithm',
'feature_num',
"drop_z",
"z_dim",
"drop_seed",
"seed"])
.merge(t_results_df,
left_index=True,
right_index=True)
.drop(['drop_z',
'drop_seed',
'feature'],
axis='columns')
)
final_neutrophil_results_df.loc[:, 'z_dim'] = final_neutrophil_results_df.z_dim.astype(np.int64)
final_neutrophil_results_df.loc[:, 'feature_num'] = final_neutrophil_results_df.feature_num.astype(np.int64)
final_neutrophil_results_df.loc[:, 'seed'] = final_neutrophil_results_df.seed.astype(np.int64)
final_neutrophil_results_df = (
final_neutrophil_results_df
.merge(neutrophils_top_df,
left_on=['algorithm', 'feature_num', 'z_dim', 'seed'],
right_on=['algorithm', 'feature', 'z', 'seed'])
)
final_neutrophil_results_df.head()
# In[15]:
sns.scatterplot(x="z_score", y="t_stat", data=final_neutrophil_results_df)
# In[16]:
file = os.path.join("results", "all_neutrophil_top_scores_and_separation.tsv")
final_neutrophil_results_df.to_csv(file, sep='\t', index=False)
# ## 2.0. Load External Monocyte Dataset
#
# We perform a similar procedure, but apply top monocyte signatures to an alternative publicly available dataset.
# In[17]:
file = os.path.join('data', 'GSE24759_processed_matrix.tsv.gz')
heme_zeroone_df = pd.read_table(file, index_col=0)
print(heme_zeroone_df.shape)
heme_zeroone_df.head(2)
# ## 2.1. Apply Signature from All Top Features and Process Data
# In[18]:
result_df, missing_genes = apply_signature(weight_df=monocyte_df,
other_df=heme_zeroone_df,
align=True)
full_heme_result_df = result_df.reset_index().rename(columns={'index': 'cell'})
heme_cell_type_recode_df = (
pd.DataFrame(full_heme_result_df.cell.str.split('_').values.tolist(),
columns = ['cell_type', 'replicate', 'additional'])
)
heme_cell_type_recode_df.loc[~heme_cell_type_recode_df.additional.isna(), 'cell_type'] = "PRE_BCELL2"
full_heme_result_df = (
pd.concat([heme_cell_type_recode_df.drop(['additional'], axis='columns'),
full_heme_result_df], axis='columns')
)
top_compressed_features = result_df.columns.tolist()
len(missing_genes)
# ## 2.2. Add Phenotype Information
# In[19]:
# Recode cell-type into larger classification
file = os.path.join('results', 'cell-type-classification.tsv')
cell_class_df = pd.read_table(file)
cell_updater = dict(zip(cell_class_df.label, cell_class_df.classification))
monocyte_updater = dict(zip(cell_class_df.label, cell_class_df.monocyte))
cell_class_df.head()
# In[20]:
full_heme_result_df = (
full_heme_result_df
.assign(cell_class = full_heme_result_df.cell_type.replace(cell_updater),
monocyte_status = full_heme_result_df.cell_type.replace(monocyte_updater))
)
full_heme_result_df.head(2)
# ## 2.3. Perform t-test on Monocyte vs. Non-Monocyte
# In[21]:
ttest_results = []
for compressed_feature in top_compressed_features:
signature_df = full_heme_result_df.loc[:, [compressed_feature, 'monocyte_status']]
treatment_values = signature_df.query("monocyte_status == 'Monocyte'").iloc[:, 0].values
control_values = signature_df.query("monocyte_status == 'Non Monocyte'").iloc[:, 0].values
t_stat, t_p = ttest_ind(treatment_values, control_values)
ttest_results.append(pd.Series([compressed_feature, t_stat, t_p]))
# In[22]:
monocyte_top_df = full_top_df.query("variable == 'Monocytes_FANTOM_2'")
monocyte_top_df.head(2)
# In[23]:
t_results_df = pd.concat(ttest_results, axis='columns').transpose()
t_results_df.columns = ['feature', 't_stat', 't_p']
t_results_df = t_results_df.assign(neg_log_p = -np.log10(t_results_df.t_p.astype(np.float64)))
t_results_df.head()
# ## 2.4. Compile and Output Plotting Data
# In[24]:
full_heme_result_df = (
pd.DataFrame(t_results_df
.feature
.str
.split('_')
.values
.tolist(),
columns=['algorithm',
'feature_num',
"drop_z",
"z_dim",
"drop_seed",
"seed"])
.merge(t_results_df,
left_index=True,
right_index=True)
.drop(['drop_z',
'drop_seed',
'feature'],
axis='columns')
)
full_heme_result_df.loc[:, 'z_dim'] = full_heme_result_df.z_dim.astype(np.int64)
full_heme_result_df.loc[:, 'feature_num'] = full_heme_result_df.feature_num.astype(np.int64)
full_heme_result_df.loc[:, 'seed'] = full_heme_result_df.seed.astype(np.int64)
full_heme_result_df = (
full_heme_result_df
.merge(monocyte_top_df,
left_on=['algorithm', 'feature_num', 'z_dim', 'seed'],
right_on=['algorithm', 'feature', 'z', 'seed'])
)
full_heme_result_df.head()
# In[25]:
file = os.path.join("results", "all_monocyte_top_scores_and_separation.tsv")
full_heme_result_df.to_csv(file, sep='\t', index=False)
|
the-stack_106_18609
|
import random
import re
import socket
from collections import OrderedDict
from django.conf import settings
from django.core.cache.backends.base import DEFAULT_TIMEOUT, get_key_func
from django.core.exceptions import ImproperlyConfigured
from django.utils.encoding import smart_text
from django.utils.module_loading import import_string
from redis.exceptions import ConnectionError, ResponseError, TimeoutError
from .. import pool
from ..exceptions import CompressorError, ConnectionInterrupted
from ..util import CacheKey
_main_exceptions = (TimeoutError, ResponseError, ConnectionError, socket.timeout)
special_re = re.compile('([*?[])')
def glob_escape(s):
return special_re.sub(r'[\1]', s)
class DefaultClient:
def __init__(self, server, params, backend):
self._backend = backend
self._server = server
self._params = params
self.reverse_key = get_key_func(
params.get("REVERSE_KEY_FUNCTION") or "django_redis.util.default_reverse_key"
)
if not self._server:
raise ImproperlyConfigured("Missing connections string")
if not isinstance(self._server, (list, tuple, set)):
self._server = self._server.split(",")
self._clients = [None] * len(self._server)
self._options = params.get("OPTIONS", {})
self._slave_read_only = self._options.get('SLAVE_READ_ONLY', True)
serializer_path = self._options.get("SERIALIZER", "django_redis.serializers.pickle.PickleSerializer")
serializer_cls = import_string(serializer_path)
compressor_path = self._options.get("COMPRESSOR", "django_redis.compressors.identity.IdentityCompressor")
compressor_cls = import_string(compressor_path)
self._serializer = serializer_cls(options=self._options)
self._compressor = compressor_cls(options=self._options)
self.connection_factory = pool.get_connection_factory(options=self._options)
def __contains__(self, key):
return self.has_key(key)
def get_next_client_index(self, write=True, tried=()):
"""
        Return the index of the next client to use for a read.
        This implements the default behavior for choosing a read client
        in a master-slave setup. Override this method if you want a
        different selection strategy.
"""
if tried and len(tried) < len(self._server):
not_tried = [i for i in range(0, len(self._server)) if i not in tried]
return random.choice(not_tried)
if write or len(self._server) == 1:
return 0
return random.randint(1, len(self._server) - 1)
def get_client(self, write=True, tried=(), show_index=False):
"""
        Return a raw redis client.
        This method is used by almost all cache backend operations to
        obtain a native redis client/connection instance.
"""
index = self.get_next_client_index(write=write, tried=tried or [])
if self._clients[index] is None:
self._clients[index] = self.connect(index)
if show_index:
return self._clients[index], index
else:
return self._clients[index]
def connect(self, index=0):
"""
Given a connection index, returns a new raw redis client/connection
instance. Index is used for master/slave setups and indicates which
connection string should be used. In normal setups, index is 0.
"""
return self.connection_factory.connect(self._server[index])
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None, nx=False, xx=False):
"""
Persist a value to the cache, and set an optional expiration time.
Also supports an optional nx parameter. If set to True, redis SETNX will be used instead of SET.
"""
nkey = self.make_key(key, version=version)
nvalue = self.encode(value)
if timeout == DEFAULT_TIMEOUT:
timeout = self._backend.default_timeout
original_client = client
tried = []
while True:
try:
if not client:
client, index = self.get_client(write=True, tried=tried, show_index=True)
if timeout is not None:
# Convert to milliseconds
timeout = int(timeout * 1000)
if timeout <= 0:
if nx:
# Using negative timeouts when nx is True should
# not expire (in our case delete) the value if it exists.
# Obviously, expiring a non-existent value is a no-op.
return not self.has_key(key, version=version, client=client)
else:
# redis doesn't support negative timeouts in ex flags,
# so it seems better to just delete the key
# than to set it and then expire it in a pipeline
return self.delete(key, client=client, version=version)
return client.set(nkey, nvalue, nx=nx, px=timeout, xx=xx)
except _main_exceptions as e:
if not original_client and not self._slave_read_only and len(tried) < len(self._server):
tried.append(index)
client = None
continue
raise ConnectionInterrupted(connection=client, parent=e)
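# Usage sketch for set() (names follow this client's API; timeouts are seconds):
#   client.set("token", "abc", timeout=60)   # SET with a PX expiry of 60000 ms
#   client.set("token", "xyz", nx=True)      # only set if the key does not exist
#   client.set("token", "xyz", xx=True)      # only set if the key already exists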
def incr_version(self, key, delta=1, version=None, client=None):
"""
Adds delta to the cache version for the supplied key. Returns the
new version.
"""
if client is None:
client = self.get_client(write=True)
if version is None:
version = self._backend.version
old_key = self.make_key(key, version)
value = self.get(old_key, version=version, client=client)
try:
ttl = client.ttl(old_key)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
if value is None:
raise ValueError("Key '%s' not found" % key)
if isinstance(key, CacheKey):
new_key = self.make_key(key.original_key(), version=version + delta)
else:
new_key = self.make_key(key, version=version + delta)
self.set(new_key, value, timeout=ttl, client=client)
self.delete(old_key, client=client)
return version + delta
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None):
"""
Add a value to the cache, failing if the key already exists.
Returns ``True`` if the object was added, ``False`` if not.
"""
return self.set(key, value, timeout, version=version, client=client, nx=True)
def get(self, key, default=None, version=None, client=None):
"""
Retrieve a value from the cache.
Returns decoded value if key is found, the default if not.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
value = client.get(key)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
if value is None:
return default
return self.decode(value)
def persist(self, key, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
if client.exists(key):
client.persist(key)
def expire(self, key, timeout, version=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
if client.exists(key):
client.expire(key, timeout)
def lock(self, key, version=None, timeout=None, sleep=0.1,
blocking_timeout=None, client=None):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
return client.lock(key, timeout=timeout, sleep=sleep,
blocking_timeout=blocking_timeout)
def delete(self, key, version=None, prefix=None, client=None):
"""
Remove a key from the cache.
"""
if client is None:
client = self.get_client(write=True)
try:
return client.delete(self.make_key(key, version=version,
prefix=prefix))
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def delete_pattern(self, pattern, version=None, prefix=None, client=None, itersize=None):
"""
Remove all keys matching pattern.
"""
if client is None:
client = self.get_client(write=True)
pattern = self.make_pattern(pattern, version=version, prefix=prefix)
kwargs = {'match': pattern, }
if itersize:
kwargs['count'] = itersize
try:
count = 0
for key in client.scan_iter(**kwargs):
client.delete(key)
count += 1
return count
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
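# Usage sketch: remove every cache key whose (unversioned) key matches a glob,
# e.g. client.delete_pattern("session:*", itersize=1000) returns the number of
# deleted keys; SCAN is used under the hood, so redis is not blocked as with KEYS.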
def delete_many(self, keys, version=None, client=None):
"""
Remove multiple keys at once.
"""
if client is None:
client = self.get_client(write=True)
keys = [self.make_key(k, version=version) for k in keys]
if not keys:
return
try:
return client.delete(*keys)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def clear(self, client=None):
"""
Flush all cache keys.
"""
if client is None:
client = self.get_client(write=True)
try:
client.flushdb()
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def decode(self, value):
"""
Decode the given value.
"""
try:
value = int(value)
except (ValueError, TypeError):
try:
value = self._compressor.decompress(value)
except CompressorError:
# Handle small values that were stored uncompressed
pass
value = self._serializer.loads(value)
return value
def encode(self, value):
"""
Encode the given value.
"""
if isinstance(value, bool) or not isinstance(value, int):
value = self._serializer.dumps(value)
value = self._compressor.compress(value)
return value
return value
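# Round-trip sketch of the encoding scheme above (assuming the default pickle
# serializer and the identity compressor):
#   encode(42)        -> 42 stored as a plain integer, so redis INCRBY keeps working
#   encode({"a": 1})  -> compress(pickle.dumps({"a": 1}))
#   decode(raw)       -> try int(raw) first, otherwise decompress + deserialize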
def get_many(self, keys, version=None, client=None):
"""
Retrieve many keys.
"""
if client is None:
client = self.get_client(write=False)
if not keys:
return {}
recovered_data = OrderedDict()
map_keys = OrderedDict(
(self.make_key(k, version=version), k) for k in keys
)
try:
results = client.mget(*map_keys)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
for key, value in zip(map_keys, results):
if value is None:
continue
recovered_data[map_keys[key]] = self.decode(value)
return recovered_data
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None, client=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. This is much more efficient than calling set() multiple times.
If timeout is given, that timeout will be used for the key; otherwise
the default cache timeout will be used.
"""
if client is None:
client = self.get_client(write=True)
try:
pipeline = client.pipeline()
for key, value in data.items():
self.set(key, value, timeout, version=version, client=pipeline)
pipeline.execute()
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
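# Usage sketch: all writes go through a single pipeline round trip, e.g.
#   client.set_many({"a": 1, "b": 2, "c": 3}, timeout=60)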
def _incr(self, key, delta=1, version=None, client=None, ignore_key_check=False):
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
try:
try:
# If the key expired after the exists check, we would get
# a key with the wrong value and a ttl of -1;
# use a lua script for atomicity
if not ignore_key_check:
lua = """
local exists = redis.call('EXISTS', KEYS[1])
if (exists == 1) then
return redis.call('INCRBY', KEYS[1], ARGV[1])
else return false end
"""
else:
lua = """
return redis.call('INCRBY', KEYS[1], ARGV[1])
"""
value = client.eval(lua, 1, key, delta)
if value is None:
raise ValueError("Key '%s' not found" % key)
except ResponseError:
# Redis throws a ResponseError if the cached value or the
# resulting total exceeds a 64-bit signed integer, or if the
# int was encoded so that redis sees the data as a string.
# In that case fall back to get/set, trying to keep the TTL
# of the key
timeout = client.ttl(key)
# ttl returns -2 if the key does not exist,
# which means the key has expired
if timeout == -2:
raise ValueError("Key '%s' not found" % key)
value = self.get(key, version=version, client=client) + delta
self.set(key, value, version=version, timeout=timeout,
client=client)
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
return value
def incr(self, key, delta=1, version=None, client=None, ignore_key_check=False):
"""
Add delta to the value in the cache. If the key does not exist, raise a
ValueError exception. If ignore_key_check=True, the key will be
created and set to the delta value by default.
"""
return self._incr(key=key, delta=delta, version=version, client=client, ignore_key_check=ignore_key_check)
def decr(self, key, delta=1, version=None, client=None):
"""
Subtract delta from the value in the cache. If the key does not exist, raise a
ValueError exception.
"""
return self._incr(key=key, delta=-delta, version=version,
client=client)
def ttl(self, key, version=None, client=None):
"""
Execute the TTL redis command and return the "time-to-live" of the specified key.
If the key is non-volatile (has no expiry), return None.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
if not client.exists(key):
return 0
t = client.ttl(key)
if t >= 0:
return t
elif t == -1:
return None
elif t == -2:
return 0
else:
# Should never reach here
return None
def has_key(self, key, version=None, client=None):
"""
Test if key exists.
"""
if client is None:
client = self.get_client(write=False)
key = self.make_key(key, version=version)
try:
return client.exists(key) == 1
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
def iter_keys(self, search, itersize=None, client=None, version=None):
"""
Same as keys, but uses redis >= 2.8 SCAN cursors
for memory-efficient key iteration.
"""
if client is None:
client = self.get_client(write=False)
pattern = self.make_pattern(search, version=version)
for item in client.scan_iter(match=pattern, count=itersize):
item = smart_text(item)
yield self.reverse_key(item)
def keys(self, search, version=None, client=None):
"""
Execute the KEYS command and return matched results.
Warning: this can return a huge number of results; in
that case it is strongly recommended to use iter_keys
instead.
"""
if client is None:
client = self.get_client(write=False)
pattern = self.make_pattern(search, version=version)
try:
encoding_map = [smart_text(k) for k in client.keys(pattern)]
return [self.reverse_key(k) for k in encoding_map]
except _main_exceptions as e:
raise ConnectionInterrupted(connection=client, parent=e)
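# Usage sketch contrasting the two lookups above:
#   client.keys("sess:*")                               # one blocking KEYS call, full list in memory
#   for k in client.iter_keys("sess:*", itersize=500):  # SCAN-based, lazy
#       ...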
def make_key(self, key, version=None, prefix=None):
if isinstance(key, CacheKey):
return key
if prefix is None:
prefix = self._backend.key_prefix
if version is None:
version = self._backend.version
return CacheKey(self._backend.key_func(key, prefix, version))
def make_pattern(self, pattern, version=None, prefix=None):
if isinstance(pattern, CacheKey):
return pattern
if prefix is None:
prefix = self._backend.key_prefix
prefix = glob_escape(prefix)
if version is None:
version = self._backend.version
version = glob_escape(str(version))
return CacheKey(self._backend.key_func(pattern, prefix, version))
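# Illustrative example, assuming Django's default KEY_FUNCTION and
# key_prefix="myapp", version=1:
#   make_key("foo")      -> CacheKey("myapp:1:foo")
#   make_pattern("fo*")  -> CacheKey("myapp:1:fo*")  (prefix/version are glob-escaped,
#                                                     the pattern itself is not)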
def close(self, **kwargs):
if getattr(settings, "DJANGO_REDIS_CLOSE_CONNECTION", False):
for i in range(len(self._clients)):
for c in self._clients[i].connection_pool._available_connections:
c.disconnect()
self._clients[i] = None
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, client=None):
"""
Sets a new expiration for a key.
"""
if client is None:
client = self.get_client(write=True)
key = self.make_key(key, version=version)
return client.expire(key, timeout)
|
the-stack_106_18610
|
from setuptools import setup, find_packages
install_requires = [
'arrow<1.0', # Guard against losing py3.5 compat announced for v1.0.
'Babel',
'csvw>=1.0',
'clldutils>=3.5',
'pycldf>=1.5.1',
'setuptools>=25',
'pyramid>=1.10',
'pyramid_mako>=1.0',
'pyramid_tm',
'SQLAlchemy>=1.0.6',
'purl>=0.5',
'pytz',
'zope.sqlalchemy',
'alembic>=0.7.1',
'webassets>=0.12.1', # no longer supports py2, no longer requires six!
'markupsafe',
'requests>=2.4.3', # we use the support for connect timeouts introduced in 2.4.0
'rdflib>=4.1.1', # rdflib 4.1.0 requires html5lib==0.95
'colander',
'python-dateutil',
'paginate',
'webhelpers2>=2.0',
'nameparser',
'waitress>=1.4.2',
]
setup(
name='clld',
version='7.4.2.dev0',
description=(
'Python library supporting the development of cross-linguistic databases'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
],
keywords='web pyramid LRL Linguistics',
author="Robert Forkel, MPI SHH",
author_email="[email protected]",
url="https://clld.org",
license="Apache Software License",
packages=find_packages(where="src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
python_requires='>=3.5',
install_requires=install_requires,
extras_require={
'dev': [
'cookiecutter',
'cldfcatalog',
'waitress',
'pyramid_debugtoolbar',
'tox',
'flake8',
'wheel',
'twine',
],
'test': [
'cookiecutter',
'xlrd',
'mock',
'pytest>=6',
'pytest-clld>=1.0.2',
'pytest-mock',
'pytest-cov',
'coverage>=4.2',
'zope.component>=3.11.0',
],
'docs': [
'Sphinx',
'docutils',
'repoze.sphinx.autointerface',
],
},
message_extractors={'src/clld': [
('**.py', 'python', None),
('**.mako', 'mako', {'encoding': 'utf8'}),
('web/static/**', 'ignore', None)]},
entry_points="""\
[console_scripts]
clld = clld.__main__:main
"""
)
|
the-stack_106_18611
|
import os
import re
import platform
from conans.client import conan_api
from cpt.packager import ConanMultiPackager
from cpt.tools import split_colon_env
from cpt.remotes import RemotesManager
# from cpt.ci_manager import *
from cpt.printer import Printer
from bincrafters.build_paths import BINCRAFTERS_REPO_URL, BINCRAFTERS_LOGIN_USERNAME, BINCRAFTERS_USERNAME, BINCRAFTERS_REPO_NAME
printer = Printer()
# ci_manager = CIManager(printer=printer)
def get_recipe_path(cwd=None):
cwd = os.getenv("BPT_CWD", cwd)
conanfile = os.getenv("CONAN_CONANFILE", "conanfile.py")
if cwd is None:
return os.path.abspath(conanfile)
else:
return os.path.abspath(os.path.join(cwd, conanfile))
def get_bool_from_env(var_name, default="1"):
val = os.getenv(var_name, default)
return str(val).lower() in ("1", "true", "yes", "y")
def get_value_from_recipe(search_string, recipe=None):
if recipe is None:
recipe = get_recipe_path()
with open(recipe, "r") as conanfile:
contents = conanfile.read()
result = re.search(search_string, contents)
return result
def inspect_value_from_recipe(attribute, recipe_path):
cwd = os.getcwd()
result = None
try:
dir_name = os.path.dirname(recipe_path)
conanfile_name = os.path.basename(recipe_path)
if dir_name == "":
dir_name = "./"
os.chdir(dir_name)
conan_instance, _, _ = conan_api.Conan.factory()
inspect_result = conan_instance.inspect(path=conanfile_name, attributes=[attribute])
result = inspect_result.get(attribute)
except:
pass
os.chdir(cwd)
return result
def get_name_from_recipe(recipe=None):
name = inspect_value_from_recipe(attribute="name", recipe_path=recipe)
return name or get_value_from_recipe(r'''name\s*=\s*["'](\S*)["']''', recipe=recipe).groups()[0]
def get_version_from_recipe(recipe=None):
version = inspect_value_from_recipe(attribute="version", recipe_path=recipe)
return version or get_value_from_recipe(r'''version\s*=\s*["'](\S*)["']''', recipe=recipe).groups()[0]
def is_shared(recipe=None):
options = inspect_value_from_recipe(attribute="options", recipe_path=recipe)
if options:
return "shared" in options
match = get_value_from_recipe(r'''options.*=([\s\S]*?)(?=}|$)''', recipe=recipe)
if match is None:
return False
return "shared" in match.groups()[0]
def get_repo_name_from_ci():
reponame_a = os.getenv("APPVEYOR_REPO_NAME", "")
reponame_azp = os.getenv("BUILD_REPOSITORY_NAME", "")
reponame_g = os.getenv("GITHUB_REPOSITORY", "")
return reponame_a or reponame_azp or reponame_g
def get_repo_branch_from_ci():
# TODO: Try again one day to migrate this to CPT's CI Manager.
# Since CPT's CI Managers vary in logic, this broke too much of the existing behaviour
# in a first attempt (Croydon)
# ~~Remove GHA special handling after CPT 0.32.0 is released~~
repobranch_a = os.getenv("APPVEYOR_REPO_BRANCH", "")
repobranch_azp = os.getenv("BUILD_SOURCEBRANCH", "")
if repobranch_azp.startswith("refs/pull/"):
repobranch_azp = os.getenv("SYSTEM_PULLREQUEST_TARGETBRANCH", "")
def _clean_branch(branch):
return branch[11:] if branch.startswith("refs/heads/") else branch
repobranch_azp = _clean_branch(repobranch_azp)
repobranch_g = _clean_branch(os.getenv("GITHUB_REF", ""))
if os.getenv("GITHUB_EVENT_NAME", "") == "pull_request":
repobranch_g = os.getenv("GITHUB_BASE_REF", "")
return repobranch_a or repobranch_azp or repobranch_g
def get_ci_vars():
reponame = get_repo_name_from_ci()
reponame_split = reponame.split("/")
repobranch = get_repo_branch_from_ci()
repobranch_split = repobranch.split("/")
username, _ = reponame_split if len(reponame_split) == 2 else ["", ""]
channel, version = repobranch_split if len(repobranch_split) == 2 else ["", ""]
return username, channel, version
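# Illustrative example of the CI variable parsing above: with a repo name of
# "bincrafters/conan-foo" and a branch of "stable/1.2.3" this yields
# username="bincrafters", channel="stable", version="1.2.3"; anything that does
# not split into exactly two parts falls back to empty strings.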
def get_username_from_ci():
username, _, _ = get_ci_vars()
return username
def get_channel_from_ci():
_, channel, _ = get_ci_vars()
return channel
def get_version_from_ci():
_, _, version = get_ci_vars()
return version
def get_version(recipe=None):
env_ver = os.getenv("CONAN_VERSION", None)
ci_ver = get_version_from_ci()
if env_ver:
return env_ver
elif ci_ver:
return ci_ver
else:
return get_version_from_recipe(recipe=recipe)
def get_conan_vars(recipe=None, kwargs={}):
# these fallbacks have to handle empty environment variables too!
# this is the case for e.g. external pull request (i.e. no secrets available)
# Combined with versioned branches, this leads to the error
# that the channel is defined but not the username, and CPT fails
if "CONAN_USERNAME" in os.environ and os.getenv("CONAN_USERNAME") != "":
username_fallback = os.getenv("CONAN_USERNAME")
else:
username_fallback = get_username_from_ci() or BINCRAFTERS_USERNAME
if "CONAN_LOGIN_USERNAME" in os.environ and os.getenv("CONAN_LOGIN_USERNAME") != "":
login_username_fallback = os.getenv("CONAN_LOGIN_USERNAME")
else:
login_username_fallback = BINCRAFTERS_LOGIN_USERNAME
username = kwargs.get("username", username_fallback)
kwargs["channel"] = kwargs.get("channel", os.getenv("CONAN_CHANNEL", get_channel_from_ci()))
version = get_version(recipe=recipe)
kwargs["login_username"] = kwargs.get("login_username", login_username_fallback)
kwargs["username"] = username
return username, version, kwargs
def get_user_repository(username, repository_name):
return "https://{0}.jfrog.io/artifactory/api/conan/{1}".format(username.lower(), repository_name)
def get_conan_upload(username):
if os.getenv("BPT_NO_UPLOAD", "").lower() in ["true", "yes", "on", "1"]:
return False
upload = os.getenv("CONAN_UPLOAD")
if upload:
return upload.split('@') if '@' in upload else upload
repository_name = os.getenv("BINTRAY_REPOSITORY", BINCRAFTERS_REPO_NAME)
return get_user_repository(username, repository_name)
def get_conan_upload_param(username, kwargs):
if not get_conan_upload(username):
try:
del kwargs["upload"]
except:
pass
return kwargs
if "upload" not in kwargs and get_conan_upload(username):
kwargs["upload"] = get_conan_upload(username)
return kwargs
def get_conan_remotes(username, kwargs):
remotes = None
if "remotes" not in kwargs:
remotes = os.getenv("CONAN_REMOTES")
if remotes:
remotes = remotes.split(',')
for remote in reversed(remotes):
if '@' in remote:
remote = RemotesManager._get_remote_from_str(remote, var_name=remote)
else:
# While redundant, this moves upload remote to position 0.
remotes = [get_conan_upload(username)] if get_conan_upload(username) else []
# Add bincrafters repository for other users, e.g. if the package would
# require other packages from the bincrafters repo.
bincrafters_user = BINCRAFTERS_USERNAME
if username != bincrafters_user:
if get_conan_upload(bincrafters_user):
remotes.append(get_conan_upload(bincrafters_user))
# Force Bincrafters repo on remotes
if BINCRAFTERS_REPO_URL not in remotes:
remotes.append(BINCRAFTERS_REPO_URL)
kwargs["remotes"] = remotes
return kwargs
def get_upload_when_stable(kwargs):
upload_when_stable = kwargs.get('upload_only_when_stable')
if upload_when_stable is None:
kwargs['upload_only_when_stable'] = get_bool_from_env("CONAN_UPLOAD_ONLY_WHEN_STABLE")
return kwargs
def get_os():
return platform.system().replace("Darwin", "Macos")
def get_archs(kwargs):
if "archs" not in kwargs:
archs = os.getenv("CONAN_ARCHS", None)
if archs is None:
# Per default only build 64-bit artifacts
kwargs["archs"] = ["x86_64"]
else:
kwargs["archs"] = split_colon_env("CONAN_ARCHS") if archs else None
return kwargs
def get_stable_branch_pattern(kwargs):
if "stable_branch_pattern" not in kwargs:
kwargs["stable_branch_pattern"] = os.getenv("CONAN_STABLE_BRANCH_PATTERN", "stable/*")
return kwargs
def get_reference(name, version, kwargs):
if "reference" not in kwargs:
kwargs["reference"] = "{0}/{1}".format(name, version)
return kwargs
def get_builder(build_policy=None, cwd=None, **kwargs):
recipe = get_recipe_path(cwd)
cwd = os.path.dirname(recipe)
name = get_name_from_recipe(recipe=recipe)
username, version, kwargs = get_conan_vars(recipe=recipe, kwargs=kwargs)
kwargs = get_reference(name, version, kwargs)
kwargs = get_conan_upload_param(username, kwargs)
kwargs = get_conan_remotes(username, kwargs)
kwargs = get_upload_when_stable(kwargs)
kwargs = get_stable_branch_pattern(kwargs)
kwargs = get_archs(kwargs)
build_policy = os.getenv("CONAN_BUILD_POLICY", build_policy)
builder = ConanMultiPackager(
build_policy=build_policy,
cwd=cwd,
**kwargs)
return builder
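# Typical build.py usage sketch (add_common_builds/run are ConanMultiPackager
# methods from conan-package-tools; the pure_c flag here is only an example):
#
# if __name__ == "__main__":
#     builder = get_builder()
#     builder.add_common_builds(pure_c=False)
#     builder.run()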
|
the-stack_106_18612
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import numpy as np
from collections import defaultdict
import fire, pickle, random, time, math, os, lzma
from dynet import *
from .utils import buildVocab, strong_normalize, shuffled_balanced_stream
from .modules import UPOSTagger, XPOSTagger, MSTParser, AHDPParser, AEDPParser
import pyximport; pyximport.install()
from .calgorithm import projectivize, is_projective
from .asbeamconf import ASBeamConf
from .evaluation import POSCorrect
from .io import read_conll, write_conll
from .layers import MultiLayerPerceptron, Dense, identity, Biaffine
class ComputationCarrier(object):
def __copy__(self):
result = object.__new__(ComputationCarrier)
result.__dict__.update(self.__dict__)
return result
class CDParser:
def __init__(self):
pass
def create_parser(self, **kwargs):
self._verbose = kwargs.get("verbose", True)
if self._verbose:
print("Parameters (others default):")
for k in sorted(kwargs):
print(k, kwargs[k])
self._args = kwargs
self._learning_rate = kwargs.get("learning_rate", 0.001)
self._beta1 = kwargs.get("beta1", 0.9)
self._beta2 = kwargs.get("beta2", 0.999)
self._epsilon = kwargs.get("epsilon", 1e-8)
self._edecay = kwargs.get("edecay", 0.)
self._clip = kwargs.get("clip", 5.)
self._sparse_updates = kwargs.get("sparse_updates", False)
self._optimizer = kwargs.get("optimizer", "adam")
self._batch_size = kwargs.get("batch_size", 50)
self._anneal_base = kwargs.get("anneal_base", 1.0)
self._anneal_steps = kwargs.get("anneal_steps", 1000)
self._word_smooth = kwargs.get("word_smooth", 0.25)
self._char_smooth = kwargs.get("char_smooth", 0.25)
self._wdims = kwargs.get("wdims", 128)
self._bilstm_dims = kwargs.get("bilstm_dims", 128)
self._bilstm_layers = kwargs.get("bilstm_layers", 2)
self._bilstm_dropout = kwargs.get("bilstm_dropout", 0.0)
self._pdims = kwargs.get("pdims", 0)
self._fdims = kwargs.get("fdims", 0)
self._feature_dropout = kwargs.get("feature_dropout", 0.0)
self._block_dropout = kwargs.get("block_dropout", 0.)
self._char_dropout = kwargs.get("char_dropout", 0.)
self._cdims = kwargs.get("cdims", 32)
self._char_lstm_dims = kwargs.get("char_lstm_dims", 128)
self._char_lstm_layers = kwargs.get("char_lstm_layers", 2)
self._char_lstm_dropout = kwargs.get("char_lstm_dropout", 0.0)
self._char_repr_method = kwargs.get("char_repr_method", "pred")
self._activations = {'tanh': tanh, 'sigmoid': logistic, 'relu': rectify, 'tanh3': (lambda x: tanh(cwise_multiply(cwise_multiply(x, x), x)))}
self._utagger_num = kwargs.get("utagger_num", 1)
self._utagger_weight = kwargs.get("utagger_weight", 1.0)
self._utaggers = [UPOSTagger(self, id="UPOS-{}".format(i+1), **self._args) for i in range(self._utagger_num)]
self._xtagger_num = kwargs.get("xtagger_num", 1)
self._xtagger_weight = kwargs.get("xtagger_weight", 1.0)
self._xtaggers = [XPOSTagger(self, id="XPOS-{}".format(i+1), **self._args) for i in range(self._xtagger_num)]
self._ahdp_num = kwargs.get("ahdp_num", 1)
self._ahdp_weight = kwargs.get("ahdp_weight", 1.0)
self._ahdp_parsers = [AHDPParser(self, id="AHDP-{}".format(i+1), **self._args) for i in range(self._ahdp_num)]
self._aedp_num = kwargs.get("aedp_num", 1)
self._aedp_weight = kwargs.get("aedp_weight", 1.0)
self._aedp_parsers = [AEDPParser(self, id="AEDP-{}".format(i+1), **self._args) for i in range(self._aedp_num)]
self._as_mlp_activation = self._activations[kwargs.get('as_mlp_activation', 'relu')]
self._as_mlp_dims = kwargs.get("as_mlp_dims", 128)
self._as_mlp_layers = kwargs.get("as_mlp_layers", 2)
self._as_mlp_dropout = kwargs.get("as_mlp_dropout", 0.0)
self._as_stack_features = kwargs.get("as_stack_features", 2)
self._as_buffer_features = kwargs.get("as_buffer_features", 1)
self._as_weight = kwargs.get("as_weight", 1.0)
self._mst_num = kwargs.get("mst_num", 1)
self._mst_weight = kwargs.get("mst_weight", 1.0)
self._mst_parsers = [MSTParser(self, id="MST-{}".format(i+1), **self._args) for i in range(self._mst_num)]
self._label_mlp_activation = self._activations[kwargs.get('label_mlp_activation', 'relu')]
self._label_mlp_dims = kwargs.get("label_mlp_dims", 128)
self._label_mlp_layers = kwargs.get("label_mlp_layers", 1)
self._label_mlp_dropout = kwargs.get("label_mlp_dropout", 0.0)
self._label_concat_dims = kwargs.get("label_concat_dims", 128)
self._label_concat_layers = kwargs.get("label_concat_layers", 1)
self._label_weight = kwargs.get("label_weight", 1.0)
self._label_discrim = kwargs.get("label_discrim", False)
self._label_biaffine = kwargs.get("label_biaffine", False)
return self
def _load_vocab(self, vocab):
self._fullvocab = vocab
self._upos = {p: i for i, p in enumerate(vocab["upos"])}
self._iupos = vocab["upos"]
self._xpos = {p: i for i, p in enumerate(vocab["xpos"])}
self._ixpos = vocab["xpos"]
self._vocab = {w: i + 3 for i, w in enumerate(vocab["vocab"])}
self._wordfreq = vocab["wordfreq"]
self._charset = {c: i + 3 for i, c in enumerate(vocab["charset"])}
self._charfreq = vocab["charfreq"]
self._rels = {r: i for i, r in enumerate(vocab["rels"])}
self._irels = vocab["rels"]
self._feats = {f: i + 1 for i, f in enumerate(vocab["feats"])}
def load_vocab(self, filename):
with open(filename, "rb") as f:
vocab = pickle.load(f)
self._load_vocab(vocab)
return self
def save_vocab(self, filename):
with open(filename, "wb") as f:
pickle.dump(self._fullvocab, f)
return self
def build_vocab(self, filename, savefile=None, cutoff=1):
if isinstance(filename, str):
graphs = read_conll(filename)
elif isinstance(filename, list):
graphs = []
for f in filename:
graphs.extend(read_conll(f))
self._fullvocab= buildVocab(graphs, cutoff)
if savefile:
self.save_vocab(savefile)
self._load_vocab(self._fullvocab)
return self
def save_model(self, filename):
self.save_vocab(filename + ".vocab")
with open(filename + ".params", "wb") as f:
pickle.dump(self._args, f)
self._model.save(filename + ".model")
return self
def load_model(self, filename, **kwargs):
self.load_vocab(filename + ".vocab")
with open(filename + ".params", "rb") as f:
args = pickle.load(f)
args.update(kwargs)
self.create_parser(**args)
self.init_model()
self._model.load(filename + ".model")
return self
def init_model(self):
self._model = Model()
if self._optimizer == "adam":
self._trainer = AdamTrainer(self._model, alpha=self._learning_rate, beta_1 = self._beta1, beta_2=self._beta2, edecay=self._edecay, eps=self._epsilon)
elif self._optimizer == "sgd":
self._trainer = SimpleSGDTrainer(self._model, self._learning_rate)
self._trainer.set_sparse_updates(self._sparse_updates)
self._trainer.set_clip_threshold(self._clip)
input_dims = 0
if self._cdims > 0 and self._char_lstm_dims > 0:
if self._char_lstm_dims > 0:
self._char_lookup = self._model.add_lookup_parameters((len(self._charset) + 3, self._cdims))
self._char_lstm = BiRNNBuilder(self._char_lstm_layers, self._cdims, self._char_lstm_dims, self._model, LSTMBuilder)
if self._char_repr_method == "concat":
input_dims += self._char_lstm_dims
if self._char_repr_method == "pred":
self._char_to_word = Dense(self._char_lstm_dims, self._wdims, tanh, self._model)
if self._wdims > 0:
self._word_lookup = self._model.add_lookup_parameters((len(self._vocab) + 3, self._wdims))
input_dims += self._wdims
if self._pdims > 0:
self._upos_lookup = self._model.add_lookup_parameters((len(self._upos) + 1, self._pdims))
input_dims += self._pdims
if self._fdims > 0:
self._feats_lookup = self._model.add_lookup_parameters((len(self._feats) + 1, self._fdims))
input_dims += self._fdims
if input_dims <= 0:
print("Input to LSTM is empty! You need to use at least one of word embeddings or character embeddings.")
return
self._bilstm = BiRNNBuilder(self._bilstm_layers, input_dims, self._bilstm_dims, self._model, LSTMBuilder)
self._root_repr = self._model.add_parameters(input_dims)
self._bos_repr = self._model.add_parameters(input_dims)
self._eos_repr = self._model.add_parameters(input_dims)
for utagger in self._utaggers:
utagger.init_params()
for xtagger in self._xtaggers:
xtagger.init_params()
for mstparser in self._mst_parsers:
mstparser.init_params()
for ahdpparser in self._ahdp_parsers:
ahdpparser.init_params()
for aedpparser in self._aedp_parsers:
aedpparser.init_params()
self._as_pad_repr = [self._model.add_parameters(self._bilstm_dims) for i in range(self._as_stack_features + self._as_buffer_features)]
self._as_mlp = MultiLayerPerceptron([(self._bilstm_dims) * (self._as_stack_features + self._as_buffer_features)] + [self._as_mlp_dims] * self._as_mlp_layers, self._as_mlp_activation, self._model)
self._as_final = Dense(self._as_mlp_dims, 3, identity, self._model)
if self._label_biaffine:
self._label_head_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._label_mlp_dims] * self._label_mlp_layers, self._label_mlp_activation, self._model)
self._label_mod_mlp = MultiLayerPerceptron([self._bilstm_dims] + [self._label_mlp_dims] * self._label_mlp_layers, self._label_mlp_activation, self._model)
self._label_scorer = Biaffine(self._label_mlp_dims, len(self._rels), self._model)
else:
self._label_mlp = MultiLayerPerceptron([(self._bilstm_dims) * 2] + [self._label_mlp_dims] * self._label_mlp_layers, self._label_mlp_activation, self._model)
self._label_final = Dense(self._label_mlp_dims, len(self._rels), identity, self._model)
return self
def load_embeddings(self, filename, xz=False):
if not os.path.isfile(filename):
print(filename, "does not exist")
return self
if xz:
f = lzma.open(filename, "rt", encoding="utf-8", errors="ignore")
else:
f = open(filename, "r")
found_set = set()
for line in f:
l = line.split()
word = strong_normalize(l[0])
vec = [float(x) for x in l[1:]]
if word in self._vocab:
found_set.add(word)
self._word_lookup.init_row(self._vocab[word], vec)
f.close()
print("Loaded embeddings from", filename)
print(len(found_set), "hits with vocab size of", len(self._vocab))
return self
def _next_epoch(self):
self._trainer.update_epoch()
self._epoch += 1
return self
def _get_lstm_features(self, sentence, train=False):
carriers = [ComputationCarrier() for i in range(len(sentence))]
carriers[0].vec = parameter(self._root_repr)
for entry, cc in zip(sentence[1:], carriers[1:]):
vecs = []
word_flag = False
if self._wdims > 0:
c = float(self._wordfreq.get(entry.norm, 0))
word_flag = c > 0 and (not train or (random.random() < (c / (self._word_smooth + c))))
wvec = lookup(self._word_lookup, int(self._vocab.get(entry.norm, 0)) if word_flag else 0)
if self._char_repr_method == "concat" or word_flag:
vecs.append(wvec)
if self._cdims > 0 and self._char_lstm_dims > 0:
if not (self._char_repr_method == "pred" and word_flag):
char_vecs = []
char_vecs.append(lookup(self._char_lookup, 1))
for ch in entry.word:
c = float(self._charfreq.get(ch, 0))
keep_flag = not train or (random.random() < (c / (self._char_smooth + c)))
charvec = lookup(self._char_lookup, int(self._charset.get(ch, 0)) if keep_flag else 0)
if self._char_dropout > 0.:
char_vecs.append(block_dropout(charvec, self._char_dropout))
else:
char_vecs.append(charvec)
char_vecs.append(lookup(self._char_lookup, 2))
char_vecs = self._char_lstm.add_inputs(char_vecs)
cvec = concatenate([char_vecs[0][-1].output(), char_vecs[-1][0].output()])
if self._char_repr_method == "concat":
vecs.append(cvec)
elif not word_flag:
vecs.append(self._char_to_word(cvec))
if self._pdims > 0:
keep_flag = not train or (random.random() > 0.01)
vecs.append(lookup(self._upos_lookup, int(self._upos.get(entry.upos, len(self._upos))) if keep_flag else len(self._upos)))
if self._fdims > 0:
feats = []
for f in entry.feats_set:
if f in self._feats and ((not train) or random.random() > self._feature_dropout):
feats.append(lookup(self._feats_lookup, int(self._feats[f])))
if len(feats) == 0:
feats.append(lookup(self._feats_lookup, 0))
vecs.append(emax(feats))
cc.vec = concatenate(vecs)
if train and self._block_dropout > 0.:
cc.vec = block_dropout(cc.vec, self._block_dropout)
ret = self._bilstm.transduce([parameter(self._bos_repr)] + [x.vec for x in carriers[:]] + [parameter(self._eos_repr)])
for vec, cc in zip(ret[1:-1], carriers[:]):
cc.vec = vec
return carriers
def _minibatch_update(self, loss, num_tokens):
if len(loss) == 0:
return 0.
loss = esum(loss) * (1. / self._batch_size)
ret = loss.scalar_value()
loss.backward()
self._trainer.update(self._base_lr * math.pow(self._anneal_base, self._steps / self._anneal_steps))
self._steps += 1
self._init_cg(train=True)
return ret * self._batch_size
def _as_conf_eval(self, features, carriers):
vecs = [carriers[f].vec if f >= 0 else parameter(self._as_pad_repr[i]) for i, f in enumerate(features)]
exprs = self._as_final(self._as_mlp(concatenate(vecs)))
return exprs.value(), exprs
def _as_sent_eval(self, graph, carriers):
gold_heads = graph.proj_heads
loss = []
beamconf = ASBeamConf(len(graph.nodes), 1, np.array(gold_heads), self._as_stack_features, self._as_buffer_features)
beamconf.init_conf(0, True)
total = 0
wrong = 0
while not beamconf.is_complete(0):
valid = beamconf.valid_transitions(0)
if np.count_nonzero(valid) < 1:
break
scores, exprs = self._as_conf_eval(beamconf.extract_features(0), carriers)
best = beamconf.static_oracle(0)
rest = tuple((i, s) for i, s in enumerate(scores) if i != best)
total += 1
if len(rest) > 0:
second, secondScore = max(rest, key=lambda x: x[1])
if scores[best] < scores[second] + 1.0:
loss.append(exprs[second] - exprs[best] + 1.)
wrong += 1
beamconf.make_transition(0, best)
return (total - wrong) / total * (len(graph.nodes) - 1), loss
def ast(self, graph, carriers):
beamconf = ASBeamConf(len(graph.nodes), 1, np.array(graph.heads), self._as_stack_features, self._as_buffer_features)
beamconf.init_conf(0, True)
while not beamconf.is_complete(0):
valid = beamconf.valid_transitions(0)
scores, exprs = self._as_conf_eval(beamconf.extract_features(0), carriers)
action, _ = max(((i, s) for i, s in enumerate(scores) if valid[i]), key=lambda x: x[1])
beamconf.make_transition(0, action)
graph.heads = list(beamconf.get_heads(0))
return self
def _label_arc_eval(self, carriers, head, mod):
if self._label_biaffine:
expr = self._label_scorer(self._label_head_mlp(carriers[head].vec), self._label_mod_mlp(carriers[mod].vec))
else:
expr = self._label_final(self._label_mlp(concatenate([carriers[head].vec, carriers[mod].vec])))
return expr.value(), expr
def _label_sent_eval(self, graph, carriers):
correct = 0
loss = []
for mod, head in enumerate(graph.heads):
if mod > 0 and head >= 0:
scores, exprs= self._label_arc_eval(carriers, head, mod)
if not graph.rels[mod] in self._rels:
continue
answer = self._rels[graph.rels[mod]]
if np.argmax(scores) == answer:
correct += 1
if self._label_discrim:
wrong_pred = max(((i, score) for i, score in enumerate(scores) if i != answer), key=lambda x: x[1])[0]
if scores[answer] < scores[wrong_pred] + 1.:
loss.append((exprs[wrong_pred] - exprs[answer] + 1.))
else:
loss.append(pickneglogsoftmax(exprs, answer))
return correct, loss
def label(self, graph, carriers):
for mod, head in enumerate(graph.heads):
if mod > 0 and head >= 0:
scores, exprs= self._label_arc_eval(carriers, head, mod)
graph.rels[mod] = self._irels[np.argmax(scores)]
return self
def predict(self, graphs, **kwargs):
ahdp = kwargs.get("ahdp", False)
aedp = kwargs.get("aedp", False)
ast = kwargs.get("ast", False)
mst = kwargs.get("mst", False)
label = kwargs.get("label", False)
for graph in graphs:
self._init_cg(train=False)
carriers = self._get_lstm_features(graph.nodes, train=False)
if mst:
for mstparser in self._mst_parsers:
mstparser.predict(graph, carriers)
if ahdp:
for ahdpparser in self._ahdp_parsers:
ahdpparser.predict(graph, carriers)
if aedp:
for aedpparser in self._aedp_parsers:
aedpparser.predict(graph, carriers)
if ast:
self.ast(graph, carriers)
if label:
self.label(graph, carriers)
return graphs
def test(self, graphs=None, filename=None, **kwargs):
utag = kwargs.get("utag", False)
xtag = kwargs.get("xtag", False)
ahdp = kwargs.get("ahdp", False)
aedp = kwargs.get("aedp", False)
ast = kwargs.get("ast", False)
mst = kwargs.get("mst", False)
label = kwargs.get("label", False)
save_prefix = kwargs.get("save_prefix", None)
if graphs is None:
graphs = read_conll(filename)
total = 0
correct_counts = defaultdict(int)
as_uas_correct = 0
as_las_correct = 0
label_correct = 0
ret = 0.
for gold_graph in graphs:
self._init_cg(train=False)
graph = gold_graph.cleaned(node_level=False)
total += len(graph.nodes) - 1
carriers = self._get_lstm_features(graph.nodes, train=False)
if utag:
gold_upos = [x.upos for x in graph.nodes]
for utagger in self._utaggers:
utagger.predict(graph, carriers)
predicted = [x.upos for x in graph.nodes]
correct_counts["{} Accuracy".format(utagger.id)] += POSCorrect(predicted, gold_upos)
if xtag:
gold_xpos = [x.xpos for x in graph.nodes]
for xtagger in self._xtaggers:
xtagger.predict(graph, carriers)
predicted = [x.xpos for x in graph.nodes]
correct_counts["{} Accuracy".format(xtagger.id)] += POSCorrect(predicted, gold_xpos)
if mst:
for mstparser in self._mst_parsers:
mstparser.predict(graph, carriers)
if label:
self.label(graph, carriers)
for i in range(1, len(graph.nodes)):
if gold_graph.heads[i] == graph.heads[i]:
correct_counts["{}-UAS".format(mstparser.id)] += 1
if gold_graph.rels[i].split(":")[0] == graph.rels[i].split(":")[0]:
correct_counts["{}-LAS".format(mstparser.id)] += 1
if save_prefix:
write_conll("{}_{}_{}.conllu".format(save_prefix, self._epoch, mstparser.id), [graph], append=True)
if ahdp:
for ahdpparser in self._ahdp_parsers:
ahdpparser.predict(graph, carriers)
if label:
self.label(graph, carriers)
for i in range(1, len(graph.nodes)):
if gold_graph.heads[i] == graph.heads[i]:
correct_counts["{}-UAS".format(ahdpparser.id)] += 1
if gold_graph.rels[i].split(":")[0] == graph.rels[i].split(":")[0]:
correct_counts["{}-LAS".format(ahdpparser.id)] += 1
if save_prefix:
write_conll("{}_{}_{}.conllu".format(save_prefix, self._epoch, ahdpparser.id), [graph], append=True)
if aedp:
for aedpparser in self._aedp_parsers:
aedpparser.predict(graph, carriers)
if label:
self.label(graph, carriers)
for i in range(1, len(graph.nodes)):
if gold_graph.heads[i] == graph.heads[i]:
correct_counts["{}-UAS".format(aedpparser.id)] += 1
if gold_graph.rels[i].split(":")[0] == graph.rels[i].split(":")[0]:
correct_counts["{}-LAS".format(aedpparser.id)] += 1
if save_prefix:
write_conll("{}_{}_{}.conllu".format(save_prefix, self._epoch, aedpparser.id), [graph], append=True)
if ast:
self.ast(graph, carriers)
if label:
self.label(graph, carriers)
for i in range(1, len(graph.nodes)):
if gold_graph.heads[i] == graph.heads[i]:
as_uas_correct += 1
if gold_graph.rels[i].split(":")[0] == graph.rels[i].split(":")[0]:
as_las_correct += 1
if save_prefix:
write_conll("{}_{}_as.conllu".format(save_prefix, self._epoch), [graph], append=True)
if not ahdp and not aedp and not mst and not ast and label:
graph.heads = np.copy(gold_graph.heads)
self.label(graph, carriers)
for i in range(1, len(graph.nodes)):
if gold_graph.rels[i].split(":")[0] == graph.rels[i].split(":")[0]:
label_correct += 1
for id in sorted(correct_counts):
print(id, correct_counts[id] / total)
if label and "-LAS" in id:
ret = max(ret, correct_counts[id])
if not label and "-UAS" in id:
ret = max(ret, correct_counts[id])
if ast:
if label:
print("AS-UAS", as_uas_correct / total)
print("AS-LAS", as_las_correct / total)
ret = max(ret, as_las_correct)
else:
print("AS-UAS", as_uas_correct / total)
ret = max(ret, as_uas_correct)
if not ahdp and not aedp and not mst and not ast and label:
print("LA", label_correct / total)
ret = max(ret, label_correct)
return ret / total
def fine_tune(self, filename, ratio=0.95, max_steps=1000, eval_steps=100, decay_evals=5, decay_times=0, dev=None, **kwargs):
graphs = read_conll(filename)
graphs_list = [list(random.sample(graphs, int(len(graphs) * ratio)))]
return self.train("", max_steps, eval_steps, decay_evals, decay_times, dev, graphs_list, **kwargs)
def train_small(self, filename, split_ratio=0.9, **kwargs):
save_prefix = kwargs.get("save_prefix", None)
graphs = read_conll(filename)
random.shuffle(graphs)
train_len = int(len(graphs) * split_ratio)
train_graphs = [graphs[:train_len]]
dev_graphs = graphs[train_len:]
if save_prefix is not None:
write_conll("{}_train.conllu".format(save_prefix), train_graphs[0])
write_conll("{}_dev.conllu".format(save_prefix), dev_graphs)
return self.train("", graphs_list=train_graphs, dev_graphs=dev_graphs, **kwargs)
def train(self, filename, max_steps=1000, eval_steps=100, decay_evals=5, decay_times=0, dev=None, graphs_list=None, dev_portion=0.8, dev_graphs=None, **kwargs):
if graphs_list is None:
if isinstance(filename, str):
graphs_list = [read_conll(filename)]
elif isinstance(filename, list):
graphs_list = [read_conll(f) for f in filename]
total = 0
proj_count = 0
total_trees = 0
for graphs in graphs_list:
total_trees += len(graphs)
for g in graphs:
total += len(g.nodes) - 1
if is_projective(g.heads):
g.proj_heads = g.heads
proj_count += 1
else:
g.proj_heads = projectivize(g.heads)
print("Training set projective ratio", proj_count / total_trees)
train_set_steps = total / self._batch_size
eval_steps = max(int(train_set_steps * 0.25), eval_steps)
save_prefix = kwargs.get("save_prefix", None)
if dev is not None and dev_graphs is None:
dev_graphs = read_conll(dev)
dev_samples = int(len(dev_graphs) * dev_portion)
dev_samples = min(dev_samples, 1000)
dev_graphs = list(random.sample(dev_graphs, dev_samples))
if save_prefix is not None:
write_conll("{}_dev.conllu".format(save_prefix), dev_graphs)
utag = kwargs.get("utag", False)
xtag = kwargs.get("xtag", False)
ahdp = kwargs.get("ahdp", False)
aedp = kwargs.get("aedp", False)
ast = kwargs.get("ast", False)
mst = kwargs.get("mst", False)
label = kwargs.get("label", False)
self._steps = 0
self._epoch = 0
self._base_lr = 1.
max_dev = 0.
max_dev_ep = 0
i = 0
t0 = time.time()
self._init_cg(train=True)
loss = []
loss_sum = 0.0
total_tokens = 0
num_tokens = 0
correct_counts = defaultdict(float)
as_correct = 0
label_correct = 0
if dev_graphs is not None:
performance = self.test(graphs=dev_graphs, **kwargs)
for graph in shuffled_balanced_stream(graphs_list):
i += 1
if i % 100 == 0:
print(i, "{0:.2f}s".format(time.time() - t0), end=" ")
sys.stdout.flush()
t0 = time.time()
carriers = self._get_lstm_features(graph.nodes, train=True)
num_tokens += len(graph.nodes) - 1
total_tokens += len(graph.nodes) - 1
if utag:
for utagger in self._utaggers:
c, l = utagger.sent_loss(graph, carriers)
correct_counts[utagger.id] += c
if len(l) > 0:
loss.append(esum(l) * (self._utagger_weight / self._utagger_num))
if xtag:
for xtagger in self._xtaggers:
c, l = xtagger.sent_loss(graph, carriers)
correct_counts[xtagger.id] += c
if len(l) > 0:
loss.append(esum(l) * (self._xtagger_weight / self._xtagger_num))
if ast:
c, l = self._as_sent_eval(graph, carriers)
as_correct += c
if len(l) > 0:
loss.append(esum(l) * self._as_weight)
if mst and len(graph.nodes) < 100:
for mstparser in self._mst_parsers:
c, l = mstparser.sent_loss(graph, carriers)
correct_counts[mstparser.id] += c
if len(l) > 0:
loss.append(esum(l) * (self._mst_weight / self._mst_num))
if ahdp and len(graph.nodes) < 100:
for ahdpparser in self._ahdp_parsers:
c, l = ahdpparser.sent_loss(graph, carriers)
correct_counts[ahdpparser.id] += c
if len(l) > 0:
loss.append(esum(l) * (self._ahdp_weight / self._ahdp_num))
if aedp and len(graph.nodes) < 100:
for aedpparser in self._aedp_parsers:
c, l = aedpparser.sent_loss(graph, carriers)
correct_counts[aedpparser.id] += c
if len(l) > 0:
loss.append(esum(l) * (self._aedp_weight / self._aedp_num))
if label:
c, l = self._label_sent_eval(graph, carriers)
label_correct += c
if len(l) > 0:
loss.append(esum(l) * self._label_weight)
if num_tokens >= self._batch_size:
loss_sum += self._minibatch_update(loss, num_tokens)
loss = []
num_tokens = 0
if self._steps % eval_steps == 0:
self._next_epoch()
print()
self._trainer.status()
print()
print("Total Loss", loss_sum, "Avg", loss_sum / total_tokens)
for id in sorted(correct_counts):
print("Train {} Acc".format(id), correct_counts[id] / total_tokens)
if ast:
print("Train AS Acc", as_correct / total_tokens)
if label:
print("Train Label Acc", label_correct / total_tokens)
loss_sum = 0.0
total_tokens = 0
num_tokens = 0
correct_counts = defaultdict(float)
as_correct = 0
label_correct = 0
if self._steps >= max_steps:
break
if dev_graphs is not None:
performance = self.test(graphs=dev_graphs, **kwargs)
if performance >= max_dev:
max_dev = performance
max_dev_ep = 0
else:
max_dev_ep += 1
if save_prefix:
self.save_model("{}_{}_model".format(save_prefix, max_dev_ep))
if max_dev_ep >= decay_evals:
if decay_times > 0:
decay_times -= 1
max_dev_ep = 0
self._base_lr /= 3.
print("Learning rate decayed!")
print("Current decay ratio", self._base_lr * math.pow(self._anneal_base, self._steps / self._anneal_steps))
else:
break
return self
def _init_cg(self, train=False):
renew_cg()
if train:
self._bilstm.set_dropout(self._bilstm_dropout)
if self._cdims > 0 and self._char_lstm_dims > 0:
self._char_lstm.set_dropout(self._char_lstm_dropout)
self._as_mlp.set_dropout(self._as_mlp_dropout)
if self._label_biaffine:
self._label_head_mlp.set_dropout(self._label_mlp_dropout)
self._label_mod_mlp.set_dropout(self._label_mlp_dropout)
else:
self._label_mlp.set_dropout(self._label_mlp_dropout)
else:
self._bilstm.set_dropout(0.)
if self._cdims > 0 and self._char_lstm_dims > 0:
self._char_lstm.set_dropout(0.)
self._as_mlp.set_dropout(0.)
if self._label_biaffine:
self._label_head_mlp.set_dropout(0.)
self._label_mod_mlp.set_dropout(0.)
else:
self._label_mlp.set_dropout(0.)
for utagger in self._utaggers:
utagger.init_cg(train)
for xtagger in self._xtaggers:
xtagger.init_cg(train)
for mstparser in self._mst_parsers:
mstparser.init_cg(train)
for ahdpparser in self._ahdp_parsers:
ahdpparser.init_cg(train)
for aedpparser in self._aedp_parsers:
aedpparser.init_cg(train)
def finish(self, **kwargs):
print()
if __name__ == '__main__':
fire.Fire(CDParser)
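# Illustrative CLI sketch (python-fire can chain the methods above because each
# returns self; the module path, file names and flags are placeholders):
#
#   python -m cdparser create-parser --wdims 128 - \
#       build-vocab train.conllu - init-model - \
#       train train.conllu --dev dev.conllu --mst True --label True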
|
the-stack_106_18615
|
import kivy
from kivy.app import App
from kivy.uix.widget import Widget
import kivent_core
from kivy.properties import NumericProperty, StringProperty
from math import pi, cos, sin
def get_triangle_data(side_length):
return {
'vertices': {0: {'pos': (-side_length/2., -side_length/2.),
'v_color': (255, 0, 0, 255)},
1: {'pos': (side_length/2., -side_length/2.),
'v_color': (0, 255, 0, 255)},
2: {'pos': (0., side_length/2.),
'v_color': (0, 0, 255, 255)},
},
'indices': [0, 1, 2],
'vertex_count': 3,
'index_count': 3,
}
def get_rectangle_data(height, width):
return {
'vertices': {0: {'pos': (-width/2., -height/2.),
'v_color': (255, 0, 0, 255)},
1: {'pos': (-width/2., height/2.),
'v_color': (0, 255, 0, 255)},
2: {'pos': (width/2., height/2.),
'v_color': (0, 0, 255, 255)},
3: {'pos': (width/2., -height/2.),
'v_color': (255, 0, 255, 255)}
},
'indices': [0, 1, 2, 2, 3, 0],
'vertex_count': 4,
'index_count': 6,
}
def get_regular_polygon(sides, r, middle_color, edge_color, pos=(0., 0.)):
x, y = pos
angle = 2 * pi / sides
all_verts = {}
all_verts[0] = {'pos': pos, 'v_color': middle_color}
i = 0
indices = []
vert_count = 1
ind_count = 0
ind_ext = indices.extend
c = 1
for s in range(sides):
new_pos = [x + (r * sin(s * angle)), y + (r * cos(s * angle))]
all_verts[vert_count] = {'pos': new_pos, 'v_color': edge_color}
vert_count += 1
if c < sides:
ind_ext((c, 0, c+1))
else:
ind_ext((c, 0, 1))
ind_count += 3
c += 1
return {'indices': indices, 'vertices': all_verts,
'vertex_count': vert_count, 'index_count': ind_count}
def get_layered_regular_polygon(levels, sides, middle_color,
radius_color_dict, pos=(0., 0.)):
'''
radius_color_dict maps each level number to (radius, color), i.e. {level: (r, (r, g, b, a))}
'''
x, y = pos
angle = 2 * pi / sides
all_verts = {}
all_verts[0] = {'pos': pos, 'v_color': middle_color}
r_total = 0
i = 0
indices = []
vert_count = 1
ind_count = 0
ind_ext = indices.extend
for count in range(levels):
level = i + 1
r, color = radius_color_dict[level]
for s in range(sides):
new_pos = list((x + (r + r_total) * sin(s * angle),
y + (r + r_total) * cos(s * angle)))
all_verts[vert_count] = {'pos': new_pos, 'v_color': color}
vert_count += 1
r_total += r
c = 1  # side number we are on in the loop
if level == 1:
for each in range(sides):
if c < sides:
ind_ext((c, 0, c+1))
else:
ind_ext((c, 0, 1))
ind_count += 3
c += 1
else:
for each in range(sides):
offset = sides*(i-1)
if c < sides:
ind_ext((c+sides+offset, c+sides+1+offset, c+offset))
ind_ext((c+offset, c+1+offset, c+sides+1+offset))
else:
ind_ext((c+sides+offset, sides+1+offset, sides+offset))
ind_ext((sides+offset, 1+offset, sides+1+offset))
ind_count += 6
c += 1
i += 1
return {'indices': indices, 'vertices': all_verts,
'vertex_count': vert_count, 'index_count': ind_count}
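# Illustrative example (it mirrors the call in TestGame.load_shapes below): a
# 2-level, 32-sided disc with a magenta centre, a 75 px red ring and a thin
# transparent-white rim:
#   get_layered_regular_polygon(2, 32, (255, 0, 255, 255),
#                               {1: (75., (255, 0, 0, 255)),
#                                2: (5., (255, 255, 255, 0))})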
class TestGame(Widget):
entity_id = NumericProperty(None)
shape_to_draw = StringProperty(None, allownone=True)
def __init__(self, **kwargs):
super(TestGame, self).__init__(**kwargs)
self.shapes = {}
self.gameworld.init_gameworld(
['position', 'poly_renderer'],
callback=self.init_game)
def init_game(self):
self.setup_states()
self.set_state()
self.load_shapes()
def create_shape(self, pos, shape_name):
create_dict = {
'position': pos,
'poly_renderer': {'model_key': self.shapes[shape_name]},
}
return self.gameworld.init_entity(create_dict,
['position', 'poly_renderer'])
def on_touch_down(self, touch):
super(TestGame, self).on_touch_down(touch)
if not self.ids.button_tray.collide_point(touch.x, touch.y):
if self.shape_to_draw is not None:
self.create_shape(touch.pos, self.shape_to_draw)
def draw_shape_callback(self, shape_type):
self.shape_to_draw = shape_type
def stop_drawing(self):
self.shape_to_draw = None
def clear(self):
self.gameworld.clear_entities()
def load_shapes(self):
model_manager = self.gameworld.model_manager
init_entity = self.gameworld.init_entity
triangle_data = get_triangle_data(150.)
triangle_model = model_manager.load_model(
'vertex_format_2f4ub',
triangle_data['vertex_count'],
triangle_data['index_count'],
'triangle',
indices=triangle_data['indices'],
vertices=triangle_data['vertices']
)
self.shapes['triangle_model'] = triangle_model
rectangle_data = get_rectangle_data(100., 150.)
rectangle_model = model_manager.load_model(
'vertex_format_2f4ub',
rectangle_data['vertex_count'],
rectangle_data['index_count'],
'rectangle',
indices=rectangle_data['indices'],
vertices=rectangle_data['vertices']
)
self.shapes['rectangle_model'] = rectangle_model
circle_data = get_regular_polygon(32, 150., (255, 255, 0, 255),
(45, 0, 125, 255))
circle_model = model_manager.load_model(
'vertex_format_2f4ub',
circle_data['vertex_count'],
circle_data['index_count'],
'circle',
indices=circle_data['indices'],
vertices=circle_data['vertices'],
)
self.shapes['circle_model'] = circle_model
layered_circle_data = get_layered_regular_polygon(
2, 32,
(255, 0, 255, 255),
{1: (75., (255, 0, 0, 255)),
2: (5., (255, 255, 255, 0))}
)
layered_circle_model = model_manager.load_model(
'vertex_format_2f4ub',
layered_circle_data['vertex_count'],
layered_circle_data['index_count'],
'layered_circle',
vertices=layered_circle_data['vertices'],
indices=layered_circle_data['indices']
)
self.shapes['layered_circle_model'] = layered_circle_model
def setup_states(self):
self.gameworld.add_state(state_name='main',
systems_added=[],
systems_removed=[], systems_paused=[],
systems_unpaused=[],
screenmanager_screen='main')
def set_state(self):
self.gameworld.state = 'main'
class YourAppNameApp(App):
def build(self):
pass
if __name__ == '__main__':
YourAppNameApp().run()
|
the-stack_106_18618
|
import click
import datetime
from random import randint
from math import floor
from flask.cli import with_appcontext
from .models import Proposal, db
from grant.milestone.models import Milestone
from grant.comment.models import Comment
from grant.utils.enums import ProposalStatus, Category, ProposalStage
from grant.user.models import User
@click.command()
@click.argument('stage')
@click.argument('user_id')
@click.argument('proposal_id')
@click.argument('title')
@click.argument('content')
@with_appcontext
def create_proposal(stage, user_id, proposal_id, title, content):
proposal = Proposal.create(stage=stage,
user_id=user_id,
proposal_id=proposal_id,
title=title,
content=content)
db.session.add(proposal)
db.session.commit()
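# Illustrative CLI sketch (assumes these commands are registered on the Flask
# app; click turns underscores into dashes):
#   flask create-proposal WIP 1 42 "My title" "My content"
#   flask create-proposals 25
#   flask retire-v1-proposals dry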
@click.command()
@click.argument('count', type=int)
@with_appcontext
def create_proposals(count):
user = User.query.filter_by().first()
for i in range(count):
if i < 5:
stage = ProposalStage.WIP
else:
stage = ProposalStage.COMPLETED
p = Proposal.create(
stage=stage,
status=ProposalStatus.LIVE,
title=f'Fake Proposal #{i}',
content=f'My fake proposal content, numero {i}',
brief=f'This is proposal {i} generated by "flask create-proposals"',
category=Category.ACCESSIBILITY,
target="123.456",
payout_address="fake123",
deadline_duration=100
)
p.date_published = datetime.datetime.now()
p.team.append(user)
p.date_approved = datetime.datetime.now()
p.accepted_with_funding = True
p.version = '2'
p.fully_fund_contibution_bounty()
db.session.add(p)
db.session.flush()
num_ms = randint(1, 9)
for j in range(num_ms):
m = Milestone(
title=f'Fake MS {j}',
content=f'Fake milestone #{j} on fake proposal #{i}!',
days_estimated='10',
payout_percent=str(floor(1 / num_ms * 100)),
immediate_payout=j == 0,
proposal_id=p.id,
index=j
)
db.session.add(m)
for j in range(100):
c = Comment(
proposal_id=p.id,
user_id=user.id,
parent_comment_id=None,
content=f'Fake comment #{j} on fake proposal #{i}!'
)
db.session.add(c)
Milestone.set_v2_date_estimates(p)
db.session.add(p)
db.session.commit()
print(f'Added {count} LIVE fake proposals')
@click.command()
@click.argument('dry', required=False)
@with_appcontext
def retire_v1_proposals(dry):
now = datetime.datetime.now()
proposals_funding_required = Proposal.query.filter_by(stage="FUNDING_REQUIRED").all()
proposals_draft = Proposal.query.filter_by(status=ProposalStatus.DRAFT).all()
proposals_pending = Proposal.query.filter_by(status=ProposalStatus.PENDING).all()
proposals_staking = Proposal.query.filter_by(status=ProposalStatus.STAKING).all()
modified_funding_required_count = 0
modified_draft_count = 0
modified_pending_count = 0
modified_staking_count = 0
deleted_draft_count = 0
if not proposals_funding_required and not proposals_draft and not proposals_pending and not proposals_staking:
print("No proposals found. Exiting...")
return
print(f"Found {len(proposals_funding_required)} 'FUNDING_REQUIRED' proposals to modify")
print(f"Found {len(proposals_draft)} 'DRAFT' proposals to modify")
print(f"Found {len(proposals_pending)} 'PENDING' proposals to modify")
print(f"Found {len(proposals_staking)} 'STAKING' proposals to modify")
if dry:
print(f"This is a dry run. Changes will not be committed to the database")
confirm = input("Continue? (y/n) ")
if confirm != "y":
print("Exiting...")
return
# move 'FUNDING_REQUIRED' proposals to a failed state
for p in proposals_funding_required:
if not dry:
new_deadline = (now - p.date_published).total_seconds()
p.stage = ProposalStage.FAILED
p.deadline_duration = int(new_deadline)
db.session.add(p)
modified_funding_required_count += 1
print(f"Modified 'FUNDING_REQUIRED' proposal {p.id} - {p.title}")
# reset proposal to draft state
def convert_proposal_to_v2_draft(proposal):
milestones = Milestone.query.filter_by(proposal_id=proposal.id).all()
if not dry:
# reset target because v2 estimates are in USD
proposal.target = '0'
proposal.version = '2'
proposal.stage = ProposalStage.PREVIEW
proposal.status = ProposalStatus.DRAFT
db.session.add(proposal)
for m in milestones:
# clear date estimated because v2 proposals use days_estimated (date_estimated is dynamically set)
m.date_estimated = None
db.session.add(m)
print(f"Modified {len(milestones)} milestones on proposal {p.id}")
# delete drafts that have no content
def delete_stale_draft(proposal):
if proposal.title or proposal.brief or proposal.content or proposal.category or proposal.target != "0":
return False
if proposal.payout_address or proposal.milestones:
return False
if not dry:
db.session.delete(proposal)
return True
for p in proposals_draft:
is_stale = delete_stale_draft(p)
if is_stale:
deleted_draft_count += 1
print(f"Deleted stale 'DRAFT' proposal {p.id} - {p.title}")
continue
convert_proposal_to_v2_draft(p)
modified_draft_count += 1
print(f"Modified 'DRAFT' proposal {p.id} - {p.title}")
for p in proposals_pending:
convert_proposal_to_v2_draft(p)
modified_pending_count += 1
print(f"Modified 'PENDING' proposal {p.id} - {p.title}")
for p in proposals_staking:
convert_proposal_to_v2_draft(p)
modified_staking_count += 1
print(f"Modified 'STAKING' proposal {p.id} - {p.title}")
if not dry:
print(f"Committing changes to database")
db.session.commit()
print("")
print(f"Modified {modified_funding_required_count} 'FUNDING_REQUIRED' proposals")
print(f"Modified {modified_draft_count} 'DRAFT' proposals")
print(f"Modified {modified_pending_count} 'PENDING' proposals")
print(f"Modified {modified_staking_count} 'STAKING' proposals")
print(f"Deleted {deleted_draft_count} stale 'DRAFT' proposals")
|
the-stack_106_18620
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.autograd import Function
import torch.nn.functional as F
import dynamicconv_cuda
from fairseq_mod import utils
from fairseq_mod.modules.unfold import unfold1d
from fairseq_mod.incremental_decoding_utils import with_incremental_state
from fairseq_mod.modules.fairseq_dropout import FairseqDropout
class dynamicconvFunction(Function):
@staticmethod
def forward(ctx, x, weights, padding_l):
ctx.padding_l = padding_l
outputs = dynamicconv_cuda.forward(x, weights, padding_l)
variables = [x, weights]
ctx.save_for_backward(*variables)
return outputs[0]
@staticmethod
def backward(ctx, grad_output):
outputs = dynamicconv_cuda.backward(
grad_output.contiguous(),
ctx.padding_l,
*ctx.saved_tensors)
grad_input, grad_weights = outputs
return grad_input, grad_weights, None
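# Shape contract assumed by the layer below (inferred from how the Function is
# invoked in DynamicconvLayer.forward): x is laid out as (B, C, T) and weights as
# (B, H, K, T); the CUDA kernel returns an output in the same (B, C, T) layout,
# which the caller then permutes back to (T, B, C).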
@with_incremental_state
class DynamicconvLayer(nn.Module):
def __init__(
self,
input_size,
kernel_size=1,
padding_l=None,
weight_softmax=False,
num_heads=1,
weight_dropout=0.,
bias=False,
renorm_padding=False,
conv_bias=False,
query_size=None,
):
super(DynamicconvLayer, self).__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_softmax = weight_softmax
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.renorm_padding = renorm_padding
self.bias = bias
self.weight_linear = nn.Linear(input_size, num_heads * kernel_size, bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight_linear.weight)
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.)
            nn.init.constant_(self.weight_linear.bias, 0.)
def forward(self, x, incremental_state=None, query=None, unfold=None):
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
# R = C // H
# during inference time, incremental BMM is faster
if incremental_state is not None:
unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
# during training time, use CUDA kernel
else:
weight = self.weight_linear(x).view(T, B, H, K)
if self.weight_softmax:
weight = F.softmax(weight, dim=-1)
if self.weight_dropout_module.p:
weight = self.weight_dropout_module(weight)
weight = weight.permute(1, 2, 3, 0).contiguous()
self.filters = weight
x = x.permute(1, 2, 0).contiguous()
output = dynamicconvFunction.apply(x, weight, self.padding_l).permute(2, 0, 1)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def _forward_unfolded(self, x, incremental_state, query):
'''The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right.'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T*B*H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K-1:
weight = weight.narrow(1, K-T, T)
K, padding_l = T, T-1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T*B*H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
'''Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
weight = self.weight_linear(query).view(T*B*H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B*H, K).transpose(0, 1)
x = x.view(T, B*H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K-1:
weight = weight.narrow(2, K-T, T)
K, P = T, T-1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
|
the-stack_106_18621
|
def createWordList(filename):
text_file= open(filename,"r")
temp = text_file.read().split("\n")
text_file.close()
temp.pop() #remove the last new line
return temp
def canWeMakeIt(myWord, myLetters):
listLetters=[]
for letter in myLetters:
listLetters.append (letter)
for x in myWord:
if x not in listLetters:
return False
    return True
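# Illustrative behaviour (note that letters may be reused, since only membership
# is tested rather than letter counts):
#   canWeMakeIt("cab", "abcde")  -> True
#   canWeMakeIt("cabz", "abcde") -> False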
def specificLength(s,l):
result = []
if l == 1:
for c in s:
result.append(c)
return result
for c in s:
words = specificLength(s.replace(c,'',1), l-1)
for w in words:
result.append(w+c)
return result
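# Illustrative behaviour: specificLength("abc", 2) yields every length-2 ordering
# of distinct positions in "abc", i.e. ['ba', 'ca', 'ab', 'cb', 'ac', 'bc']
# (the list order follows the recursion above).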
def makeCandidates(s):
wordSet=set()
    for a in range(1,len(s)+1):
word= specificLength(s,a)
for x in word:
wordSet.add(x)
return wordSet
def wordList(s):
list1=makeCandidates(s)
list2=createWordList("wordlist.txt")
list3=[]
for a in list1:
if a in list2:
list3.append(a)
return list3
def getWordPoints(myWord):
letterPoints = {'a':1, 'b':3, 'c':3, 'd':2, 'e':1, 'f':4,\
'g':2, 'h':4, 'i':1, 'j':8, 'k':5, 'l':1,\
'm':3, 'n':1, 'o':1, 'p':3, 'q':10, 'r':1,\
's':1, 't':1, 'u':1, 'v':4, 'w':4, 'x':8,\
'y':4, 'z':10}
result=0
for letter in myWord:
result=result+letterPoints[letter]
return result
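# Worked example: getWordPoints("quiz") = 10 + 1 + 1 + 10 = 22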
def scrabbleWords(myLetters):
list1=wordList(myLetters)
lst=list()
for word in list1:
point=getWordPoints(word)
lst.append((point,word))
lst.sort(reverse=True)
result=[]
for point,word in lst:
result.append([point,word])
return result
|
the-stack_106_18622
|
from collections import deque, defaultdict
class Solution:
def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
'''Time: O(m^2*n) Space: O(m^2*n)'''
if endWord not in wordList:
return 0
if beginWord not in wordList:
wordList.append(beginWord)
graph = defaultdict(list)
for word in wordList: # m times
for i in range(len(word)): # n times
graph[word[:i] + '*' + word[i + 1:]].append(word) # This takes O(m)!!
queue = deque()
queue.append((beginWord, 1))
visited = set([beginWord])
while queue:
word, dist = queue.popleft()
if word == endWord:
return dist
for i in range(len(word)):
for neighbor in graph[word[:i] + '*' + word[i + 1:]]:
if neighbor not in visited:
queue.append((neighbor, dist + 1))
visited.add(neighbor)
return 0
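# Illustrative example (the classic word-ladder case):
#   beginWord = "hit", endWord = "cog"
#   wordList  = ["hot", "dot", "dog", "lot", "log", "cog"]
#   -> 5, counting the words in the ladder hit -> hot -> dot -> dog -> cog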
|
the-stack_106_18623
|
import collections.abc
import re
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
Optional,
Sequence,
Type,
Union,
IO,
)
import warnings
from io import BytesIO
from datetime import datetime
from base64 import b64encode, b64decode
from numbers import Integral
from types import SimpleNamespace
from functools import singledispatch
from fontTools.misc import etree
from fontTools.misc.textTools import tostr
# By default, we
# - deserialize <data> elements as bytes and
# - serialize bytes as <data> elements.
# Before, on Python 2, we
# - deserialized <data> elements as plistlib.Data objects, in order to
# distinguish them from the built-in str type (which is bytes on python2)
# - serialized bytes as <string> elements (they must have only contained
# ASCII characters in this case)
# You can pass use_builtin_types=[True|False] to the load/dump etc. functions
# to enforce a specific treatment.
# NOTE that unicode type always maps to <string> element, and plistlib.Data
# always maps to <data> element, regardless of use_builtin_types.
USE_BUILTIN_TYPES = True
XML_DECLARATION = b"""<?xml version='1.0' encoding='UTF-8'?>"""
PLIST_DOCTYPE = (
b'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
b'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">'
)
# Date should conform to a subset of ISO 8601:
# YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'
_date_parser = re.compile(
r"(?P<year>\d\d\d\d)"
r"(?:-(?P<month>\d\d)"
r"(?:-(?P<day>\d\d)"
r"(?:T(?P<hour>\d\d)"
r"(?::(?P<minute>\d\d)"
r"(?::(?P<second>\d\d))"
r"?)?)?)?)?Z",
re.ASCII,
)
def _date_from_string(s: str) -> datetime:
order = ("year", "month", "day", "hour", "minute", "second")
m = _date_parser.match(s)
if m is None:
        raise ValueError(f"Expected ISO 8601 date string, but got '{s!r}'.")
gd = m.groupdict()
lst = []
for key in order:
val = gd[key]
if val is None:
break
lst.append(int(val))
# NOTE: mypy doesn't know that lst is 6 elements long.
return datetime(*lst) # type:ignore
def _date_to_string(d: datetime) -> str:
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
d.year,
d.month,
d.day,
d.hour,
d.minute,
d.second,
)
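# Illustrative round trip (not part of the original module):
#   _date_from_string("2021-06-01T12:30:00Z") == datetime(2021, 6, 1, 12, 30, 0)
#   _date_to_string(datetime(2021, 6, 1, 12, 30, 0)) == "2021-06-01T12:30:00Z"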
class Data:
"""Represents binary data when ``use_builtin_types=False.``
This class wraps binary data loaded from a plist file when the
``use_builtin_types`` argument to the loading function (:py:func:`fromtree`,
:py:func:`load`, :py:func:`loads`) is false.
The actual binary data is retrieved using the ``data`` attribute.
"""
def __init__(self, data: bytes) -> None:
if not isinstance(data, bytes):
raise TypeError("Expected bytes, found %s" % type(data).__name__)
self.data = data
@classmethod
def fromBase64(cls, data: Union[bytes, str]) -> "Data":
return cls(b64decode(data))
def asBase64(self, maxlinelength: int = 76, indent_level: int = 1) -> bytes:
return _encode_base64(
self.data, maxlinelength=maxlinelength, indent_level=indent_level
)
def __eq__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
return self.data == other.data
elif isinstance(other, bytes):
return self.data == other
else:
return NotImplemented
def __repr__(self) -> str:
return "%s(%s)" % (self.__class__.__name__, repr(self.data))
def _encode_base64(
data: bytes, maxlinelength: Optional[int] = 76, indent_level: int = 1
) -> bytes:
data = b64encode(data)
if data and maxlinelength:
# split into multiple lines right-justified to 'maxlinelength' chars
indent = b"\n" + b" " * indent_level
max_length = max(16, maxlinelength - len(indent))
chunks = []
for i in range(0, len(data), max_length):
chunks.append(indent)
chunks.append(data[i : i + max_length])
chunks.append(indent)
data = b"".join(chunks)
return data
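# Minimal sketch of the behaviour above: even short payloads are wrapped and
# indented one level by default, e.g.
#   _encode_base64(b"abc") == b"\n YWJj\n "
# while passing maxlinelength=None returns the bare base64 bytes b"YWJj".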
# Mypy does not support recursive type aliases as of 0.782, Pylance does.
# https://github.com/python/mypy/issues/731
# https://devblogs.microsoft.com/python/pylance-introduces-five-new-features-that-enable-type-magic-for-python-developers/#1-support-for-recursive-type-aliases
PlistEncodable = Union[
bool,
bytes,
Data,
datetime,
float,
int,
Mapping[str, Any],
Sequence[Any],
str,
]
class PlistTarget:
"""Event handler using the ElementTree Target API that can be
passed to a XMLParser to produce property list objects from XML.
It is based on the CPython plistlib module's _PlistParser class,
but does not use the expat parser.
>>> from fontTools.misc import etree
>>> parser = etree.XMLParser(target=PlistTarget())
>>> result = etree.XML(
... "<dict>"
... " <key>something</key>"
... " <string>blah</string>"
... "</dict>",
... parser=parser)
>>> result == {"something": "blah"}
True
Links:
https://github.com/python/cpython/blob/master/Lib/plistlib.py
http://lxml.de/parsing.html#the-target-parser-interface
"""
def __init__(
self,
use_builtin_types: Optional[bool] = None,
dict_type: Type[MutableMapping[str, Any]] = dict,
) -> None:
self.stack: List[PlistEncodable] = []
self.current_key: Optional[str] = None
self.root: Optional[PlistEncodable] = None
if use_builtin_types is None:
self._use_builtin_types = USE_BUILTIN_TYPES
else:
if use_builtin_types is False:
warnings.warn(
"Setting use_builtin_types to False is deprecated and will be "
"removed soon.",
DeprecationWarning,
)
self._use_builtin_types = use_builtin_types
self._dict_type = dict_type
def start(self, tag: str, attrib: Mapping[str, str]) -> None:
self._data: List[str] = []
handler = _TARGET_START_HANDLERS.get(tag)
if handler is not None:
handler(self)
def end(self, tag: str) -> None:
handler = _TARGET_END_HANDLERS.get(tag)
if handler is not None:
handler(self)
def data(self, data: str) -> None:
self._data.append(data)
def close(self) -> PlistEncodable:
if self.root is None:
raise ValueError("No root set.")
return self.root
# helpers
def add_object(self, value: PlistEncodable) -> None:
if self.current_key is not None:
stack_top = self.stack[-1]
if not isinstance(stack_top, collections.abc.MutableMapping):
raise ValueError("unexpected element: %r" % stack_top)
stack_top[self.current_key] = value
self.current_key = None
elif not self.stack:
# this is the root object
self.root = value
else:
stack_top = self.stack[-1]
if not isinstance(stack_top, list):
raise ValueError("unexpected element: %r" % stack_top)
stack_top.append(value)
def get_data(self) -> str:
data = "".join(self._data)
self._data = []
return data
# event handlers
def start_dict(self: PlistTarget) -> None:
d = self._dict_type()
self.add_object(d)
self.stack.append(d)
def end_dict(self: PlistTarget) -> None:
if self.current_key:
raise ValueError("missing value for key '%s'" % self.current_key)
self.stack.pop()
def end_key(self: PlistTarget) -> None:
if self.current_key or not isinstance(self.stack[-1], collections.abc.Mapping):
raise ValueError("unexpected key")
self.current_key = self.get_data()
def start_array(self: PlistTarget) -> None:
a: List[PlistEncodable] = []
self.add_object(a)
self.stack.append(a)
def end_array(self: PlistTarget) -> None:
self.stack.pop()
def end_true(self: PlistTarget) -> None:
self.add_object(True)
def end_false(self: PlistTarget) -> None:
self.add_object(False)
def end_integer(self: PlistTarget) -> None:
self.add_object(int(self.get_data()))
def end_real(self: PlistTarget) -> None:
self.add_object(float(self.get_data()))
def end_string(self: PlistTarget) -> None:
self.add_object(self.get_data())
def end_data(self: PlistTarget) -> None:
if self._use_builtin_types:
self.add_object(b64decode(self.get_data()))
else:
self.add_object(Data.fromBase64(self.get_data()))
def end_date(self: PlistTarget) -> None:
self.add_object(_date_from_string(self.get_data()))
_TARGET_START_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = {
"dict": start_dict,
"array": start_array,
}
_TARGET_END_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = {
"dict": end_dict,
"array": end_array,
"key": end_key,
"true": end_true,
"false": end_false,
"integer": end_integer,
"real": end_real,
"string": end_string,
"data": end_data,
"date": end_date,
}
# functions to build element tree from plist data
def _string_element(value: str, ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("string")
el.text = value
return el
def _bool_element(value: bool, ctx: SimpleNamespace) -> etree.Element:
if value:
return etree.Element("true")
return etree.Element("false")
def _integer_element(value: int, ctx: SimpleNamespace) -> etree.Element:
if -1 << 63 <= value < 1 << 64:
el = etree.Element("integer")
el.text = "%d" % value
return el
raise OverflowError(value)
def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("real")
el.text = repr(value)
return el
def _dict_element(d: Mapping[str, PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("dict")
items = d.items()
if ctx.sort_keys:
items = sorted(items) # type: ignore
ctx.indent_level += 1
for key, value in items:
if not isinstance(key, str):
if ctx.skipkeys:
continue
raise TypeError("keys must be strings")
k = etree.SubElement(el, "key")
k.text = tostr(key, "utf-8")
el.append(_make_element(value, ctx))
ctx.indent_level -= 1
return el
def _array_element(array: Sequence[PlistEncodable], ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("array")
if len(array) == 0:
return el
ctx.indent_level += 1
for value in array:
el.append(_make_element(value, ctx))
ctx.indent_level -= 1
return el
def _date_element(date: datetime, ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("date")
el.text = _date_to_string(date)
return el
def _data_element(data: bytes, ctx: SimpleNamespace) -> etree.Element:
el = etree.Element("data")
# NOTE: mypy is confused about whether el.text should be str or bytes.
el.text = _encode_base64( # type: ignore
data,
maxlinelength=(76 if ctx.pretty_print else None),
indent_level=ctx.indent_level,
)
return el
def _string_or_data_element(raw_bytes: bytes, ctx: SimpleNamespace) -> etree.Element:
if ctx.use_builtin_types:
return _data_element(raw_bytes, ctx)
else:
try:
string = raw_bytes.decode(encoding="ascii", errors="strict")
except UnicodeDecodeError:
raise ValueError(
"invalid non-ASCII bytes; use unicode string instead: %r" % raw_bytes
)
return _string_element(string, ctx)
# The following is probably not entirely correct. The signature should take `Any`
# and return `NoReturn`. At the time of this writing, neither mypy nor Pyright
# can deal with singledispatch properly and will apply the signature of the base
# function to all others. Being slightly dishonest makes it type-check and return
# usable typing information for the optimistic case.
@singledispatch
def _make_element(value: PlistEncodable, ctx: SimpleNamespace) -> etree.Element:
raise TypeError("unsupported type: %s" % type(value))
_make_element.register(str)(_string_element)
_make_element.register(bool)(_bool_element)
_make_element.register(Integral)(_integer_element)
_make_element.register(float)(_real_element)
_make_element.register(collections.abc.Mapping)(_dict_element)
_make_element.register(list)(_array_element)
_make_element.register(tuple)(_array_element)
_make_element.register(datetime)(_date_element)
_make_element.register(bytes)(_string_or_data_element)
_make_element.register(bytearray)(_data_element)
_make_element.register(Data)(lambda v, ctx: _data_element(v.data, ctx))
# Public functions to create element tree from plist-compatible python
# data structures and vice versa, for use when (de)serializing GLIF xml.
def totree(
value: PlistEncodable,
sort_keys: bool = True,
skipkeys: bool = False,
use_builtin_types: Optional[bool] = None,
pretty_print: bool = True,
indent_level: int = 1,
) -> etree.Element:
"""Convert a value derived from a plist into an XML tree.
Args:
value: Any kind of value to be serialized to XML.
sort_keys: Whether keys of dictionaries should be sorted.
skipkeys (bool): Whether to silently skip non-string dictionary
keys.
use_builtin_types (bool): If true, byte strings will be
encoded in Base-64 and wrapped in a ``data`` tag; if
false, they will be either stored as ASCII strings or an
exception raised if they cannot be decoded as such. Defaults
to ``True`` if not present. Deprecated.
pretty_print (bool): Whether to indent the output.
indent_level (int): Level of indentation when serializing.
Returns: an ``etree`` ``Element`` object.
Raises:
``TypeError``
if non-string dictionary keys are serialized
and ``skipkeys`` is false.
``ValueError``
if non-ASCII binary data is present
and `use_builtin_types` is false.
"""
    if use_builtin_types is None:
        use_builtin_types = USE_BUILTIN_TYPES
context = SimpleNamespace(
sort_keys=sort_keys,
skipkeys=skipkeys,
use_builtin_types=use_builtin_types,
pretty_print=pretty_print,
indent_level=indent_level,
)
return _make_element(value, context)
def fromtree(
tree: etree.Element,
use_builtin_types: Optional[bool] = None,
dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
"""Convert an XML tree to a plist structure.
Args:
tree: An ``etree`` ``Element``.
use_builtin_types: If True, binary data is deserialized to
bytes strings. If False, it is wrapped in :py:class:`Data`
objects. Defaults to True if not provided. Deprecated.
dict_type: What type to use for dictionaries.
Returns: An object (usually a dictionary).
"""
target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type)
for action, element in etree.iterwalk(tree, events=("start", "end")):
if action == "start":
target.start(element.tag, element.attrib)
elif action == "end":
# if there are no children, parse the leaf's data
if not len(element):
# always pass str, not None
target.data(element.text or "")
target.end(element.tag)
return target.close()
# python3 plistlib API
def load(
fp: IO[bytes],
use_builtin_types: Optional[bool] = None,
dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
"""Load a plist file into an object.
Args:
fp: An opened file.
use_builtin_types: If True, binary data is deserialized to
bytes strings. If False, it is wrapped in :py:class:`Data`
objects. Defaults to True if not provided. Deprecated.
dict_type: What type to use for dictionaries.
Returns:
An object (usually a dictionary) representing the top level of
the plist file.
"""
if not hasattr(fp, "read"):
raise AttributeError("'%s' object has no attribute 'read'" % type(fp).__name__)
target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type)
parser = etree.XMLParser(target=target)
result = etree.parse(fp, parser=parser)
# lxml returns the target object directly, while ElementTree wraps
# it as the root of an ElementTree object
try:
return result.getroot()
except AttributeError:
return result
def loads(
value: bytes,
use_builtin_types: Optional[bool] = None,
dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
"""Load a plist file from a string into an object.
Args:
value: A bytes string containing a plist.
use_builtin_types: If True, binary data is deserialized to
bytes strings. If False, it is wrapped in :py:class:`Data`
objects. Defaults to True if not provided. Deprecated.
dict_type: What type to use for dictionaries.
Returns:
An object (usually a dictionary) representing the top level of
the plist file.
"""
fp = BytesIO(value)
return load(fp, use_builtin_types=use_builtin_types, dict_type=dict_type)
def dump(
value: PlistEncodable,
fp: IO[bytes],
sort_keys: bool = True,
skipkeys: bool = False,
use_builtin_types: Optional[bool] = None,
pretty_print: bool = True,
) -> None:
"""Write a Python object to a plist file.
Args:
value: An object to write.
fp: A file opened for writing.
sort_keys (bool): Whether keys of dictionaries should be sorted.
skipkeys (bool): Whether to silently skip non-string dictionary
keys.
use_builtin_types (bool): If true, byte strings will be
encoded in Base-64 and wrapped in a ``data`` tag; if
false, they will be either stored as ASCII strings or an
            exception raised if they cannot be represented. Defaults
            to ``True`` if not present. Deprecated.
pretty_print (bool): Whether to indent the output.
indent_level (int): Level of indentation when serializing.
Raises:
``TypeError``
if non-string dictionary keys are serialized
and ``skipkeys`` is false.
``ValueError``
if non-representable binary data is present
and `use_builtin_types` is false.
"""
if not hasattr(fp, "write"):
raise AttributeError("'%s' object has no attribute 'write'" % type(fp).__name__)
root = etree.Element("plist", version="1.0")
el = totree(
value,
sort_keys=sort_keys,
skipkeys=skipkeys,
use_builtin_types=use_builtin_types,
pretty_print=pretty_print,
)
root.append(el)
tree = etree.ElementTree(root)
# we write the doctype ourselves instead of using the 'doctype' argument
    # of 'write' method, because lxml will force adding a '\n' even when
# pretty_print is False.
if pretty_print:
header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b""))
else:
header = XML_DECLARATION + PLIST_DOCTYPE
fp.write(header)
tree.write( # type: ignore
fp,
encoding="utf-8",
pretty_print=pretty_print,
xml_declaration=False,
)
def dumps(
value: PlistEncodable,
sort_keys: bool = True,
skipkeys: bool = False,
use_builtin_types: Optional[bool] = None,
pretty_print: bool = True,
) -> bytes:
"""Write a Python object to a string in plist format.
Args:
value: An object to write.
sort_keys (bool): Whether keys of dictionaries should be sorted.
skipkeys (bool): Whether to silently skip non-string dictionary
keys.
use_builtin_types (bool): If true, byte strings will be
encoded in Base-64 and wrapped in a ``data`` tag; if
false, they will be either stored as strings or an
            exception raised if they cannot be represented. Defaults
            to ``True`` if not present. Deprecated.
pretty_print (bool): Whether to indent the output.
indent_level (int): Level of indentation when serializing.
Returns:
string: A plist representation of the Python object.
Raises:
``TypeError``
if non-string dictionary keys are serialized
and ``skipkeys`` is false.
``ValueError``
if non-representable binary data is present
and `use_builtin_types` is false.
"""
fp = BytesIO()
dump(
value,
fp,
sort_keys=sort_keys,
skipkeys=skipkeys,
use_builtin_types=use_builtin_types,
pretty_print=pretty_print,
)
return fp.getvalue()
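# Illustrative round trip (not part of the original module):
#   blob = dumps({"key": "value"})
#   assert loads(blob) == {"key": "value"}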
|
the-stack_106_18624
|
b = 8.0/3
r = 40.0
sigma = 10.0
dt = 0.01
x = y = 0.01
z = t = 0.0
xOld = zOld = 0.0
first = True
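# The draw() loop below integrates the Lorenz system with an explicit Euler step:
#   dx/dt = sigma * (y - x)
#   dy/dt = x * (r - z) - y
#   dz/dt = x * y - b * z
# and draws the x-z projection of the trajectory, colour-cycling with time t.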
def setup():
size(640, 480)
background(100, 100, 100)
colorMode(HSB, 100)
def draw():
global x, y, z, t, xOld, zOld
global first
strokeWeight(1)
stroke(t, 100 - t, 100)
dx = -sigma*(x - y)*dt
dy = (x*(r - z) - y)*dt
dz = (x*y - b*z)*dt
x += dx
y += dy
z += dz
    # scale to the window size
xx = (x*8) + 320
zz = 470 - (z*5.5)
if first:
point(xx, zz)
else:
line(xOld, zOld, xx, zz)
xOld = xx
zOld = zz
first = False
t = t + dt
if ( t >= 75.0):
print("I did it, Babe!")
noLoop()
|
the-stack_106_18625
|
#!/usr/bin/env python3
# Danny Rorabaugh, 2018 Dec
import argparse
import numpy as np
import pandas as pd
import skgstat as skg
from os import path, listdir
from time import time
t0 = time()
def mean(df):
return df.mean()
def std(df):
return df.std()
def mx(df):
return df.max()
def mn(df):
return df.min()
def rng(df):
return df.max() - df.min()
def qnt25(df):
return df.quantile(.25)
def qnt75(df):
return df.quantile(.75)
def count(df):
return df.dropna().shape[0]
def corr(df):
if df.columns[0]==df.columns[1]:
return 1
else:
return df[df.columns[0]].corr(df[df.columns[1]])
def variogram_range(df):
if df.columns[0]==df.columns[1]:
return 0
df = df.dropna()
    if (len(set(df[df.columns[0]]))<2):
return np.nan
coordinates = df[df.columns[:-1]]
values = df[df.columns[-1]]
try:
V = skg.Variogram(coordinates=coordinates, values=values)
except:
return np.nan
return V.describe()["effective_range"]
def variogram_sill(df):
if df.columns[0]==df.columns[1]:
return np.nan
df = df.dropna()
    if (len(set(df[df.columns[0]]))<2):
return np.nan
coordinates = df[df.columns[:-1]]
values = df[df.columns[-1]]
try:
V = skg.Variogram(coordinates=coordinates, values=values)
except:
return np.nan
return V.describe()["sill"]
def variogram_nugget(df):
if df.columns[0]==df.columns[1]:
return np.nan
df = df.dropna()
    if (len(set(df[df.columns[0]]))<2):
return np.nan
coordinates = df[df.columns[:-1]]
values = df[df.columns[-1]]
try:
V = skg.Variogram(coordinates=coordinates, values=values)
except:
return np.nan
return V.describe()["nugget"]
def compute_stats(in_file, out_file, stat_dicts):
df = pd.read_csv(in_file)
cols = df.shape[1]
print(f"Input file {in_file} has {cols} data columns.")
with open(out_file+"stats", "w") as stat_out:
with open(out_file+"stats.keys", "w") as keys_out:
for stat in stat_dicts[0]:
keys_out.write(f"{stat}()\n")
stat_out.write(str(list(stat_dicts[0][stat](df))).strip("[]")+"\n")
for stat in stat_dicts[1]:
keys_out.write(f"{stat}()\n")
stat_out.write(",".join([str(stat_dicts[1][stat](df[df.columns[i]])) for i in range(cols)])+"\n")
for stat in stat_dicts[2]:
for i in range(cols):
keys_out.write(f"{stat}({df.columns[i]})\n")
stat_out.write(",".join([str(stat_dicts[2][stat](df[[df.columns[i],df.columns[j]]])) for j in range(cols)])+"\n")
for stat in stat_dicts[3]:
keys_out.write(f"{stat}(x,y)\n")
stat_out.write("nan,nan," + ",".join([str(stat_dicts[3][stat](df[[df.columns[0], df.columns[1], df.columns[i]]])) for i in range(2,cols)])+"\n")
if __name__ == "__main__":
# Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("input_path",
help="The input file or directory of input files.")
parser.add_argument("-e", "--extension",
help="Expected file extension",
default="csv")
args = parser.parse_args()
# Check that arguments are sane
if not path.exists(args.input_path):
print(f"Warning! The file/folder {args.input_path} does not exist.")
if path.isfile(args.input_path):
files = [args.input_path]
elif path.isdir(args.input_path):
files = [file for file in listdir(args.input_path) if file.endswith(args.extension)]
stat_func_dicts = [{},{},{},{}]
# Functions that act on the entire dataframe, each column independently
stat_func_dicts[0] = {"mean":mean, "std":std, "min":mn, "max":mx, "range":rng, "quantile25":qnt25, "quantile75":qnt75}
# Functions performed on a 1-column dataframe
stat_func_dicts[1] = {"count":count}
# Functions performed on a 2-column dataframe, with the first column the dependent variable when applicable
stat_func_dicts[2] = {"count":count, "corr":corr}#, "variogram_range":variogram_range, "variogram_sill":variogram_sill}#, "variogram_nugget":variogram_nugget}
# Functions performed on a 3-column dataframe, with the first two cols x (longitude) and y (latitude)
#stat_func_dicts[3] = {"variogram_range":variogram_range, "variogram_sill":variogram_sill, "variogram_nugget":variogram_nugget}
for file in files:
out_path = file[:-len(args.extension)]
compute_stats(file, out_path, stat_func_dicts)
t1 = time()
print(f"Computed statistics for {len(files)} file(s) in {t1-t0} seconds.")
|
the-stack_106_18627
|
"""
This module houses the GeoIP object, a ctypes wrapper for the MaxMind GeoIP(R)
C API (http://www.maxmind.com/app/c). This is an alternative to the GPL
licensed Python GeoIP interface provided by MaxMind.
GeoIP(R) is a registered trademark of MaxMind, LLC of Boston, Massachusetts.
For IP-based geolocation, this module requires the GeoLite Country and City
datasets, in binary format (CSV will not work!). The datasets may be
downloaded from MaxMind at http://www.maxmind.com/download/geoip/database/.
Grab GeoIP.dat.gz and GeoLiteCity.dat.gz, and unzip them in the directory
corresponding to settings.GEOIP_PATH.
"""
__all__ = ['HAS_GEOIP']
try:
from .base import GeoIP, GeoIPException
HAS_GEOIP = True
__all__ += ['GeoIP', 'GeoIPException']
except RuntimeError: # libgeoip.py raises a RuntimeError if no GeoIP library is found
HAS_GEOIP = False
|
the-stack_106_18630
|
#!/usr/bin/python3
"""
@Author: Liu Shaoweihua
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import json
import numpy as np
import tensorflow as tf
from pyclue.tf1.models.engine.hooks import LoggingMetricsHook
from pyclue.tf1.models.engine.metrics import precision, recall, f1
from pyclue.tf1.models.engine.optimizations import create_optimizer
from pyclue.tf1.contrib.multi_class.inputs import FileProcessor, FileInputFn
from pyclue.tf1.contrib.multi_class.models import TextCnnModel, TextCnnConfig
from pyclue.tf1.tokenizers.word2vec_tokenizer import Word2VecTokenizer # Add more tokenizers
class Trainer(object):
def __init__(self, output_dir, random_seed=0):
self.base_output_dir = os.path.abspath(output_dir)
self.random_seed = random_seed
tf.set_random_seed(self.random_seed)
def build_model(self, vocab_file, config_file, init_checkpoint_file, max_seq_len=512):
# model
self.model = TextCnnModel(
config_file=config_file)
self.vocab_file = vocab_file
self.init_checkpoint_file = init_checkpoint_file
self.model_config = self.model.config
# max_seq_len and embedding_dim
self.max_seq_len = max_seq_len
self.embedding_dim = self.model_config.hidden_size
# tokenizer
self.tokenizer = Word2VecTokenizer(self.vocab_file)
# output_dir
self.output_dir = os.path.join(self.base_output_dir, 'textcnn')
if not tf.gfile.Exists(self.output_dir):
tf.gfile.MakeDirs(self.output_dir)
def load_data(self, data_dir, batch_size, recreate_tfrecord=True):
self.data_dir = os.path.abspath(data_dir)
self.batch_size = batch_size
self.recreate_tfrecord = recreate_tfrecord
self._load_processor()
self._load_input_fn()
def _load_processor(self):
self.processor = FileProcessor(
max_seq_len=self.max_seq_len, tokenizer=self.tokenizer,
data_dir=self.data_dir, save_tfrecord_dir=self.data_dir,
recreate_tfrecord=self.recreate_tfrecord)
self.labels = self.processor.labels
self.label_map = self.processor.label_map
self.label_map_reverse = self.processor.label_map_reverse
self.num_labels = self.processor.num_labels
self.train_examples = self.processor.train_examples
self.num_train_examples = self.processor.num_train_examples
self.dev_examples = self.processor.dev_examples
self.num_dev_examples = self.processor.num_dev_examples
self.test_examples = self.processor.test_examples
self.num_test_examples = self.processor.num_test_examples
def _load_input_fn(self):
self.input_fn_builder = FileInputFn(
self.max_seq_len, self.data_dir, self.batch_size)
self.train_input_fn = self.input_fn_builder.train_input_fn
self.dev_input_fn = self.input_fn_builder.dev_input_fn
self.test_input_fn = self.input_fn_builder.test_input_fn
def _load_estimator(self):
self.build_time = time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time()))
self.checkpoint_dir = os.path.join(
self.output_dir, self.build_time, 'checkpoints')
self.serving_model_dir = os.path.join(
self.output_dir, self.build_time, 'serving_model')
self.result_output_dir = os.path.join(
self.output_dir, self.build_time, 'outputs')
self.run_config = tf.estimator.RunConfig(
model_dir=self.checkpoint_dir,
tf_random_seed=self.random_seed,
save_checkpoints_steps=self.save_checkpoints_steps,
keep_checkpoint_max=10)
self.estimator = tf.estimator.Estimator(
self._load_model_fn(), config=self.run_config)
def _load_model_fn(self):
"""Returns `model_fn` for estimator."""
def model_fn(features, labels, mode, params):
"""The `model_fn` for estimator."""
input_ids = features['input_ids']
input_mask = features['input_mask']
segment_ids = features['segment_ids']
label_ids = features['label_ids']
if 'is_real_example' in features:
is_real_example = tf.cast(features['is_real_example'], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
batch_loss, per_example_loss, probabilities, logits, predictions = self.model(
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
labels=label_ids,
num_labels=self.num_labels)
from_word_embeddings_name = tf.train.list_variables(self.init_checkpoint_file)[0][0]
to_word_embeddings_name = 'embeddings/word_embeddings'
assignment_map = {
from_word_embeddings_name: to_word_embeddings_name}
tf.train.init_from_checkpoint(self.init_checkpoint_file, assignment_map)
# metrics: returns tuple (value_op, update_op)
value_accuracy_op, update_accuracy_op = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
value_loss_op, update_loss_op = tf.metrics.mean(
values=per_example_loss, weights=is_real_example)
value_precision_op, update_precision_op = precision(
labels=label_ids, predictions=predictions, num_classes=self.num_labels,
weights=is_real_example, average=None)
value_recall_op, update_recall_op = recall(
labels=label_ids, predictions=predictions, num_classes=self.num_labels,
weights=is_real_example, average=None)
value_f1_op, update_f1_op = f1(
labels=label_ids, predictions=predictions, num_classes=self.num_labels,
weights=is_real_example, average=None)
if mode == tf.estimator.ModeKeys.TRAIN:
train_metric_ops = {
'accuracy': value_accuracy_op,
'accuracy_update': update_accuracy_op,
'loss': value_loss_op,
'loss_update': update_loss_op,
'loss_batch': batch_loss,
'precision': value_precision_op,
'precision_update': update_precision_op,
'recall': value_recall_op,
'recall_update': update_recall_op,
'f1': value_f1_op,
'f1_update': update_f1_op}
train_op = create_optimizer(
batch_loss, self.optimizer_name, self.learning_rate, self.num_train_steps, self.num_warmup_steps)
train_metrics_hook = LoggingMetricsHook(
metric_ops=train_metric_ops,
label_map_reverse=self.label_map_reverse,
save_steps=self.log_steps,
output_dir=self.result_output_dir)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=batch_loss,
train_op=train_op,
training_hooks=[train_metrics_hook])
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {
'accuracy': (value_accuracy_op, update_accuracy_op),
'precision': (value_precision_op, update_precision_op),
'recall': (value_recall_op, update_recall_op),
'f1': (value_f1_op, update_f1_op)}
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=batch_loss,
eval_metric_ops=eval_metric_ops)
else:
predictions = {
'predictions': predictions,
'probabilities': probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions)
return output_spec
return model_fn
def train(self, num_train_epochs, warmup_proportion, learning_rate, optimizer_name, log_steps=50):
self.num_train_epochs = num_train_epochs
self.warmup_proportion = warmup_proportion
self.num_train_steps = int(self.num_train_examples / self.batch_size * self.num_train_epochs)
self.num_warmup_steps = int(self.num_train_steps * self.warmup_proportion)
self.learning_rate = learning_rate
self.optimizer_name = optimizer_name
        self.log_steps = log_steps
        # train() never sets save_checkpoints_steps, which _load_estimator() reads;
        # default it to None so the estimator falls back to its own checkpoint schedule
        self.save_checkpoints_steps = None
self._load_estimator()
self._asynchronous_train()
self.pb_model_file = self.save_model(checkpoint_path=self.ckpt_model_file)
model_file_dict = {
'ckpt_model_file': self.ckpt_model_file,
'pb_model_file': self.pb_model_file
}
return model_file_dict
def train_and_evaluate(self, num_train_epochs, warmup_proportion, learning_rate, optimizer_name,
log_steps=50, metric_name='accuracy', save_checkpoints_steps=200,
max_steps_without_increase=None, min_steps=None, mode=0,
apply_best_checkpoint=True):
self.num_train_epochs = num_train_epochs
self.warmup_proportion = warmup_proportion
self.num_train_steps = int(self.num_train_examples / self.batch_size * self.num_train_epochs)
self.num_warmup_steps = int(self.num_train_steps * self.warmup_proportion)
self.learning_rate = learning_rate
self.optimizer_name = optimizer_name
self.log_steps = log_steps
self.save_checkpoints_steps = save_checkpoints_steps
self._load_estimator()
if mode == 0:
model_file_dict = self._asynchronous_train_and_eval(
metric_name, apply_best_checkpoint)
elif mode == 1:
model_file_dict = self._synchronous_train_and_eval(
metric_name, max_steps_without_increase, min_steps)
else:
raise ValueError('`mode` argument can only be 0 (asynchronous) or 1 (synchronous)'
' during train_and_evaluate')
return model_file_dict
def _asynchronous_train(self):
# train
print('***** train phase *****')
print(' Num train examples = %d' % self.num_train_examples)
print(' Batch size = %d' % self.batch_size)
print(' Num train steps = %d' % self.num_train_steps)
self.estimator.train(input_fn=self.train_input_fn, max_steps=self.num_train_steps)
self.steps_and_files = []
files = tf.gfile.ListDirectory(self.checkpoint_dir)
for file in files:
if file.endswith('.index'):
file_name = os.path.join(self.checkpoint_dir, file.strip('.index'))
global_step = int(file_name.split('-')[-1])
self.steps_and_files.append([global_step, file_name])
self.steps_and_files = sorted(self.steps_and_files, key=lambda i: i[0])
self.last_checkpoint_file = self.steps_and_files[-1][-1]
self.ckpt_model_file = self.last_checkpoint_file
def _asynchronous_eval(self, metric_name, apply_best_checkpoint=True):
# dev
print('***** evaluate phase *****')
print(' Num evaluate examples = %d' % self.num_dev_examples)
files_and_results = []
output_eval_file = os.path.join(self.result_output_dir, 'dev_results.txt')
with tf.gfile.GFile(output_eval_file, 'w') as writer:
for global_step, file_name in self.steps_and_files[1:]:
result = self.estimator.evaluate(
input_fn=self.dev_input_fn,
checkpoint_path=file_name)
files_and_results.append([file_name, result[metric_name]])
writer.write('***** dev results %s *****\n' % file_name)
for key in sorted(result.keys()):
writer.write('%s = %s\n' % (key, str(result[key])))
files_and_results = sorted(files_and_results, key=lambda i: i[1], reverse=True)
self.best_checkpoint_file = files_and_results[0][0]
if apply_best_checkpoint:
self.ckpt_model_file = self.best_checkpoint_file
else:
self.ckpt_model_file = self.last_checkpoint_file
def _test(self):
print('***** test phase *****')
print(' Num test examples = %d' % self.num_test_examples)
if self.num_test_examples != 0:
output_eval_file = os.path.join(self.result_output_dir, 'test_results.txt')
with tf.gfile.GFile(output_eval_file, 'w') as writer:
result = self.estimator.evaluate(
input_fn=self.test_input_fn,
checkpoint_path=self.ckpt_model_file)
writer.write('***** test results %s *****\n' % self.ckpt_model_file)
for key in sorted(result.keys()):
writer.write('%s = %s\n' % (key, str(result[key])))
def _asynchronous_train_and_eval(self, metric_name, apply_best_checkpoint=True):
# train
self._asynchronous_train()
# eval
self._asynchronous_eval(
metric_name=metric_name, apply_best_checkpoint=apply_best_checkpoint)
self.pb_model_file = self.save_model(checkpoint_path=self.ckpt_model_file)
# test
self._test()
model_file_dict = {
'ckpt_model_file': self.ckpt_model_file,
'pb_model_file': self.pb_model_file
}
return model_file_dict
def _synchronous_train_and_eval(self, metric_name, max_steps_without_increase, min_steps):
# train and dev
print('***** train and evaluate phase *****')
print(' Num train examples = %d' % self.num_train_examples)
print(' Num evaluate examples = %d' % self.num_dev_examples)
print(' Batch size = %d' % self.batch_size)
print(' Num train steps = %d' % self.num_train_steps)
if not max_steps_without_increase:
max_steps_without_increase = int(self.num_train_steps // 10)
if not min_steps:
min_steps = self.num_warmup_steps
early_stop_hook = tf.estimator.experimental.stop_if_no_increase_hook(
self.estimator,
metric_name=metric_name,
max_steps_without_increase=max_steps_without_increase,
min_steps=min_steps)
exporter = tf.estimator.BestExporter(
serving_input_receiver_fn=self._serving_input_receiver_fn(),
exports_to_keep=1)
train_spec = tf.estimator.TrainSpec(
input_fn=self.train_input_fn,
max_steps=self.num_train_steps,
hooks=[early_stop_hook])
eval_spec = tf.estimator.EvalSpec(
input_fn=self.dev_input_fn,
exporters=exporter,
steps=None,
start_delay_secs=120,
throttle_secs=1)
result, _ = tf.estimator.train_and_evaluate(self.estimator, train_spec, eval_spec)
for file in tf.gfile.ListDirectory(self.checkpoint_dir):
if file.endswith('.index'):
self.ckpt_model_file = os.path.join(self.checkpoint_dir, file.strip('.index'))
self.pb_model_file = self.save_model(checkpoint_path=self.ckpt_model_file)
output_eval_file = os.path.join(self.result_output_dir, 'dev_results.txt')
with tf.gfile.GFile(output_eval_file, 'w') as writer:
writer.write('***** dev results %s *****\n' % self.ckpt_model_file)
for key in sorted(result.keys()):
writer.write('%s = %s\n' % (key, str(result[key])))
# test
self._test()
model_file_dict = {
'ckpt_model_file': self.ckpt_model_file,
'pb_model_file': self.pb_model_file
}
return model_file_dict
def _serving_input_receiver_fn(self):
feature_map = {
'input_ids': tf.placeholder(tf.int32, shape=[None, self.max_seq_len], name='input_ids'),
'input_mask': tf.placeholder(tf.int32, shape=[None, self.max_seq_len], name='input_mask'),
'segment_ids': tf.placeholder(tf.int32, shape=[None, self.max_seq_len], name='segment_ids'),
'label_ids': tf.placeholder(tf.int32, shape=[None], name='label_ids')}
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(feature_map)
return serving_input_receiver_fn
def save_model(self, checkpoint_path):
if not tf.gfile.Exists(self.serving_model_dir):
tf.gfile.MakeDirs(self.serving_model_dir)
serving_input_receiver_fn = self._serving_input_receiver_fn()
saved_path = self.estimator.export_saved_model(
export_dir_base=self.serving_model_dir,
serving_input_receiver_fn=serving_input_receiver_fn,
checkpoint_path=checkpoint_path,
experimental_mode=tf.estimator.ModeKeys.PREDICT)
saved_path = saved_path.decode('utf-8')
# save label_map_reverse for prediction
label_map_reverse_file = os.path.join(
saved_path, 'label_map_reverse.json')
with tf.gfile.GFile(label_map_reverse_file, 'w') as f:
json.dump(self.label_map_reverse, f, ensure_ascii=False, indent=4)
# save model_config for prediction
model_configs = {
'vocab_file': self.vocab_file,
'max_seq_len': self.max_seq_len
}
model_configs_file = os.path.join(
saved_path, 'model_config.json')
with tf.gfile.GFile(model_configs_file, 'w') as f:
json.dump(model_configs, f, ensure_ascii=False, indent=4)
return saved_path
def predict(self, texts):
if self.task_type == 'single':
if isinstance(texts, str):
new_texts = [self.labels[0], texts]
elif isinstance(texts, list):
new_texts = []
for item in texts:
if len(item) == 1 or len(item) == 2:
new_texts.append([self.labels[0], item[-1]])
else:
raise ValueError('texts item should contain 1 or 2 elements')
else:
raise ValueError('texts format should be `str` or `list`')
assert all([len(item) == 2 for item in new_texts]), \
'texts item should contain 2 elements'
else:
assert isinstance(texts, list), 'texts format should be `list`'
new_texts = []
for item in texts:
if isinstance(item, str):
new_texts.append([self.labels[0], item, ''])
else:
if len(item) == 2 or len(item) == 3:
new_texts.append([self.labels[0], item[-2], item[-1]])
else:
raise ValueError('text item should contain 2 or 3 elements')
assert all([len(item) == 3 for item in new_texts]), \
'texts item should contain 3 elements'
features = self.processor.get_features_for_inputs(new_texts)
result = self.estimator.predict(
input_fn=self.input_fn_builder.predict_input_fn(features=features),
checkpoint_path=self.ckpt_model_file)
result = list(result)
predictions = [item['predictions'] for item in result]
probabilities = [item['probabilities'].tolist() for item in result]
return [{
'text': ''.join(text[1:]),
'prediction': self.label_map_reverse[prediction],
'probability': probability
} for text, prediction, probability in zip(new_texts, predictions, probabilities)]
def predict_from_file(self, input_file):
texts = self.processor.read_file(input_file)
texts = np.squeeze(texts).tolist()
return self.predict(texts)
def quality_inspection(self, input_file, save_path=None):
texts = self.processor.read_file(input_file)
features = self.processor.get_features_for_inputs(texts)
result = self.estimator.predict(
input_fn=self.input_fn_builder.predict_input_fn(features=features),
checkpoint_path=self.ckpt_model_file)
result = list(result)
predictions = [item['predictions'] for item in result]
probabilities = [item['probabilities'].tolist() for item in result]
if not save_path:
save_path = os.path.join(self.result_output_dir, 'quality_inspection')
if not tf.gfile.Exists(save_path):
tf.gfile.MakeDirs(save_path)
with tf.gfile.GFile(os.path.join(save_path, input_file.split('/')[-1]), 'w') as writer:
for text, prediction, probability in zip(texts, predictions, probabilities):
prediction = self.label_map_reverse[prediction]
if text[0] != prediction:
writer.write(
'text = %s, true = %s, pred = %s, probability = %s\n'
% (text[1], text[0], prediction, probability))
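# Illustrative end-to-end usage (all argument values below are placeholders):
#   trainer = Trainer(output_dir='outputs')
#   trainer.build_model(vocab_file='vocab.txt', config_file='textcnn_config.json',
#                       init_checkpoint_file='word2vec.ckpt', max_seq_len=128)
#   trainer.load_data(data_dir='data', batch_size=32)
#   trainer.train_and_evaluate(num_train_epochs=3, warmup_proportion=0.1,
#                              learning_rate=1e-3, optimizer_name='adam',
#                              save_checkpoints_steps=200)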
|
the-stack_106_18631
|
from sentry.auth.exceptions import IdentityNotValid
from sentry.auth.providers.oauth2 import (
OAuth2Callback, OAuth2Provider, OAuth2Login
)
from .client import GenericApiError, GenericClient
from .constants import (
AUTHORIZE_URL, ACCESS_TOKEN_URL, CLIENT_ID, CLIENT_SECRET, SCOPE,
UNIQUE_USERID_FIELD
)
from .views import (
ConfirmEmail, FetchUser, GenericConfigureView
)
class GenericOAuth2Provider(OAuth2Provider):
access_token_url = ACCESS_TOKEN_URL
authorize_url = AUTHORIZE_URL
name = 'OAuth2'
client_id = CLIENT_ID
client_secret = CLIENT_SECRET
def __init__(self, org=None, **config):
super(GenericOAuth2Provider, self).__init__(**config)
self.org = org
def get_configure_view(self):
return GenericConfigureView.as_view()
def get_auth_pipeline(self):
return [
OAuth2Login(
authorize_url=self.authorize_url,
client_id=self.client_id,
scope=SCOPE,
),
OAuth2Callback(
access_token_url=self.access_token_url,
client_id=self.client_id,
client_secret=self.client_secret,
),
FetchUser(
client_id=self.client_id,
client_secret=self.client_secret,
org=self.org,
),
ConfirmEmail(),
]
def get_setup_pipeline(self):
pipeline = self.get_auth_pipeline()
return pipeline
def get_refresh_token_url(self):
return ACCESS_TOKEN_URL
def build_config(self, state):
return {
}
def build_identity(self, state):
data = state['data']
user_data = state['user']
return {
'id': user_data[UNIQUE_USERID_FIELD],
'email': user_data['email'],
'name': user_data['name'],
'data': self.get_oauth_data(data),
}
def refresh_identity(self, auth_identity):
client = GenericClient(self.client_id, self.client_secret)
access_token = auth_identity.data['access_token']
try:
if not client.get_user(access_token):
raise IdentityNotValid
except GenericApiError as e:
raise IdentityNotValid(e)
|
the-stack_106_18636
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import asyncio
import concurrent.futures
import uuid
import time
import threading
import functools
import json
import base64
import hmac
import hashlib
import sys
import random
from azure.iot.device.aio import IoTHubDeviceClient
from azure.iot.device.aio import ProvisioningDeviceClient
from azure.iot.device import Message
from azure.iot.device import MethodResponse
from blinkt import set_pixel, set_brightness, show, clear
def derive_device_key(device_id, group_symmetric_key):
"""
The unique device ID and the group master key should be encoded into "utf-8"
After this the encoded group master key must be used to compute an HMAC-SHA256 of the encoded registration ID.
Finally the result must be converted into Base64 format.
The device key is the "utf-8" decoding of the above result.
"""
message = device_id.encode("utf-8")
signing_key = base64.b64decode(group_symmetric_key.encode("utf-8"))
signed_hmac = hmac.HMAC(signing_key, message, hashlib.sha256)
device_key_encoded = base64.b64encode(signed_hmac.digest())
return device_key_encoded.decode("utf-8")
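# Illustrative usage (values are placeholders, not real credentials): each device in
# a DPS group enrollment derives its own key from the shared group key like this:
#   device_key = derive_device_key("my-device-01", "<base64 group master key>")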
#======================================
# To switch from PC to RPi, switch commented/un-commented sections below
#======================================
from blinkt import set_pixel, set_brightness, show, clear
provisioning_host = os.getenv("PROVISIONING_HOST")
id_scope = os.getenv("PROVISIONING_IDSCOPE")
registration_id = os.getenv("PROVISIONING_DEVICE_ID")
group_symmetric_key = os.getenv("PROVISIONING_MASTER_SYMMETRIC_KEY")
symmetric_key = derive_device_key(
device_id=registration_id,
group_symmetric_key=group_symmetric_key,
)
#======================================
# provisioning_host = 'global.azure-devices-provisioning.net'
# id_scope = '<IDScope>'
# registration_id = '<DeviceId>'
# group_symmetric_key = '<Provisioning-master-key>'
# symmetric_key = derive_device_key(
# device_id=registration_id,
# group_symmetric_key=group_symmetric_key,
# )
# def set_pixel(i, r, g, b):
# time.sleep(0.1)
# def set_brightness(b):
# time.sleep(0.1)
# def show():
# time.sleep(0.1)
# def clear():
# time.sleep(0.1)
#======================================
class Led:
status=False
blink=False
r=255
g=255
b=255
def __init__(self, r=255, g=255, b=255):
self.status = False
self.blink = False
self.r = r
self.g = g
self.b = b
def set_color(self, r,g,b):
self.r = r
self.g = g
self.b = b
def set_status(self, status):
self.status = status
def set_blink(self, yesno):
self.blink = yesno
# list of leds
class Led_Manager:
leds = []
scroll_leds = False
def __init__(self):
for i in range(8):
self.leds.append(Led())
set_brightness(0.1)
def set_all_leds_color(self, r, g, b):
for i in range(8):
self.leds[i].set_color(r, g, b)
def set_led(self, i, status, r, g, b, blk=False):
self.leds[i].set_color(r, g, b)
self.leds[i].set_status(status)
self.leds[i].blink=blk
def set_all_leds_off(self):
self.scroll_leds = False
for i in range(8):
self.leds[i].set_status(False)
def start_scrolling(self):
self.scroll_leds = True
def stop_scrolling(self):
self.scroll_leds = False
async def scroll_leds_task(self):
while True:
if (self.scroll_leds):
print("Scrolling leds")
for i in range(8):
clear()
set_pixel(i, self.leds[i].r, self.leds[i].g, self.leds[i].b)
show()
await asyncio.sleep(0.05)
clear()
show()
else:
await asyncio.sleep(.5)
async def update_leds_task(self):
        # Track the currently displayed on/off state per led so blinking leds can
        # toggle each cycle without losing their configured status.
        new_status = {}
        for i in range(8):
            new_status[i] = self.leds[i].status
while True:
if (not self.scroll_leds):
clear()
for i in range(8):
if (self.leds[i].blink and self.leds[i].status):
new_status[i] = not new_status[i]
else:
new_status[i]=self.leds[i].status
if (new_status[i]):
set_pixel(i, self.leds[i].r, self.leds[i].g, self.leds[i].b)
show()
await asyncio.sleep(.5)
def printjson(obj):
parsed = json.dumps(obj)
loaded = json.loads(parsed)
print(json.dumps(loaded, indent=2, sort_keys=True))
led_manager = Led_Manager()
async def main():
# Function for sending message
async def send_test_message():
print("Sending telemetry message from device " + device_id)
body_dict = {}
body_dict['Temperature'] = random.randrange(76, 80, 1)
body_dict['Humidity'] = random.randrange(40, 60, 1)
body_dict['Location']='28.424911, -81.468962'
body_json = json.dumps(body_dict)
print(body_json)
msg = Message(body_json)
msg.message_id = uuid.uuid4()
msg.correlation_id = "correlation-1234"
        msg.content_encoding = "utf-8"
        msg.content_type = "application/json"
await device_client.send_message(msg)
print("Done sending message")
# update the reported properties
async def update_device_twin(device_client, led_manager):
reported_properties = {}
for i in range (8):
key='led'+ str(i+1) +'_status'
reported_properties[key] = led_manager.leds[i].status
key='led'+ str(i+1) +'_blink'
reported_properties[key] = led_manager.leds[i].blink
key='led'+ str(i+1) +'_r'
reported_properties[key] = led_manager.leds[i].r
key='led'+ str(i+1) +'_g'
reported_properties[key] = led_manager.leds[i].g
key='led'+ str(i+1) +'_b'
reported_properties[key] = led_manager.leds[i].b
await device_client.patch_twin_reported_properties(reported_properties)
print("Updated Device Twin's reported properties:")
printjson(reported_properties)
# define behavior for receiving a twin patch
async def twin_patch_listener(device_client, led_manager):
while True:
patch = await device_client.receive_twin_desired_properties_patch() # blocking call
print("Received new device twin's desired properties:")
printjson(patch)
for i in range (8):
led_status = led_manager.leds[i].status
led_blink = led_manager.leds[i].blink
led_r = led_manager.leds[i].r
led_g = led_manager.leds[i].g
led_b = led_manager.leds[i].b
key='led'+ str(i+1) +'_status'
if key in patch:
led_status = patch[key]
key='led'+ str(i+1) +'_blink'
if key in patch:
led_blink = patch[key]
key='led'+ str(i+1) +'_r'
if key in patch:
led_r = patch[key]
key='led'+ str(i+1) +'_g'
if key in patch:
led_g = patch[key]
key='led'+ str(i+1) +'_b'
if key in patch:
led_b = patch[key]
led_manager.set_led(i, led_status, led_r, led_g, led_b, led_blink)
await update_device_twin(device_client, led_manager)
async def direct_methods_listener(device_client, led_manager):
while True:
method_request = (
await device_client.receive_method_request()
) # Wait for unknown method calls
            # Check which method was invoked
if (method_request.name == "TurnLedsOff"):
# Turn all leds off
led_manager.set_all_leds_off()
response_payload = {"result": True, "data": "Leds are all off"} # set response payload
response_status = 200 # set return status code
print("Executed method " + method_request.name)
elif (method_request.name == "ScrollLeds"):
# Set leds colors and start scrolling
led_manager.set_all_leds_off()
led_manager.set_all_leds_color(255, 255, 255)
led_manager.start_scrolling()
response_payload = {"result": True, "data": "Leds are now scrolling"} # set response payload
response_status = 200 # set return status code
print("Executed method " + method_request.name)
else:
# Respond
response_payload = {"result": True, "data": "unknown method"} # set response payload
response_status = 200 # set return status code
print("Executed unknown method: " + method_request.name)
method_response = MethodResponse.create_from_method_request(
method_request, response_status, response_payload
)
await device_client.send_method_response(method_response) # send response
# Schedule tasks for Methods and twins updates
led_listeners = asyncio.gather(
led_manager.scroll_leds_task(),
led_manager.update_leds_task()
)
# Thread pool Executor to execute async functions in sync task
# pool = concurrent.futures.ThreadPoolExecutor()
device_id = registration_id
print("Connecting device " + device_id)
# Connect the client.
print("Provisioning device to Azure IoT...")
led_manager.set_all_leds_color(0, 255, 0)
led_manager.start_scrolling()
# registration using DPS
provisioning_device_client = ProvisioningDeviceClient.create_from_symmetric_key(
provisioning_host=provisioning_host,
registration_id=registration_id,
id_scope=id_scope,
symmetric_key=symmetric_key
)
registration_result = await provisioning_device_client.register()
led_manager.set_all_leds_off()
if registration_result.status == "assigned":
print("Device successfully registered. Creating device client")
# Create device client from the above result
device_client = IoTHubDeviceClient.create_from_symmetric_key(
symmetric_key=symmetric_key,
hostname=registration_result.registration_state.assigned_hub,
device_id=registration_result.registration_state.device_id,
)
else:
        led_manager.set_led(0, True, 255, 0, 0, True)
print("Provisioning of the device failed.")
sys.exit()
# Connect the client.
print("Connecting to Azure IoT...")
led_manager.set_all_leds_color(0, 0, 255)
led_manager.start_scrolling()
await device_client.connect()
print("Device is connected to Azure IoT")
led_manager.set_all_leds_off()
# Update Device Twin reported properties
await update_device_twin(device_client, led_manager)
# Schedule tasks for Methods and twins updates
iothub_listeners = asyncio.gather(
direct_methods_listener(device_client, led_manager),
twin_patch_listener(device_client, led_manager)
)
# define behavior for halting the application
def stdin_listener():
pool = concurrent.futures.ThreadPoolExecutor()
while True:
print("To control the leds from Azure IoT, you can send the following commands through Direct Methods: TurnLedsOff, ScrollLeds")
selection = input("Commands: \n Q: quit\n S: Send a telemetry message\n")
if selection == "Q" or selection == "q":
print("Quitting...")
break
elif selection == "S" or selection =="s":
                # send a single telemetry message, running the coroutine on the executor
                result = pool.submit(asyncio.run, send_test_message()).result()
loop = asyncio.get_running_loop()
user_finished = loop.run_in_executor(None, stdin_listener)
# Wait for user to indicate they are done listening for messages
await user_finished
# Cancel listening
led_listeners.cancel()
iothub_listeners.cancel()
# finally, disconnect
await device_client.disconnect()
if __name__ == "__main__":
asyncio.run(main())
# If using Python 3.6 or below, use the following code instead of asyncio.run(main()):
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# loop.close()
|
the-stack_106_18637
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import unittest
from pyspark import SparkConf, SparkContext, BasicProfiler
from pyspark.testing.utils import PySparkTestCase
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class ProfilerTests2(unittest.TestCase):
def test_profiler_disabled(self):
sc = SparkContext(conf=SparkConf().set("spark.python.profile", "false"))
try:
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.show_profiles())
self.assertRaisesRegexp(
RuntimeError,
"'spark.python.profile' configuration must be set",
lambda: sc.dump_profiles("/tmp/abc"))
finally:
sc.stop()
if __name__ == "__main__":
from pyspark.tests.test_profiler import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
the-stack_106_18639
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test_kill_bolt.py"""
import logging
import test_template
NON_TMASTER_SHARD = 1
HERON_BOLT = 'identity-bolt_3'
class TestKillBolt(test_template.TestTemplate):
def execute_test_case(self):
logging.info("Executing kill bolt")
bolt_pid = self.get_pid(
'container_%d_%s' % (NON_TMASTER_SHARD, HERON_BOLT), self.params['workingDirectory'])
self.kill_process(bolt_pid)
|
the-stack_106_18641
|
import datetime
import itertools
import logging
import time
from decimal import Decimal
from typing import Iterable, List, Literal, Optional, Tuple
from tilapia.lib.basic.functional.require import require
from tilapia.lib.basic.functional.timing import timing_logger
from tilapia.lib.basic.functional.wraps import error_interrupter, timeout_lock
from tilapia.lib.basic.orm.database import db
from tilapia.lib.coin import data as coin_data
from tilapia.lib.coin import manager as coin_manager
from tilapia.lib.provider import data as provider_data
from tilapia.lib.provider import manager as provider_manager
from tilapia.lib.transaction import daos
from tilapia.lib.transaction.data import TX_TO_ACTION_STATUS_DIRECT_MAPPING, TxActionStatus
from tilapia.lib.transaction.models import TxAction
from tilapia.lib.utxo import manager as utxo_manager
logger = logging.getLogger("app.transaction")
def create_action(
txid: str,
status: TxActionStatus,
chain_code: str,
coin_code: str,
value: Decimal,
from_address: str,
to_address: str,
fee_limit: Decimal,
raw_tx: str,
**kwargs,
) -> TxAction:
return daos.new_action(
txid=txid,
status=status,
chain_code=chain_code,
coin_code=coin_code,
value=value,
from_address=from_address,
to_address=to_address,
fee_limit=fee_limit,
raw_tx=raw_tx,
**kwargs,
).save()
def get_action_by_id(action_id: int) -> TxAction:
return daos.get_action_by_id(action_id)
def update_action_status(
chain_code: str,
txid: str,
status: TxActionStatus,
):
daos.update_actions_status(chain_code, txid, status)
def has_actions_by_txid(chain_code: str, txid: str) -> bool:
return daos.has_actions_by_txid(chain_code, txid)
def query_actions_by_txid(chain_code: str, txid: str) -> List[TxAction]:
return daos.query_actions_by_txid(chain_code, txid)
def update_pending_actions(
chain_code: Optional[str] = None,
address: Optional[str] = None,
txid: Optional[str] = None,
):
pending_actions = daos.query_actions_by_status(
TxActionStatus.PENDING,
chain_code=chain_code,
address=address,
txid=txid,
)
if not pending_actions:
return
txids_of_chain = {(i.chain_code, i.txid) for i in pending_actions}
confirmed_txids = set()
for chain_code, tx in _query_transactions_of_chain(txids_of_chain):
try:
action_status = TX_TO_ACTION_STATUS_DIRECT_MAPPING.get(tx.status)
if tx.fee is None or tx.block_header is None or action_status is None:
continue
_on_transaction_confirmed(
chain_code=chain_code,
txid=tx.txid,
status=action_status,
fee_used=Decimal(tx.fee.used),
block_hash=tx.block_header.block_hash,
block_number=tx.block_header.block_number,
block_time=tx.block_header.block_time,
)
confirmed_txids.add(tx.txid)
logger.info(
f"TxAction confirmed. chain_code: {chain_code}, txid: {tx.txid}, action_status: {action_status}"
)
except Exception as e:
logger.exception(f"Error in updating actions. chain_code: {chain_code}, txid: {tx.txid}, error: {repr(e)}")
unconfirmed_actions = [i for i in pending_actions if i.txid not in confirmed_txids]
if not unconfirmed_actions:
return
now = datetime.datetime.now()
too_old = datetime.timedelta(days=3)
too_old_txids = {(i.chain_code, i.txid) for i in unconfirmed_actions if now - i.created_time >= too_old}
with db.atomic():
for chain_code, txid in too_old_txids:
daos.update_actions_status(chain_code, txid, status=TxActionStatus.UNKNOWN)
def _query_transactions_of_chain(
txids_of_chain: Iterable[Tuple[str, str]]
) -> Iterable[Tuple[str, provider_data.Transaction]]:
txids_of_chain = sorted(txids_of_chain, key=lambda i: i[0]) # in order to use itertools.groupby
for chain_code, group in itertools.groupby(txids_of_chain, key=lambda i: i[0]):
for (_, txid) in group:
try:
yield chain_code, provider_manager.get_transaction_by_txid(chain_code, txid)
except Exception as e:
logger.exception(
f"Error in getting transaction by txid. chain_code: {chain_code}, txid: {txid}, error: {repr(e)}"
)
def _search_txs_by_address(
chain_code: str, address: str, last_confirmed_action: TxAction = None
) -> Iterable[provider_data.Transaction]:
try:
if last_confirmed_action is not None:
paginate = provider_data.TxPaginate(start_block_number=last_confirmed_action.block_number)
else:
paginate = None
transactions = provider_manager.search_txs_by_address(chain_code, address, paginate=paginate)
return transactions
except Exception as e:
logger.exception(
f"Error in searching txs by address. chain_code: {chain_code}, "
f"address: {address}, last_confirmed_action: {last_confirmed_action}, error: {repr(e)}"
)
return []
def _tx_action_factory__account_model(
chain_code: str, transactions: Iterable[provider_data.Transaction]
) -> Iterable[TxAction]:
transactions = [i for i in transactions if i.status in TX_TO_ACTION_STATUS_DIRECT_MAPPING]
token_addresses = set()
for tx in transactions:
for tx_input in tx.inputs:
token_addresses.add(tx_input.token_address)
main_coin = coin_manager.get_coin_info(chain_code)
tokens = coin_manager.query_coins_by_token_addresses(chain_code, list(token_addresses))
tokens = {i.token_address: i for i in tokens if i.token_address}
txids = set()
for tx in transactions:
if (
tx.txid in txids
): # May get two txs with the same txid here, if the receiver and the sender are at the same address
continue
txids.add(tx.txid)
status = TX_TO_ACTION_STATUS_DIRECT_MAPPING.get(tx.status)
for index, (tx_input, tx_output) in enumerate(zip(tx.inputs, tx.outputs)):
token_address = tx_output.token_address
if not tx_input.address or not tx_output.address or (token_address and token_address not in tokens):
continue
coin = main_coin if not token_address else tokens[token_address]
info = dict(
txid=tx.txid,
status=status,
chain_code=chain_code,
coin_code=coin.code,
value=Decimal(tx_output.value),
from_address=tx_input.address,
to_address=tx_output.address,
fee_limit=Decimal(tx.fee.limit),
fee_price_per_unit=Decimal(tx.fee.price_per_unit),
raw_tx=tx.raw_tx,
index=index,
)
if tx.block_header:
info.update(
dict(
fee_used=Decimal(tx.fee.used),
block_number=tx.block_header.block_number,
block_hash=tx.block_header.block_hash,
block_time=tx.block_header.block_time,
created_time=datetime.datetime.fromtimestamp(
tx.block_header.block_time
), # Unify the ordering of local records and on-chain transactions
)
)
if tx.nonce is not None and tx.nonce >= 0:
info["nonce"] = tx.nonce
yield daos.new_action(**info)
_TX_ACTION_FACTORY_REGISTRY = {
coin_data.ChainModel.ACCOUNT: _tx_action_factory__account_model,
}
def _search_actions_from_provider_by_address(
chain_code: str, address: str, paginate: provider_data.TxPaginate = None
) -> List[TxAction]:
chain_info = coin_manager.get_chain_info(chain_code)
action_factory = _TX_ACTION_FACTORY_REGISTRY.get(chain_info.chain_model)
if not action_factory:
return []
try:
transactions = provider_manager.search_txs_by_address(chain_code, address, paginate=paginate)
except Exception as e:
transactions = []
logger.exception(
f"Error in searching txs by address form provider. "
f"chain_code: {chain_code}, address: {address}, paginate: {paginate}, error: {repr(e)}"
)
transactions = (i for i in transactions if i.status in TX_TO_ACTION_STATUS_DIRECT_MAPPING)
actions = action_factory(chain_code, transactions)
actions = [i for i in actions if i.from_address == address or i.to_address == address]
return actions
_LAST_ARCHIVED_ID_CACHE = {}
@timing_logger("transaction_manager.query_actions_by_address")
def query_actions_by_address(
chain_code: str,
coin_code: str,
address: str,
page_number: int = 1,
items_per_page: int = 20,
searching_address_as: Literal["sender", "receiver", "both"] = "both",
) -> List[TxAction]:
with timeout_lock("transaction_manager.query_actions_by_address") as acquired:
if not acquired:
return []
address = provider_manager.verify_address(chain_code, address).normalized_address
page_number = max(page_number, 1)
is_first_page = page_number == 1
archived_id_cache_key = f"{chain_code}:{address}"
archived_id = _LAST_ARCHIVED_ID_CACHE.get(archived_id_cache_key)
if is_first_page or not archived_id:
archived_id = int(time.time() * 1e3)
_LAST_ARCHIVED_ID_CACHE[archived_id_cache_key] = archived_id
local_actions = []
max_times = 3
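        # Page-filling loop: serve the page from the local DB when possible; if the page
        # comes up short, pull progressively larger batches from the provider (200, 400,
        # 800 items) and retry, stopping once the page fills, the retry budget is spent,
        # or a sync round brings in nothing new.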
for times in range(max_times + 1):
local_actions = daos.query_actions_by_address(
chain_code,
address,
coin_code=coin_code,
items_per_page=items_per_page,
page_number=page_number,
archived_ids=[archived_id, None],
searching_address_as=searching_address_as,
)
if (
len(local_actions) >= items_per_page
or times == max_times # No need to invoke synchronization the last time
or _sync_actions_by_address(chain_code, address, archived_id, require_sync_number=200 * 1 << times) == 0
):
break
return local_actions
@error_interrupter(logger, interrupt=True, default=0)
def _sync_actions_by_address(chain_code: str, address: str, archived_id: int, require_sync_number: int = 200) -> int:
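    # Descriptive comment: sync the block window bounded by the last action confirmed
    # before this archived_id and the first action confirmed inside it, then confirm
    # matching pending actions, fold older archive batches into this archived_id, and
    # insert any brand-new actions. The returned count tells the caller whether another
    # round of paging is worthwhile.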
first_confirmed_action = daos.get_first_confirmed_action_at_the_same_archived_id(chain_code, address, archived_id)
last_confirmed_action_before_this_archived = daos.get_last_confirmed_action_before_archived_id(
chain_code, address, archived_id
)
paginate = provider_data.TxPaginate(
start_block_number=(
max(
0, last_confirmed_action_before_this_archived.block_number - 1
) # Ensure that the requested block overlaps the recorded block
if last_confirmed_action_before_this_archived
else None
),
end_block_number=first_confirmed_action.block_number if first_confirmed_action else None,
items_per_page=require_sync_number,
)
syncing_actions = _search_actions_from_provider_by_address(chain_code, address, paginate)
syncing_txids = list({i.txid for i in syncing_actions})
pending_txids = daos.filter_existing_txids(chain_code, syncing_txids, status=TxActionStatus.PENDING)
to_be_confirmed_actions = {
i.txid: i for i in syncing_actions if i.txid in pending_txids and i.block_number is not None
}
old_archived_ids = daos.query_existing_archived_ids(chain_code, syncing_txids)
if archived_id in old_archived_ids:
old_archived_ids.remove(archived_id)
existing_txids = daos.filter_existing_txids(chain_code, syncing_txids)
to_be_created_actions = [i for i in syncing_actions if i.txid not in existing_txids]
expand_count = 0
with db.atomic():
if to_be_confirmed_actions:
for txid, action in to_be_confirmed_actions.items():
_on_transaction_confirmed(
chain_code=chain_code,
txid=txid,
status=action.status,
fee_used=action.fee_used,
block_hash=action.block_hash,
block_number=action.block_number,
block_time=action.block_time,
archived_id=archived_id,
)
expand_count += len(to_be_confirmed_actions)
if old_archived_ids:
expand_count += daos.update_archived_id(list(old_archived_ids), archived_id)
if to_be_created_actions:
for i in to_be_created_actions:
i.archived_id = archived_id
daos.bulk_create(to_be_created_actions)
expand_count += len(to_be_created_actions)
return expand_count
def _on_transaction_confirmed(
chain_code: str,
txid: str,
status: TxActionStatus,
fee_used: Decimal,
block_number: int,
block_hash: str,
block_time: int,
archived_id: int = None,
):
require(status in (TxActionStatus.CONFIRM_SUCCESS, TxActionStatus.CONFIRM_REVERTED))
logger.info(
f"Transaction confirmed. chain_code: {chain_code}, txid: {txid}, status: {status}, block_number: {block_number}"
)
daos.on_transaction_confirmed(
chain_code=chain_code,
txid=txid,
status=status,
fee_used=fee_used,
block_hash=block_hash,
block_number=block_number,
block_time=block_time,
archived_id=archived_id,
)
chain_info = coin_manager.get_chain_info(chain_code)
if chain_info.chain_model == coin_data.ChainModel.UTXO:
utxo_manager.mark_utxos_spent_by_txid(chain_code, txid)
if chain_info.nonce_supported is not True:
return
actions = daos.query_actions_by_txid(chain_code, txid, index=0)
main_action = actions[0] if actions and actions[0].nonce >= 0 else None
if not main_action:
return
same_nonce_actions = daos.query_actions_by_nonce(chain_code, main_action.from_address, main_action.nonce)
replaced_action_txids = {
i.txid for i in same_nonce_actions if i.txid != txid and i.status == TxActionStatus.PENDING
}
for txid in replaced_action_txids:
daos.update_actions_status(chain_code, txid, TxActionStatus.REPLACED)
def delete_actions_by_addresses(chain_code: str, addresses: List[str]) -> int:
return daos.delete_actions_by_addresses(chain_code, addresses)
@timing_logger("transaction_manager.on_ticker_signal")
def on_ticker_signal():
update_pending_actions()
|
the-stack_106_18642
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numpy.core.fromnumeric import ravel
from numpy.lib.type_check import real
import rospy
import tf
import math
from nav_msgs.srv import GetMap
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from visualization_msgs.msg import MarkerArray,Marker
from nav_msgs.msg import OccupancyGrid
import numpy as np
import cv2
from icp import ICP
from ekf import EKF,STATE_SIZE
from extraction import Extraction
from sift_match import sift_match
import matplotlib.pyplot as plt
MAX_LASER_RANGE = 30
def r2d(r):
return r*180/math.pi
class SLAM_EKF():
def __init__(self):
# ros param
self.robot_x = rospy.get_param('/slam/robot_x',0)
self.robot_y = rospy.get_param('/slam/robot_y',0)
self.robot_theta = rospy.get_param('/slam/robot_theta',0)
self.icp = ICP()
self.ekf = EKF()
self.extraction = Extraction()
# odom robot init states
self.sensor_sta = [self.robot_x,self.robot_y,self.robot_theta]
self.isFirstScan = True
self.src_pc = []
self.tar_pc = []
self.map = None
# State Vector [x y yaw]
self.xOdom = np.zeros((3, 1))
self.xEst = np.zeros((3, 1))
self.PEst = np.zeros((3, 3))
# ros topic
self.laser_sub = rospy.Subscriber('/scan',LaserScan,self.laserCallback)
self.map_sub = rospy.Subscriber('/map',OccupancyGrid,self.mapCallback)
self.location_pub = rospy.Publisher('ekf_location',Odometry,queue_size=3)
self.odom_pub = rospy.Publisher('icp_odom',Odometry,queue_size=3)
self.odom_broadcaster = tf.TransformBroadcaster()
self.landMark_pub = rospy.Publisher('/landMarks',MarkerArray,queue_size=1)
self.tf = tf.TransformListener()
self.resolution = 0
self.count = 0
def get_real_loc(self):
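        # Descriptive comment: query tf for the base_footprint/map and map/world_base
        # transforms and compose them to express the robot's reference pose (x, y, yaw)
        # in the world_base frame; used for the first few scans and for logging.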
self.tf = tf.TransformListener()
self.tf.waitForTransform("/base_footprint", "/map", rospy.Time(),
rospy.Duration(4.0))
l2m, rot = self.tf.lookupTransform('/base_footprint', '/map', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rot)
roll, pitch, yaw_l2m = euler[0], euler[1], euler[2]
self.tf.waitForTransform("/map", "/world_base", rospy.Time(),
rospy.Duration(4.0))
m2w, rot = self.tf.lookupTransform('/map', '/world_base', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rot)
roll, pitch, yaw_m2w = euler[0], euler[1], euler[2]
dx = -l2m[0] * math.cos(yaw_l2m) - l2m[1] * math.sin(yaw_l2m) - m2w[0]
dy = l2m[0] * math.sin(yaw_l2m) - l2m[1] * math.cos(yaw_l2m) - m2w[1]
dthela = -yaw_l2m
z_world = np.array([ [dx * math.cos(yaw_m2w) + dy * math.sin(yaw_m2w)],
[- dx * math.sin(yaw_m2w) + dy * math.cos(yaw_m2w)],
[dthela - yaw_m2w]
])
return z_world
def match(self, np_msg):
self.count += 1
print ("Match with static map!")
real_loc = self.get_real_loc()
if(self.count <= 10):
x, y, yaw = real_loc[0, 0], real_loc[1, 0], real_loc[2, 0]
else:
x, y, yaw = self.xEst[0, 0], self.xEst[1, 0], self.xEst[2, 0]
# print ("Current position:", x, y ,yaw)
x_min = np.min(np_msg[0, :])
y_min = np.min(np_msg[1, :])
x_max = np.max(np_msg[0, :])
y_max = np.max(np_msg[1, :])
# laser_frame = np.zeros((int((y_max-y_min)/self.resolution) + 5, int((x_max - x_min)/self.resolution) + 5),dtype=np.uint8)
# laser_frame[:,:] = 255
# print ("laser frame size:", laser_frame.shape)
# for i in range(np_msg.shape[1]):
# y = int((np_msg[1, i] - y_min) / self.resolution) + 1
# x = int((np_msg[0, i] - x_min) / self.resolution) + 1
# laser_frame[y, x] = 0
# cv2.imshow('test', laser_frame)
# cv2.waitKey(0)
# cv2.destroyWindow('test')
# sift_match(cv2.cvtColor(self.map, cv2.COLOR_GRAY2RGB), cv2.cvtColor(laser_frame, cv2.COLOR_GRAY2RGB))
# Useless method
# T = self.icp.process(np_msg, self.map)
# print (T)
# print (self.T2u(T))
# return self.T2u(T)
# Another method
src_pc = np_msg.copy()
dx = src_pc[0, :]
dy = src_pc[1, :]
src_pc[0, :] = dx * math.cos(yaw) - dy * math.sin(yaw) + x
src_pc[1, :] = dx * math.sin(yaw) + dy * math.cos(yaw) + y
x_min = np.min(src_pc[0, :])
y_min = np.min(src_pc[1, :])
x_max = np.max(src_pc[0, :])
y_max = np.max(src_pc[1, :])
tmp_map = self.map[:, (self.map[0, :] > x_min) & (self.map[1, :] > y_min)
& (self.map[0, :] < x_max) & (self.map[1, :] < y_max) ]
# Show piece of the map
# self.count += 1
# if(self.count % 200 == 0):
# plt.axis("equal")
# plt.plot(tmp_map[0, :], tmp_map[1, :], marker='*', linestyle='')
# plt.show()
# plt.axis("equal")
# plt.plot(src_pc[0, :], src_pc[1, :], marker='*', linestyle='')
# plt.show()
# return np.array([[0],[0],[0]]), 0
try:
T, matches = self.icp.process(src_pc, tmp_map)
except:
return np.array([[0],[0],[0]]), 0
u = self.T2u(T)
z = np.array([[x-u[0, 0]],[y-u[1, 0]],[yaw-u[2, 0]]])
if(self.count <= 10):
return real_loc, 1000000
else:
return z, matches
# return real_loc, 1000000
def laserCallback(self, msg):
real_loc = self.get_real_loc()
print ("Real position:", real_loc[0, 0], real_loc[1, 0], real_loc[2, 0])
np_msg = self.laserToNumpy(msg)
# lm = self.extraction.process(np_msg)
u = self.calc_odometry(np_msg)
z, matches = self.match(np_msg)
self.xEst, self.PEst = self.ekf.estimate(self.xEst, self.PEst, z, u, matches)
# self.xEst, self.PEst = z, np.zeros((3,3))
self.publishResult()
pass
def mapCallback(self, msg):
print("Map received!!!")
print ("reso:", msg.info.resolution)
print ("Origin:", (msg.info.origin.position.x, msg.info.origin.position.y))
self.tf = tf.TransformListener()
self.tf.waitForTransform("/map", "/world_base", rospy.Time(),
rospy.Duration(4.0))
trans, rot = self.tf.lookupTransform('/map', '/world_base', rospy.Time(0))
euler = tf.transformations.euler_from_quaternion(rot)
roll, pitch, yaw = euler[0], euler[1], euler[2]
print ("Trans:", trans)
print ("Yaw:", yaw)
print ("Map height:", msg.info.height, "Map width:", msg.info.width)
self.map = np.array(msg.data).reshape(msg.info.height, msg.info.width).astype(np.uint8)
lines = cv2.Canny(self.map, 50, 150, apertureSize = 3)
rows, cols = lines.shape
# M = cv2.getRotationMatrix2D(((cols-1)/2.0,(rows-1)/2.0), r2d(yaw), 1)
# lines = cv2.warpAffine(lines,M,(cols,rows))
# lines = 255 - lines
# self.resolution = msg.info.resolution
# self.map = lines
# cv2.imshow('Canny', lines)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# oy, ox = np.nonzero(lines > 0)
# get critical points
ox, oy = np.nonzero(lines > 0)
ox = ox*msg.info.resolution + msg.info.origin.position.y
oy = oy*msg.info.resolution + msg.info.origin.position.x
# Show map base
# plt.axis("equal")
# plt.plot(oy, ox, marker='*', linestyle='')
# plt.show()
ox = ox - trans[1]
oy = oy - trans[0]
world_base_x = oy * math.cos(yaw) + ox * math.sin(yaw)
world_base_y = - oy * math.sin(yaw) + ox * math.cos(yaw)
print ("Total points from static map:", ox.shape[0])
self.map = np.zeros((3, ox.shape[0]))
self.map[0, :] = world_base_x
self.map[1, :] = world_base_y
self.map[2, :] = 1
# Show world base
# plt.axis("equal")
# plt.plot(ox, oy, marker='*', linestyle='')
# plt.show()
def observation(self, lm):
landmarks = lm
z = np.zeros((0, 3))
for i in range(len(landmarks.id)):
dx = landmarks.position_x[i]
dy = landmarks.position_y[i]
d = math.hypot(dx, dy)
angle = self.ekf.pi_2_pi(math.atan2(dy, dx))
zi = np.array([d, angle, i])
z = np.vstack((z, zi))
return z
def calc_odometry(self, np_msg):
if self.isFirstScan:
self.tar_pc = np_msg
self.isFirstScan = False
return np.array([[0.0,0.0,0.0]]).T
self.src_pc = np_msg
transform_acc, matches = self.icp.process(self.tar_pc, self.src_pc)
self.tar_pc = np_msg
return self.T2u(transform_acc)
def laserToNumpy(self,msg):
total_num = len(msg.ranges)
pc = np.ones([3,total_num])
range_l = np.array(msg.ranges)
range_l[range_l == np.inf] = MAX_LASER_RANGE
angle_l = np.linspace(msg.angle_min,msg.angle_max,total_num)
pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
pc = pc[:, ~np.any(np.isnan(pc), axis=0)]
return pc
def T2u(self, t):
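        # Descriptive comment: recover (dx, dy, dtheta) from a 3x3 homogeneous 2D
        # transform; the angle comes from atan2 of the rotation block's first column,
        # the translation from the last column.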
dw = math.atan2(t[1,0],t[0,0])
u = np.array([[t[0,2],t[1,2],dw]])
return u.T
def u2T(self, u):
w = u[2]
dx = u[0]
dy = u[1]
return np.array([
[ math.cos(w), -math.sin(w), dx],
[ math.sin(w), math.cos(w), dy],
[0,0,1]
])
def lm2pc(self, lm):
total_num = len(lm.id)
dy = lm.position_y
dx = lm.position_x
range_l = np.hypot(dy,dx)
angle_l = np.arctan2(dy,dx)
pc = np.ones((3,total_num))
pc[0:2,:] = np.vstack((np.multiply(np.cos(angle_l),range_l),np.multiply(np.sin(angle_l),range_l)))
pc = pc[:, ~np.any(np.isnan(pc), axis=0)]
return pc
def publishResult(self):
# tf
s = self.xEst.reshape(-1)
q = tf.transformations.quaternion_from_euler(0,0,s[2])
self.odom_broadcaster.sendTransform((s[0],s[1],0.001),(q[0],q[1],q[2],q[3]),
rospy.Time.now(),"ekf_location","world_base")
# odom
odom = Odometry()
odom.header.stamp = rospy.Time.now()
odom.header.frame_id = "world_base"
odom.pose.pose.position.x = s[0]
odom.pose.pose.position.y = s[1]
odom.pose.pose.position.z = 0.001
odom.pose.pose.orientation.x = q[0]
odom.pose.pose.orientation.y = q[1]
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
self.location_pub.publish(odom)
s = self.xOdom
q = tf.transformations.quaternion_from_euler(0,0,s[2])
# odom
odom = Odometry()
odom.header.stamp = rospy.Time.now()
odom.header.frame_id = "world_base"
odom.pose.pose.position.x = s[0]
odom.pose.pose.position.y = s[1]
odom.pose.pose.position.z = 0.001
odom.pose.pose.orientation.x = q[0]
odom.pose.pose.orientation.y = q[1]
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
self.odom_pub.publish(odom)
pass
def main():
rospy.init_node('slam_node')
s = SLAM_EKF()
rospy.spin()
if __name__ == '__main__':
main()
|
the-stack_106_18647
|
import sys
from samples.classification.ram.architecture.ram_modules import (
Actor,
CoreRNN,
GlimpseSensor,
Locator,
SignalBaseline,
)
from samples.classification.ram.ram_params import get_ram_config
from draugr import pil_img_to_np_array
sys.path.append("..")
import torch
if __name__ == "__main__":
config = get_ram_config()
# load images
imgs = []
paths = [config["data_dir"] / "lenna.jpg", config["data_dir"] / "cat.jpg"]
for i in range(len(paths)):
img = pil_img_to_np_array(paths[i], desired_size=[512, 512], expand=True)
imgs.append(torch.from_numpy(img))
imgs = torch.cat(imgs).permute((0, 3, 1, 2))
B, C, H, W = imgs.shape
loc = torch.FloatTensor([[-1.0, 1.0], [-1.0, 1.0]])
sensor = GlimpseSensor(h_g=128, h_l=128, g=64, k=3, s=2, c=3)
g_t = sensor(imgs, loc)
assert g_t.shape == (B, 256)
rnn = CoreRNN(input_size=256, hidden_size=256)
h_t = torch.zeros(g_t.shape[0], 256)
h_t = rnn(g_t, h_t)
assert h_t.shape == (B, 256)
classifier = Actor(256, 10)
a_t = classifier(h_t)
assert a_t.shape == (B, 10)
loc_net = Locator(256, 2, 0.11)
mu, l_t = loc_net(h_t)
assert l_t.shape == (B, 2)
base = SignalBaseline(256, 1)
b_t = base(h_t)
assert b_t.shape == (B, 1)
|
the-stack_106_18648
|
#!/usr/bin/env python3
import json
import time
import pprint
from sys import argv, exit
from os import environ
from collections import OrderedDict
from prometheus_http_client import Prometheus
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
def get_series(p, series):
m = p.series([series])
j = json.loads(m)
return j.get("data",[])
# --
def get_results(p, query, past_days=7):
# Returns results of a range query, so we pick up older runs' latencies
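    # The Prometheus range API nominally returns a matrix result, i.e. a list of dicts
    # shaped like {"metric": {<label>: <value>, ...}, "values": [[<timestamp>, "<value>"], ...]};
    # callers below read res[i]["metric"] and res[i]["values"][0][1] accordingly.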
now = time.time()
start_ts = now - (60 * 60 * 24 * past_days)
m = p.query_rang(metric=query,start=start_ts, end=now, step=3600)
j = json.loads(m)
return j.get("data",{}).get("result")
# --
def get_completed_runs(p, mesh):
s = get_series(p,
'wrk2_benchmark_progress{exported_job="%s",status="done"}'%(mesh,))
r = sorted([ i.get("run") for i in s ])
return r
# --
def run_time_info(p, run, past_days=7):
info = {}
for kind in ["start", "end", "duration"]:
res = get_results(p,
'wrk2_benchmark_run_runtime{kind="%s",run="%s"}' % (kind,run),
past_days)
try:
info[kind] = int(res[0]["values"][0][1])
except IndexError:
print(" !!! Run %s lacks '%s' metric." % (run,kind))
return None
return info
# --
def get_latency_histogram(run,detailed=False,past_days=7):
# return RPS, histogram of a single run as dict
# <RPS>, {<percentile>: <latency in ms>, ...}
# e.g.: 500, {0.5: 399, 0.75: 478, 0.9: 589, ...}
ret=OrderedDict()
if detailed:
detailed="detailed_"
else:
detailed=""
out=[]
rps=0
for res in get_results(
p, 'wrk2_benchmark_latency_%sms{run="%s"}' %(detailed,run,),
past_days):
perc = float(res["metric"]["p"])
rps = float(res["metric"].get("rps",0))
lat = float(res["values"][0][1])
ret[perc] = lat
out.append("%s: %s" % (perc,lat))
if detailed == "":
print(" Run %s @%sRPS (%s): %s" %
(run, rps, "coarse" if detailed == "" else "detailed",
"\t".join(out)))
return rps, ret
# --
def get_latency_histograms(p, mesh, detailed=False, past_days=7):
# get all runs for a given service mesh.
# Returns dict of latency percentiles:
# { <percentile>: [ <lat>, <lat>, <lat>, ...], <percentile>:...},
# <percentile>: [...]}
# and info (doct) for each run (rps, start end, duration)
if False == detailed:
print("Mesh %s" %(mesh,))
histograms={}
info = {}
for run in get_completed_runs(p, mesh):
rps, h = get_latency_histogram(run, detailed, past_days)
i = run_time_info(p, run, past_days)
if not i:
continue
info[run] = i
info[run]["rps"] = rps
for perc,lat in h.items():
if histograms.get(perc, False):
histograms[perc][run]=lat
else:
histograms[perc] = OrderedDict({run:lat})
# sort runs' latencies for each percentile
for perc in histograms.keys():
histograms[perc] = {k: v for k, v in
sorted(histograms[perc].items(), key=lambda item: item[1])}
return histograms, info
# --
def create_summary_gauge(p, mesh, r, detailed=False, past_days=7):
histograms, info = get_latency_histograms(p, mesh, detailed, past_days)
if detailed:
detailed="detailed_"
else:
detailed=""
g = Gauge('wrk2_benchmark_summary_latency_%sms' % (detailed,),
'%s latency summary' % (detailed,),
labelnames=[
"p","source_run", "requested_rps", "start", "end", "duration"],
registry=r)
percs_count=0
# create latency entries for all runs, per percentile
for perc, latencies in histograms.items():
percs_count = percs_count + 1
for run, lat in latencies.items():
g.labels(p=perc, source_run=run, requested_rps=info[run]["rps"],
start=info[run]["start"]*1000,
# dashboard link fix: set end to 1min after actual end
end = (info[run]["end"] + 60) *1000,
duration=info[run]["duration"]).set(lat)
return g, percs_count, len(info)
# --
#
# -- main --
#
if 3 > len(argv):
print(
'Command line error: Prometheus URL and push gateway are required.')
print('Usage:')
print(' %s <Prometheus URL> <push gateway host:port> [<past-days>]'
% (argv[0],))
exit(1)
prometheus_url = argv[1]
pgw_url = argv[2]
past_days=7
if 4 == len(argv):
past_days=int(argv[3])
environ['PROMETHEUS_URL'] = prometheus_url
p = Prometheus()
for mesh in ["bare-metal", "svcmesh-linkerd", "svcmesh-istio", "svcmesh-consul"]:
r = CollectorRegistry()
workaround = mesh
g, percs, runs = create_summary_gauge(p, mesh, r, past_days=past_days)
dg, dpercs, druns = create_summary_gauge(p, mesh, r, detailed=True,
past_days=past_days)
print("%s: %d runs with %d percentiles (coarse)" % (mesh, runs, percs))
print("%s: %d runs with %d percentiles (detailed)" % (mesh, druns, dpercs))
push_to_gateway(
pgw_url, job=mesh, grouping_key={"instance":"emojivoto"}, registry=r)
|
the-stack_106_18649
|
# %%
from itertools import permutations
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from ml_for_programmers.config import Config
# %%
config = Config()
# %%
colors = Image.open(config.data_dir_path / "raw/colors.png")
# %%
colors
# %%
plt.imshow(colors)
# %%
color_array = np.array(colors)
# %%
type(color_array)
# %%
color_array.shape
# %%
color_array
# %%
color_array[:1]
# %%
plt.imshow(color_array[:1])
# %%
plt.imshow(color_array[:, :1])
# %%
plt.imshow(color_array[0])
# %%
color_array[0].shape
# %%
plt.imshow(color_array[0].reshape(1, 4, 3))
# %%
plt.imshow(np.expand_dims(color_array[0], axis=0))
# %%
layers = np.split(color_array, 3, axis=2)
len(layers)
# %%
layers[0].shape
# %%
plt.imshow(layers[0], cmap="binary")
# %%
list(permutations([1, 2, 3]))
# %%
fig, axes = plt.subplots(2, 3)
lin_axes = axes.reshape(-1)
for i, p in enumerate(permutations(layers)):
lin_axes[i].imshow(np.concatenate(p, axis=2))
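# %%
# The 2x3 grid above shows all 3! = 6 orderings of the R, G, B layers: each
# permutation swaps color channels, so the same picture appears in six recolorings.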
# %%
|
the-stack_106_18650
|
__all__ = [
'Namespace',
'SubcommandHelpFormatter',
'UsageHelpFormatter',
'create_parser_dry_run',
'create_parser_filter_dates',
'create_parser_local',
'create_parser_logging',
'create_parser_meta',
'create_parser_yes',
'custom_path',
'merge_defaults',
'parse_args'
]
import argparse
import os
from pathlib import Path
from .datetime import datetime_string_to_time_period
from .path import UNIX_PATH_RE, convert_unix_path
from .structures import AttrMapping
#########
# Utils #
#########
# I use Windows Python install from Cygwin.
# This converts Unix-style paths to Windows-style paths.
def custom_path(value):
if os.name == 'nt' and UNIX_PATH_RE.match(str(value)):
value = Path(convert_unix_path(str(value)))
value = Path(value)
return value
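# A minimal usage sketch (hypothetical argument name) for custom_path as an argparse
# type: on a Windows Python driven from Cygwin, a Unix-style value is converted to a
# Windows-style Path; everywhere else the value is simply wrapped in pathlib.Path.
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--output', type=custom_path)   # '--output' is hypothetical
#   args = parser.parse_args(['--output', '/tmp/music'])
#   # args.output is a pathlib.Path (converted if it matched UNIX_PATH_RE on Windows)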
###########
# Parsers #
###########
def create_parser_meta(title, version):
meta = argparse.ArgumentParser(
add_help=False
)
meta_options = meta.add_argument_group("Options")
meta_options.add_argument(
'-h', '--help',
action='help',
help="Display help."
)
meta_options.add_argument(
'-V', '--version',
action='version',
version=f"{title} {version}",
help="Output version."
)
return meta
def create_parser_dry_run():
dry_run = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
add_help=False
)
dry_run_options = dry_run.add_argument_group("Action")
dry_run_options.add_argument(
'-n', '--dry-run',
action='store_true',
help="Output results without taking action."
)
return dry_run
def create_parser_yes():
yes = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
add_help=False
)
yes_options = yes.add_argument_group("Action")
yes_options.add_argument(
'-y', '--yes',
action='store_true',
help="Don't ask for confirmation."
)
return yes
def create_parser_logging():
logging_ = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
add_help=False
)
logging_options = logging_.add_argument_group("Logging")
logging_options.add_argument(
'-v', '--verbose',
action='count',
help="Increase verbosity of output."
)
logging_options.add_argument(
'-q', '--quiet',
action='count',
help="Decrease verbosity of output."
)
logging_options.add_argument(
'--debug',
action='store_true',
help="Output log messages from dependencies."
)
logging_options.add_argument(
'--log-to-stdout',
action='store_true',
help="Log to stdout."
)
logging_options.add_argument(
'--no-log-to-stdout',
action='store_true',
help="Don't log to stdout."
)
logging_options.add_argument(
'--log-to-file',
action='store_true',
help="Log to file."
)
logging_options.add_argument(
'--no-log-to-file',
action='store_true',
help="Don't log to file."
)
return logging_
def create_parser_local():
local = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
add_help=False
)
local_options = local.add_argument_group("Local")
local_options.add_argument(
'--no-recursion',
action='store_true',
help=(
"Disable recursion when scanning for local files.\n"
"Recursion is enabled by default."
)
)
local_options.add_argument(
'--max-depth',
metavar='DEPTH',
type=int,
help=(
"Set maximum depth of recursion when scanning for local files.\n"
"Default is infinite recursion."
)
)
local_options.add_argument(
'-xp', '--exclude-path',
metavar='PATH',
action='append',
dest='exclude_paths',
help=(
"Exclude filepaths.\n"
"Can be specified multiple times."
)
)
local_options.add_argument(
'-xr', '--exclude-regex',
metavar='RX',
action='append',
dest='exclude_regexes',
help=(
"Exclude filepaths using regular expressions.\n"
"Can be specified multiple times."
)
)
local_options.add_argument(
'-xg', '--exclude-glob',
metavar='GP',
action='append',
dest='exclude_globs',
help=(
"Exclude filepaths using glob patterns.\n"
"Can be specified multiple times.\n"
"Absolute glob patterns not supported."
)
)
return local
def create_parser_filter_dates():
filter_dates = argparse.ArgumentParser(
argument_default=argparse.SUPPRESS,
add_help=False
)
dates_options = filter_dates.add_argument_group("Filter")
dates_options.add_argument(
'--created-in',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, in_=True),
help="Include items created in year or year/month."
)
dates_options.add_argument(
'--created-on',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, on=True),
help="Include items created on date."
)
dates_options.add_argument(
'--created-before',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, before=True),
help="Include items created before datetime."
)
dates_options.add_argument(
'--created-after',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, after=True),
help="Include items created after datetime."
)
dates_options.add_argument(
'--modified-in',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, in_=True),
help="Include items created in year or year/month."
)
dates_options.add_argument(
'--modified-on',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, on=True),
help="Include items created on date."
)
dates_options.add_argument(
'--modified-before',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, before=True),
help="Include items modified before datetime."
)
dates_options.add_argument(
'--modified-after',
metavar='DATE',
type=lambda d: datetime_string_to_time_period(d, after=True),
help="Include items modified after datetime."
)
return filter_dates
############
# argparse #
############
class Namespace(AttrMapping):
pass
class UsageHelpFormatter(argparse.RawTextHelpFormatter): # pragma: nocover
def add_usage(self, usage, actions, groups, prefix="Usage: "):
super().add_usage(usage, actions, groups, prefix)
# Removes the command list while leaving the usage metavar intact.
class SubcommandHelpFormatter(UsageHelpFormatter): # pragma: nocover
def _format_action(self, action):
parts = super()._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
def merge_defaults(defaults, parsed):
args = Namespace()
args.update(defaults)
args.update(parsed)
return args
def parse_args(parser, args=None):
return parser.parse_args(args, namespace=Namespace())
|
the-stack_106_18651
|
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
input0 = Input("input0", "TENSOR_BOOL8", "{1, 1, 4, 1, 1}")
output0 = Output("output0", "TENSOR_BOOL8", "{1, 1, 4, 1, 1}")
model = Model().Operation("LOGICAL_NOT", input0).To(output0)
Example({
input0: [True, False, False, True],
output0: [False, True, True, False],
})
|
the-stack_106_18655
|
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
def read_readme():
with open('README.md') as f:
return f.read()
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='polyaxon-dockerizer',
version='0.5.6',
description='Python dockerizer to generate and build docker images.',
maintainer='Polyaxon, Inc.',
maintainer_email='[email protected]',
author='Polyaxon, Inc.',
author_email='[email protected]',
url='https://github.com/polyaxon/polyaxon',
license='Apache 2.0',
long_description_content_type="text/markdown",
long_description=read_readme(),
platforms='any',
packages=find_packages(),
keywords=[
'polyaxon',
'kubernetes',
'containers',
          'docker',
          'instrumentation'
],
install_requires=[
'docker==4.0.2',
'Unipath==1.1',
],
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
],
tests_require=[
"pytest",
],
cmdclass={'test': PyTest})
|
the-stack_106_18656
|
# -*- coding:utf-8 -*-
from meiduo_mall.libs.yuntongxun.CCPRestSDK import REST
# Note: main account. After logging in to the yuntongxun (Cloopen) website, the developer main account ACCOUNT SID is shown under "Console - Applications".
_accountSid = '8aaf0708669b101e0166a61092640aa1'
# Note: main account token. The developer main account AUTH TOKEN is shown under Console - Applications after logging in.
_accountToken = 'a4d7ae4833554c0cbab6012abaa1dac9'
# Use the APPID shown on the management console home page, or the APPID of an application you created yourself.
_appId = '8aaf0708669b101e0166a61092cb0aa8'
# Note: request address; configure as app.cloopen.com in production.
_serverIP = 'sandboxapp.cloopen.com'
# Note: request port; 8883 in production.
_serverPort = "8883"
# Note: REST API version number, keep unchanged.
_softVersion = '2013-12-26'
# Official sample code from yuntongxun (Cloopen) for sending template SMS messages:
# # Send a template SMS
# # @param to phone number
# # @param datas content data, an array such as {'12','34'}; pass '' if no substitution is needed
# # @param $tempId template Id
#
# def sendTemplateSMS(to, datas, tempId):
# # 初始化REST SDK
# rest = REST(serverIP, serverPort, softVersion)
# rest.setAccount(accountSid, accountToken)
# rest.setAppId(appId)
#
# result = rest.sendTemplateSMS(to, datas, tempId)
# for k, v in result.iteritems():
#
# if k == 'templateSMS':
# for k, s in v.iteritems():
# print '%s:%s' % (k, s)
# else:
# print '%s:%s' % (k, v)
class CCP(object):
"""发送短信的辅助类"""
def __new__(cls, *args, **kwargs):
        # If the class attribute _instance does not exist yet, create it; _instance is the single CCP object (singleton)
if not hasattr(CCP, "_instance"):
cls._instance = super(CCP, cls).__new__(cls, *args, **kwargs)
cls._instance.rest = REST(_serverIP, _serverPort, _softVersion)
cls._instance.rest.setAccount(_accountSid, _accountToken)
cls._instance.rest.setAppId(_appId)
return cls._instance
def send_template_sms(self, to, datas, temp_id):
"""发送模板短信"""
# @param to 手机号码
# @param datas 内容数据 格式为数组 例如:{'12','34'},如不需替换请填 ''
# @param temp_id 模板Id
result = self.rest.sendTemplateSMS(to, datas, temp_id)
        # If yuntongxun sent the SMS successfully, the "statusCode" field of the returned result dict is "000000"
if result.get("statusCode") == "000000":
            # Return 0 to indicate the SMS was sent successfully
return 0
else:
            # Return -1 to indicate sending failed
return -1
if __name__ == '__main__':
    # Note: the test SMS template id is 1
result = CCP().send_template_sms('18784381659', ['1234', 5], 1)
    print(result)  # 0 means success
|
the-stack_106_18657
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
from looker_deployer import version as pkg
NAME = "looker_deployer"
VERSION = pkg.__version__
REQUIRES = ["looker-sdk>=21.18.0", "oyaml", "python-json-logger"]
setup(
author="Looker Open Source",
author_email="[email protected]",
description="A Looker Deployment Tool",
install_requires=REQUIRES,
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
keywords=["Looker Deployer"],
license="Apache License 2.0",
name=NAME,
packages=["looker_deployer", "looker_deployer/commands", "looker_deployer/utils"],
entry_points={"console_scripts": ["ldeploy=looker_deployer.cli:main"]},
python_requires=">=3.6.0, <3.9",
version=VERSION
)
|
the-stack_106_18663
|
from typing import Any, Iterable, List, Optional
import pytest
from di.api.dependencies import DependantBase
from di.api.executor import AsyncTask, State, SyncTask, Task
from di.dependant import Dependant
from di.executors import AsyncExecutor, SyncExecutor
class TestAsyncTask(AsyncTask):
def __init__(
self,
dependant: DependantBase[Any],
):
...
async def compute(self, state: State) -> Iterable[Optional[Task]]:
raise NotImplementedError
class TestSyncTask(SyncTask):
def __init__(
self,
dependant: DependantBase[Any],
):
...
def compute(self, state: State) -> Iterable[Optional[Task]]:
raise NotImplementedError
def test_executing_async_dependencies_in_sync_executor():
state = State(object())
exc = SyncExecutor()
match = "Cannot execute async dependencies in execute_sync"
with pytest.raises(TypeError, match=match):
exc.execute_sync([TestAsyncTask(Dependant())], state)
def test_simple_sync_executor():
executed: List[int] = []
class Task1(TestSyncTask):
def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(1)
return [Task2(Dependant())]
class Task2(TestSyncTask):
def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(2)
return [Task3(Dependant())]
class Task3(TestSyncTask):
def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(3)
return [None]
exc = SyncExecutor()
exc.execute_sync([Task1(Dependant())], State(object()))
assert executed == [1, 2, 3]
@pytest.mark.anyio
async def test_simple_async_executor():
executed: List[int] = []
class Task1(TestAsyncTask):
async def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(1)
return [Task2(Dependant())]
class Task2(TestAsyncTask):
async def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(2)
return [Task3(Dependant())]
class Task3(TestAsyncTask):
async def compute(self, state: State) -> Iterable[Optional[Task]]:
executed.append(3)
return [None]
exc = AsyncExecutor()
await exc.execute_async([Task1(Dependant())], State(object()))
assert executed == [1, 2, 3]
|