filename | text
---|---
the-stack_106_23380 | """
Plist Service - handles parsing and formatting plist content
"""
from .exceptions import MuxError
from ..util import Log
import plistlib
import re
import ssl
import struct
from socket import socket
from typing import Optional, Dict, Any
from .usbmux import USBMux, MuxDevice
__all__ = ['PlistService']
log = Log.getLogger(__name__)
HARDWARE_PLATFORM_SUB = re.compile(r'[^\w<>/ \-_0-9\"\'\\=.?!+]+').sub
class PlistService:
def __exit__(self, *args):
self.close()
def __init__(
self,
port: int = 62078,
udid: Optional[str] = None,
device: Optional[MuxDevice] = None,
ssl_file: Optional[str] = None,
network=None
):
self.port = port
self.device = device or USBMux().find_device(udid, network)
log.debug(f'Connecting to device: {self.device.serial}')
self.sock = self.device.connect(port) # type: socket
if ssl_file:
self.ssl_start(ssl_file, ssl_file)
def ssl_start(self, keyfile, certfile):
self.sock = ssl.wrap_socket(self.sock, keyfile, certfile)
def send(self, msg):
totalsent = 0
while totalsent < len(msg):
sent = self.sock.send(msg[totalsent:])
if sent == 0:
raise MuxError('socket connection broken')
totalsent = totalsent + sent
def recv(self, length=4096, timeout=-1):
try:
if timeout > 0:
self.sock.settimeout(timeout)
buf = self.sock.recv(length)
return buf
except Exception as E:
return b''
def close(self):
self.sock.close()
def recv_exact(self, to_read) -> bytes:
buffer = bytearray(to_read)
view = memoryview(buffer)
while view:
received = self.sock.recv_into(view, to_read)
if received:
view = view[received:]
to_read -= received
else:
break
return buffer
def recv_plist(self) -> Optional[Dict[str, Any]]:
resp = self.recv_exact(4)
if not resp or len(resp) != 4:
return None
payload = self.recv_exact(struct.unpack('>L', resp)[0])
log.debug(f'Received plist bytes: {payload}')
if not payload:
return None
if payload.startswith(b'bplist00') or payload.startswith(b'<?xml'):
data = plistlib.loads(payload)
log.debug(f'Received plist data: {data}')
return data
else:
raise ValueError('Received invalid data: {}'.format(payload[:100].hex()))
def send_plist(self, data):
log.debug(f'Sending plist: {data}')
payload = plistlib.dumps(data)
payload_len = struct.pack('>L', len(payload))
log.debug(f'Sending plist bytes: {payload_len + payload}')
return self.sock.send(payload_len + payload)
def plist_request(self, request):
self.send_plist(request)
return self.recv_plist()
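# A minimal, hedged usage sketch (not part of the original module). It assumes
# usbmuxd can reach a connected device on the default lockdown port 62078; the
# request dictionary is illustrative only:
#
#   service = PlistService(port=62078)
#   try:
#       reply = service.plist_request({'Request': 'QueryType'})
#       log.debug(f'QueryType reply: {reply}')
#   finally:
#       service.close()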
|
the-stack_106_23382 | class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
sett = set(nums)
dic = {}
for i in sett:
dic[i] = nums.count(i)
for i in dic:
if dic[i] > (len(nums) / 2):
return i |
the-stack_106_23384 | #!/usr/bin/env python
import time
import os
import sys
import logging
from logging.handlers import TimedRotatingFileHandler
logger = logging.getLogger(__name__)
FORMATTER = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
LOG_FILE = os.path.expanduser("~") + "/isybridge.log"
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(FORMATTER)
file_handler = TimedRotatingFileHandler(LOG_FILE, backupCount=5, when="midnight")
file_handler.setFormatter(FORMATTER)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
from isy994.controller import Controller
import isy994
import homie
import isy_homie
from .devices.barrier import Barrier
from .devices.binary import Binary
from .devices.contact import Contact
from .devices.controller_action import Controller_Action
from .devices.dimmer import Dimmer
from .devices.door_lock import Door_Lock
from .devices.fan import Fan
from .devices.isy_controller import ISY_Controller
from .devices.notification_sensor import Notification_Sensor
from .devices.program import Program
from .devices.scene import Scene
from .devices.siren import Siren
from .devices.switch import Switch
from .devices.thermostat import Thermostat
from .devices.variable import Variable
HOMIE_SETTINGS = {
"update_interval": 60,
"implementation": "ISY Bridge Version {} Homie 4 Version {}".format(
isy_homie.__version__, homie.__version__
),
"fw_name": "ISY994V5",
"fw_version": isy994.__version__,
}
LOG_SETTINGS = {
'enable': False,
'level': logging.ERROR,
}
class Bridge(object):
controller = None
homie_devices = {} # indexed by item identifier
def __init__(
self,
address=None,
username=None,
password=None,
homie_settings=HOMIE_SETTINGS,
mqtt_settings=None,
log_settings = LOG_SETTINGS,
):
if log_settings is not None and log_settings['enable'] is True:
logging.basicConfig(level=log_settings['level'], handlers=[file_handler, console_handler])
logger.info("ISY Homie MQTT {}".format(mqtt_settings))
self.homie_settings = homie_settings
self.mqtt_settings = mqtt_settings
self.controller = Controller(
address=address,
port=None,
username=username,
password=password,
use_https=False,
event_handler=self._isy_event_handler,
)
def _isy_event_handler(self, container, item, event, *args):
logger.info(
"Event {} from {}: {} {}".format(
event, container.container_type, item.name, *args
)
)
if container.container_type == "Device":
self._device_event_handler(item, event, args)
elif container.container_type == "Scene":
self._scene_event_handler(item, event, args)
elif container.container_type == "Variable":
self._variable_event_handler(item, event, args)
elif container.container_type == "Program":
self._program_event_handler(item, event, args)
elif container.container_type == "Controller":
self._controller_event_handler(item, event, args)
def _device_event_handler(self, device, event, *args):
logger.debug("Device event {}".format(device.name, event, args))
if event == "add":
bridge_device = None
if device.device_type == "binary":
bridge_device = Binary(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "barrier":
bridge_device = Barrier(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "contact":
bridge_device = Contact(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "dimmer":
bridge_device = Dimmer(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "fan":
bridge_device = Fan(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "lock":
bridge_device = Door_Lock(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "notification":
bridge_device = Notification_Sensor(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "thermostat":
bridge_device = Thermostat(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "siren":
bridge_device = Siren(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "switch":
bridge_device = Switch(device, self.homie_settings, self.mqtt_settings)
elif device.device_type == "controller":
bridge_device = Controller_Action(
device, self.homie_settings, self.mqtt_settings
)
if bridge_device is not None:
self.homie_devices[bridge_device.get_homie_device_id()] = bridge_device
else:
logger.warning("Unknown device {}".format(device))
def _scene_event_handler(self, device, event, *args):
logger.debug("Scene event {}".format(device.name, event))
if event == "add":
scene = Scene(device, self.homie_settings, self.mqtt_settings)
self.homie_devices[scene.get_homie_device_id()] = scene
def _variable_event_handler(self, device, event, *args):
logger.debug("Variable event {}".format(device.name, event))
if event == "add":
variable = Variable(device, self.homie_settings, self.mqtt_settings)
self.homie_devices[variable.get_homie_device_id()] = variable
def _program_event_handler(self, device, event, *args):
logger.debug("Program event {}".format(device.name, event))
if event == "add":
program = Program(device, self.homie_settings, self.mqtt_settings)
self.homie_devices[program.get_homie_device_id()] = program
def _controller_event_handler(self, device, event, *args):
logger.debug("Controller event {}".format(device.name, event))
# print ('container event',device.name,event)
if event == "add":
controller = ISY_Controller(device, self.homie_settings, self.mqtt_settings)
self.homie_devices[controller.get_homie_device_id()] = controller
# if event == 'property':
# if args [0] [0] == 'state' and args[0] [1] == 'lost'
# pass # could propagate this to all devices
# print ('args',args [0] [0], args[0] [1] )
def close(self):
for device in self.homie_devices.values():
device.close()
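# A hedged usage sketch (not part of the original module). The ISY address,
# credentials, and the MQTT settings keys below are illustrative assumptions,
# not values taken from this project:
#
#   bridge = Bridge(address='192.168.1.10', username='admin', password='admin',
#                   mqtt_settings={'MQTT_BROKER': 'localhost', 'MQTT_PORT': 1883})
#   ...  # Homie devices are created asynchronously as ISY events arrive
#   bridge.close()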
|
the-stack_106_23386 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Cards Toolkit.
The model cards toolkit (MCT) provides a set of utilities to help users
generate Model Cards from trained models within ML pipelines.
"""
import json
import os
import os.path
import shutil
import tempfile
from typing import Any, Dict, Optional, Text
from absl import logging
import jinja2
import jsonschema
from model_card_toolkit import model_card as model_card_module
from model_card_toolkit.utils import graphics
from model_card_toolkit.utils import tfx_util
import semantic_version
from ml_metadata.metadata_store import metadata_store
# Constants about versioned JSON schema files for Model Card.
_SCHEMA_DIR = os.path.join(os.path.dirname(__file__), 'schema')
_SCHEMA_FILE_NAME = 'model_card.schema.json'
# Constants about provided UI templates.
_UI_TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'template')
_DEFAULT_UI_TEMPLATE_FILE = os.path.join('html', 'default_template.html.jinja')
# Constants about Model Cards Toolkit Assets (MCTA).
_MCTA_JSON_FILE = 'data/model_card.json'
_MCTA_TEMPLATE_DIR = 'template'
_MCTA_RESOURCE_DIR = 'resources/plots'
# Constants about the final generated model cards.
_DEFAULT_MODEL_CARD_FILE_NAME = 'model_card.html'
_MODEL_CARDS_DIR = 'model_cards/'
class ModelCardToolkit():
"""Model Cards Toolkit (MCT) provides utilities to generate a ModelCard.
Given a model, a ModelCardToolkit finds related metadata and lineage from a
MLMD instance and generates a ModelCard about the model in various output
formats (e.g., HTML). The ModelCardToolkit includes a list of APIs designed
for a human-in-the-loop process to elaborate the ModelCard. It organizes the
ModelCard assets (e.g., structured data, plots, and UI templates) in a user
specified directory, and updates them incrementally via the APIs.
"""
def __init__(self,
output_dir: Optional[Text] = None,
mlmd_store: Optional[metadata_store.MetadataStore] = None,
model_uri: Optional[Text] = None):
"""Initializes the ModelCardToolkit.
Args:
output_dir: The MCT assets path where the json file and templates are
written to. If not given, a temp directory is used.
mlmd_store: A ml-metadata MetadataStore to retrieve metadata and lineage
information about the model stored at `model_uri`. If given, a set of
model card properties can be auto-populated from the `mlmd_store`.
model_uri: The path to the trained model to generate model cards.
Raises:
ValueError: If `mlmd_store` is given and the `model_uri` cannot be
resolved as a model artifact in the metadata store.
"""
self.output_dir = output_dir or tempfile.mkdtemp()
self._mcta_json_file = os.path.join(self.output_dir, _MCTA_JSON_FILE)
self._mcta_template_dir = os.path.join(self.output_dir, _MCTA_TEMPLATE_DIR)
self._model_cards_dir = os.path.join(self.output_dir, _MODEL_CARDS_DIR)
self._store = mlmd_store
if self._store:
if not model_uri:
raise ValueError('If `mlmd_store` is set, `model_uri` should be set.')
models = self._store.get_artifacts_by_uri(model_uri)
if not models:
raise ValueError(f'"{model_uri}" cannot be found in the `mlmd_store`.')
if len(models) > 1:
logging.info(
'%d artifacts are found with the `model_uri`="%s". '
'The last one is used.', len(models), model_uri)
self._artifact_with_model_uri = models[-1]
def _write_file(self, path: Text, content: Text) -> None:
"""Write content to the path."""
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w+') as f:
f.write(content)
def _read_file(self, path: Text) -> Text:
"""Read content from a path."""
with open(path, 'r') as f:
return f.read()
def scaffold_assets(self) -> model_card_module.ModelCard:
"""Generates the model cards tookit assets.
Model cards assets include the model card json file and customizable model
card UI templates.
An assets directory is created if one does not already exist.
If the MCT is initialized with an `mlmd_store`, it also auto-populates
the model card properties and generates related plots such as model
performance and data distribution plots.
Returns:
A ModelCard representing the given model.
"""
model_card = model_card_module.ModelCard()
if self._store:
model_card = tfx_util.generate_model_card_for_model(
self._store, self._artifact_with_model_uri.id)
metrics_artifacts = tfx_util.get_metrics_artifacts_for_model(
self._store, self._artifact_with_model_uri.id)
stats_artifacts = tfx_util.get_stats_artifacts_for_model(
self._store, self._artifact_with_model_uri.id)
for metrics_artifact in metrics_artifacts:
eval_result = tfx_util.read_metrics_eval_result(metrics_artifact.uri)
if eval_result is not None:
graphics.annotate_eval_result_plots(model_card, eval_result)
for stats_artifact in stats_artifacts:
train_stats = tfx_util.read_stats_proto(stats_artifact.uri, 'train')
eval_stats = tfx_util.read_stats_proto(stats_artifact.uri, 'eval')
graphics.annotate_dataset_feature_statistics_plots(
model_card, train_stats, eval_stats)
# Write JSON file.
self._write_file(self._mcta_json_file, model_card.to_json())
# Write UI template files.
shutil.copytree(_UI_TEMPLATE_DIR, self._mcta_template_dir)
return model_card
def update_model_card_json(self,
model_card: model_card_module.ModelCard) -> None:
"""Validates the model card and updates the JSON file in MCT assets.
If model_card.schema_version is not provided, it will assign the latest
schema version to the `model_card`, and validate it.
Args:
model_card: The updated model card that users want to write back.
Raises:
Error: when the given model_card is invalid w.r.t. the schema.
"""
if not model_card.schema_version:
sub_directories = [f for f in os.scandir(_SCHEMA_DIR) if f.is_dir()]
latest_schema_version = max(
sub_directories, key=lambda f: semantic_version.Version(f.name[1:]))
model_card.schema_version = latest_schema_version.name[1:]
# Validate the updated model_card first.
schema = self._find_model_card_schema(model_card.schema_version)
jsonschema.validate(model_card.to_dict(), schema)
# Write the updated JSON to the file.
self._write_file(self._mcta_json_file, model_card.to_json())
def export_format(self,
template_path: Text = None,
output_file=_DEFAULT_MODEL_CARD_FILE_NAME) -> Text:
"""Generates a model card based on the MCT assets.
Args:
template_path: The file path of the UI template. If not provided, the
default UI template will be used.
output_file: The file name of the generated model card. If not provided,
the default 'model_card.html' will be used. If the file already exists,
then it will be overwritten.
Returns:
The model card UI.
"""
if not template_path:
template_path = os.path.join(self._mcta_template_dir,
_DEFAULT_UI_TEMPLATE_FILE)
template_dir = os.path.dirname(template_path)
template_file = os.path.basename(template_path)
# Read JSON file.
model_card = json.loads(self._read_file(self._mcta_json_file))
# Generate Model Card.
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
autoescape=True,
auto_reload=True,
cache_size=0)
template = jinja_env.get_template(template_file)
# TODO(b/154990170) Think about how to adjust the img inside template.
model_card_file_content = template.render(
model_details=model_card['model_details'],
model_parameters=model_card['model_parameters'],
quantitative_analysis=model_card['quantitative_analysis'],
considerations=model_card['considerations'])
# Write the model card file.
model_card_file_path = os.path.join(self._model_cards_dir, output_file)
self._write_file(model_card_file_path, model_card_file_content)
return model_card_file_content
def save_mlmd(self) -> None:
"""Saves the model card of the model artifact with `model_uri` to MLMD."""
pass
def _find_model_card_schema(self, version: Text) -> Dict[Text, Any]:
"""Finds the model card JSON schema of a particular version.
A model card is created w.r.t. to ModelCard json schema. The MCT contains
a list of known Model Card JSON schemas. The util looks for the json schema
file at a particular version and returns for validation.
Args:
version: The version of the schema.
Returns:
Json schema.
Raises:
ValueError: if the expected schema for the given version cannot be found.
"""
sub_directories = [f for f in os.scandir(_SCHEMA_DIR) if f.is_dir()]
# Remove the first character 'v' from the path when comparing.
matching_dir = [f for f in sub_directories if str(f.name[1:]) == version]
if not matching_dir:
raise ValueError(
'Cannot find schema version that matches the version of the given '
'model card. Found Versions: {}. Given Version: {}'.format(
str([str(f.name[1:]) for f in sub_directories]), version))
schema_file = os.path.join(str(matching_dir[0].path), _SCHEMA_FILE_NAME)
with open(schema_file) as json_file:
schema = json.loads(json_file.read())
return schema
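# A hedged end-to-end sketch (not part of the original module) of the
# human-in-the-loop workflow described in the class docstring; the output
# directory and the model name are illustrative:
#
#   mct = ModelCardToolkit(output_dir='/tmp/model_card_assets')
#   model_card = mct.scaffold_assets()
#   model_card.model_details.name = 'Example model'
#   mct.update_model_card_json(model_card)
#   mct.export_format()  # writes model_cards/model_card.html under output_dir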
|
the-stack_106_23391 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import routes
import webob.dec
from nova.api import auth as api_auth
from nova.api import openstack as openstack_api
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import compute
from nova.api.openstack.compute import versions
from nova.api.openstack import urlmap
from nova.api.openstack import wsgi as os_wsgi
from nova.api import wsgi
from nova.compute import flavors
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db.main import models
from nova import exception as exc
from nova import objects
from nova.objects import base
from nova import quota
from nova.tests.unit import fake_block_device
from nova.tests.unit.objects import test_keypair
from nova import utils
CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_PROJECT_ID = '6a6a9c9eee154e9cb8cec487b98d36ab'
FAKE_USER_ID = '5fae60f5cf4642609ddd31f71748beac'
FAKE_UUIDS = {}
@webob.dec.wsgify
def fake_wsgi(self, req):
return self.application
def wsgi_app_v21(fake_auth_context=None, v2_compatible=False,
custom_routes=None):
# NOTE(efried): Keep this (roughly) in sync with api-paste.ini
def wrap(app, use_context=False):
if v2_compatible:
app = openstack_api.LegacyV2CompatibleWrapper(app)
if use_context:
if fake_auth_context is not None:
ctxt = fake_auth_context
else:
ctxt = context.RequestContext(
'fake', FAKE_PROJECT_ID, auth_token=True)
app = api_auth.InjectContext(ctxt, app)
app = openstack_api.FaultWrapper(app)
return app
inner_app_v21 = compute.APIRouterV21(custom_routes=custom_routes)
mapper = urlmap.URLMap()
mapper['/'] = wrap(versions.Versions())
mapper['/v2'] = wrap(versions.VersionsV2())
mapper['/v2.1'] = wrap(versions.VersionsV2())
mapper['/v2/+'] = wrap(inner_app_v21, use_context=True)
mapper['/v2.1/+'] = wrap(inner_app_v21, use_context=True)
return mapper
def stub_out_key_pair_funcs(testcase, have_key_pair=True, **kwargs):
def key_pair(context, user_id):
return [dict(test_keypair.fake_keypair,
name='key', public_key='public_key', **kwargs)]
def one_key_pair(context, user_id, name):
if name in ['key', 'new-key']:
return dict(test_keypair.fake_keypair,
name=name, public_key='public_key', **kwargs)
else:
raise exc.KeypairNotFound(user_id=user_id, name=name)
def no_key_pair(context, user_id):
return []
if have_key_pair:
testcase.stub_out(
'nova.db.main.api.key_pair_get_all_by_user', key_pair)
testcase.stub_out('nova.db.main.api.key_pair_get', one_key_pair)
else:
testcase.stub_out(
'nova.db.main.api.key_pair_get_all_by_user', no_key_pair)
def stub_out_instance_quota(test, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
requested = deltas.pop(resource, 0)
if requested > allowed:
quotas = dict(instances=1, cores=1, ram=1)
quotas[resource] = quota
usages = dict(instances=dict(in_use=0, reserved=0),
cores=dict(in_use=0, reserved=0),
ram=dict(in_use=0, reserved=0))
usages[resource]['in_use'] = (quotas[resource] * 9 // 10 - allowed)
usages[resource]['reserved'] = quotas[resource] // 10
raise exc.OverQuota(overs=[resource], quotas=quotas,
usages=usages)
test.stub_out('nova.quota.QUOTAS.reserve', fake_reserve)
def stub_out_networking(test):
def get_my_ip():
return '127.0.0.1'
test.stub_out('oslo_utils.netutils.get_my_ipv4', get_my_ip)
def stub_out_compute_api_snapshot(test):
def snapshot(self, context, instance, name, extra_properties=None):
# emulate glance rejecting image names which are too long
if len(name) > 256:
raise exc.Invalid
return dict(id='123', status='ACTIVE', name=name,
properties=extra_properties)
test.stub_out('nova.compute.api.API.snapshot', snapshot)
class stub_out_compute_api_backup(object):
def __init__(self, test):
self.extra_props_last_call = None
test.stub_out('nova.compute.api.API.backup', self.backup)
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
self.extra_props_last_call = extra_properties
props = dict(backup_type=backup_type,
rotation=rotation)
props.update(extra_properties or {})
return dict(id='123', status='ACTIVE', name=name, properties=props)
def stub_out_nw_api(test, cls=None, private=None, publics=None):
if not private:
private = '192.168.0.3'
if not publics:
publics = ['1.2.3.4']
class Fake(object):
def __init__(self):
pass
def get_instance_nw_info(*args, **kwargs):
pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
def validate_networks(self, context, networks, max_count):
return max_count
def create_resource_requests(
self, context, requested_networks,
pci_requests=None, affinity_policy=None):
return None, [], objects.RequestLevelParams()
if cls is None:
cls = Fake
test.stub_out('nova.network.neutron.API', cls)
def stub_out_secgroup_api(test, security_groups=None):
def get_instances_security_groups_bindings(
context, servers, detailed=False):
instances_security_group_bindings = {}
if servers:
# we don't get security group information for down cells
instances_security_group_bindings = {
server['id']: security_groups or [] for server in servers
if server['status'] != 'UNKNOWN'
}
return instances_security_group_bindings
def get_instance_security_groups(context, instance, detailed=False):
return security_groups if security_groups is not None else []
test.stub_out(
'nova.network.security_group_api'
'.get_instances_security_groups_bindings',
get_instances_security_groups_bindings)
test.stub_out(
'nova.network.security_group_api.get_instance_security_groups',
get_instance_security_groups)
class FakeToken(object):
id_count = 0
def __getitem__(self, key):
return getattr(self, key)
def __init__(self, **kwargs):
FakeToken.id_count += 1
self.id = FakeToken.id_count
for k, v in kwargs.items():
setattr(self, k, v)
class FakeRequestContext(context.RequestContext):
def __init__(self, *args, **kwargs):
kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
super(FakeRequestContext, self).__init__(*args, **kwargs)
class HTTPRequest(os_wsgi.Request):
@classmethod
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
out = super(HTTPRequest, cls).blank(*args, **defaults)
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
is_admin=use_admin_context)
out.api_version_request = api_version.APIVersionRequest(version)
return out
class HTTPRequestV21(HTTPRequest):
pass
class TestRouter(wsgi.Router):
def __init__(self, controller, mapper=None):
if not mapper:
mapper = routes.Mapper()
mapper.resource("test", "tests",
controller=os_wsgi.Resource(controller))
super(TestRouter, self).__init__(mapper)
class FakeAuthDatabase(object):
data = {}
@staticmethod
def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
def auth_token_create(context, token):
fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
def auth_token_destroy(context, token_id):
token = FakeAuthDatabase.data.get('id_%i' % token_id)
if token and token.token_hash in FakeAuthDatabase.data:
del FakeAuthDatabase.data[token.token_hash]
del FakeAuthDatabase.data['id_%i' % token_id]
def create_info_cache(nw_cache):
if nw_cache is None:
pub0 = ('192.168.1.100',)
pub1 = ('2001:db8:0:1::1',)
def _ip(ip):
return {'address': ip, 'type': 'fixed'}
nw_cache = [
{'address': 'aa:aa:aa:aa:aa:aa',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'test1',
'subnets': [{'cidr': '192.168.1.0/24',
'ips': [_ip(ip) for ip in pub0]},
{'cidr': 'b33f::/64',
'ips': [_ip(ip) for ip in pub1]}]}}]
if not isinstance(nw_cache, str):
nw_cache = jsonutils.dumps(nw_cache)
return {
"info_cache": {
"network_info": nw_cache,
"deleted": False,
"created_at": None,
"deleted_at": None,
"updated_at": None,
}
}
def get_fake_uuid(token=0):
if token not in FAKE_UUIDS:
FAKE_UUIDS[token] = uuidutils.generate_uuid()
return FAKE_UUIDS[token]
def fake_instance_get(**kwargs):
def _return_server(context, uuid, columns_to_join=None, use_slave=False):
if 'project_id' not in kwargs:
kwargs['project_id'] = 'fake'
return stub_instance(1, **kwargs)
return _return_server
def fake_compute_get(**kwargs):
def _return_server_obj(context, *a, **kw):
return stub_instance_obj(context, **kwargs)
return _return_server_obj
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
def _return_servers(context, *args, **kwargs):
servers_list = []
marker = None
limit = None
found_marker = False
if "marker" in kwargs:
marker = kwargs["marker"]
if "limit" in kwargs:
limit = kwargs["limit"]
if 'columns_to_join' in kwargs:
kwargs.pop('columns_to_join')
if 'use_slave' in kwargs:
kwargs.pop('use_slave')
if 'sort_keys' in kwargs:
kwargs.pop('sort_keys')
if 'sort_dirs' in kwargs:
kwargs.pop('sort_dirs')
if 'cell_mappings' in kwargs:
kwargs.pop('cell_mappings')
for i in range(num_servers):
uuid = get_fake_uuid(i)
server = stub_instance(id=i + 1, uuid=uuid,
**kwargs)
servers_list.append(server)
if marker is not None and uuid == marker:
found_marker = True
servers_list = []
if marker is not None and not found_marker:
raise exc.MarkerNotFound(marker=marker)
if limit is not None:
servers_list = servers_list[:limit]
return servers_list
return _return_servers
def fake_compute_get_all(num_servers=5, **kwargs):
def _return_servers_objs(context, search_opts=None, limit=None,
marker=None, expected_attrs=None, sort_keys=None,
sort_dirs=None, cell_down_support=False,
all_tenants=False):
db_insts = fake_instance_get_all_by_filters()(None,
limit=limit,
marker=marker)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups']
return base.obj_make_list(context, objects.InstanceList(),
objects.Instance, db_insts,
expected_attrs=expected)
return _return_servers_objs
def stub_instance(id=1, user_id=None, project_id=None, host=None,
node=None, vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref=FAKE_UUID,
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
display_description=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
limit=None, marker=None,
launched_at=timeutils.utcnow(),
terminated_at=timeutils.utcnow(),
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
flavor=None, launch_index=0, kernel_id="",
ramdisk_id="", user_data=None, system_metadata=None,
services=None, trusted_certs=None, hidden=False):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
project_id = 'fake_project'
if metadata:
metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
elif include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=str(id))]
else:
metadata = []
sys_meta = flavors.save_flavor_info(
{}, flavors.get_flavor_by_flavor_id(int(flavor_id)))
sys_meta.update(system_metadata or {})
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
if security_groups is None:
security_groups = [{"id": 1, "name": "test", "description": "Foo:",
"project_id": "project", "user_id": "user",
"created_at": None, "updated_at": None,
"deleted_at": None, "deleted": False}]
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
info_cache = create_info_cache(nw_cache)
if flavor is None:
flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
flavorinfo = jsonutils.dumps({
'cur': flavor.obj_to_primitive(),
'old': None,
'new': None,
})
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
"deleted": None,
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": kernel_id,
"ramdisk_id": ramdisk_id,
"hostname": display_name or server_name,
"launch_index": launch_index,
"key_name": key_name,
"key_data": key_data,
"power_state": power_state,
"vm_state": vm_state or vm_states.ACTIVE,
"task_state": task_state,
"services": services,
"memory_mb": memory_mb,
"vcpus": vcpus,
"root_gb": root_gb,
"ephemeral_gb": ephemeral_gb,
"ephemeral_key_uuid": None,
"host": host,
"node": node,
"instance_type_id": flavor.id,
"user_data": user_data,
"reservation_id": reservation_id,
"launched_at": launched_at,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
"display_description": display_description,
"launched_on": "",
"locked": locked_by is not None,
"locked_by": locked_by,
"os_type": "",
"architecture": "",
"vm_mode": "",
"uuid": uuid,
"root_device_name": root_device_name,
"default_ephemeral_device": "",
"default_swap_device": "",
"config_drive": config_drive,
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"auto_disk_config": auto_disk_config,
"progress": progress,
"shutdown_terminate": True,
"disable_terminate": False,
"cell_name": "",
"metadata": metadata,
"system_metadata": utils.dict_to_metadata(sys_meta),
"security_groups": security_groups,
"cleaned": cleaned,
"pci_devices": [],
"extra": {"numa_topology": None,
"pci_requests": None,
"flavor": flavorinfo,
"trusted_certs": trusted_certs,
},
"tags": [],
"hidden": hidden,
"name": "instance-%s" % id,
}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
return instance
def stub_instance_obj(ctxt, *args, **kwargs):
db_inst = stub_instance(*args, **kwargs)
expected = ['metadata', 'system_metadata', 'flavor',
'info_cache', 'security_groups', 'tags']
inst = objects.Instance._from_db_object(ctxt, objects.Instance(),
db_inst,
expected_attrs=expected)
inst.fault = None
if db_inst["services"] is not None:
# This ensures services are set if the caller provided them
inst.services = db_inst["services"]
return inst
def stub_volume(id, **kwargs):
volume = {
'id': id,
'user_id': 'fakeuser',
'project_id': 'fakeproject',
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
'display_name': 'displayname',
'display_description': 'displaydesc',
'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'},
'multiattach': False,
'attachments': {'fakeuuid': {'mountpoint': '/'},
'fakeuuid2': {'mountpoint': '/dev/sdb'}
}
}
volume.update(kwargs)
return volume
def stub_volume_create(self, context, size, name, description, snapshot,
**param):
vol = stub_volume('1')
vol['size'] = size
vol['display_name'] = name
vol['display_description'] = description
try:
vol['snapshot_id'] = snapshot['id']
except (KeyError, TypeError):
vol['snapshot_id'] = None
vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
return vol
def stub_volume_update(self, context, *args, **param):
pass
def stub_volume_get(self, context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_all(context, search_opts=None):
return [stub_volume(100, project_id='fake'),
stub_volume(101, project_id='superfake'),
stub_volume(102, project_id='superduperfake')]
def stub_volume_check_attach(self, context, *args, **param):
pass
def stub_snapshot(id, **kwargs):
snapshot = {
'id': id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': timeutils.utcnow(),
'display_name': 'Default name',
'display_description': 'Default description',
'project_id': 'fake'
}
snapshot.update(kwargs)
return snapshot
def stub_snapshot_create(self, context, volume_id, name, description):
return stub_snapshot(100, volume_id=volume_id, display_name=name,
display_description=description)
def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
return {'snapshot': {'id': "421752a6-acf6-4b2d-bc7a-119f9148cd8c",
'volumeId': volume_id}}
def stub_snapshot_delete(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
pass
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id == '-1':
raise exc.SnapshotNotFound(snapshot_id=snapshot_id)
return stub_snapshot(snapshot_id)
def stub_snapshot_get_all(self, context):
return [stub_snapshot(100, project_id='fake'),
stub_snapshot(101, project_id='superfake'),
stub_snapshot(102, project_id='superduperfake')]
def stub_bdm_get_all_by_instance_uuids(context, instance_uuids,
use_slave=False):
i = 1
result = []
for instance_uuid in instance_uuids:
for x in range(2): # add two BDMs per instance
result.append(fake_block_device.FakeDbBlockDeviceDict({
'id': i,
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'volume_id%d' % (i),
'instance_uuid': instance_uuid,
}))
i += 1
return result
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
FLAVORS = {
'1': objects.Flavor(
id=1,
name='flavor 1',
memory_mb=256,
vcpus=1,
root_gb=10,
ephemeral_gb=20,
flavorid='1',
swap=10,
rxtx_factor=1.0,
vcpu_weight=None,
disabled=False,
is_public=True,
description=None,
extra_specs={"key1": "value1", "key2": "value2"}
),
'2': objects.Flavor(
id=2,
name='flavor 2',
memory_mb=512,
vcpus=1,
root_gb=20,
ephemeral_gb=10,
flavorid='2',
swap=5,
rxtx_factor=None,
vcpu_weight=None,
disabled=True,
is_public=True,
description='flavor 2 description',
extra_specs={}
),
}
def stub_out_flavor_get_by_flavor_id(test):
@staticmethod
def fake_get_by_flavor_id(context, flavor_id, read_deleted=None):
return FLAVORS[flavor_id]
test.stub_out('nova.objects.Flavor.get_by_flavor_id',
fake_get_by_flavor_id)
def stub_out_flavor_get_all(test):
@staticmethod
def fake_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
if marker in ['99999']:
raise exc.MarkerNotFound(marker)
def reject_min(db_attr, filter_attr):
return (filter_attr in filters and
getattr(flavor, db_attr) < int(filters[filter_attr]))
filters = filters or {}
res = []
for flavor in FLAVORS.values():
if reject_min('memory_mb', 'min_memory_mb'):
continue
elif reject_min('root_gb', 'min_root_gb'):
continue
res.append(flavor)
res = sorted(res, key=lambda item: getattr(item, sort_key))
output = []
marker_found = True if marker is None else False
for flavor in res:
if not marker_found and marker == flavor.flavorid:
marker_found = True
elif marker_found:
if limit is None or len(output) < int(limit):
output.append(flavor)
return objects.FlavorList(objects=output)
test.stub_out('nova.objects.FlavorList.get_all', fake_get_all)
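# A hedged illustration (not part of the original module) of how these helpers
# are typically combined in a controller unit test; `test` is a test case that
# provides stub_out() and `controller` is a hypothetical servers controller:
#
#   test.stub_out('nova.compute.api.API.get',
#                 fake_compute_get(uuid=FAKE_UUID, project_id=FAKE_PROJECT_ID))
#   req = HTTPRequest.blank('/%s/servers/%s' % (FAKE_PROJECT_ID, FAKE_UUID))
#   resp = controller.show(req, FAKE_UUID)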
|
the-stack_106_23392 | # Copyright 2017 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import time
import uuid
from google.cloud import storage
import pytest
import storage_add_bucket_conditional_iam_binding
import storage_add_bucket_iam_member
import storage_remove_bucket_conditional_iam_binding
import storage_remove_bucket_iam_member
import storage_set_bucket_public_iam
import storage_view_bucket_iam_members
MEMBER = "group:[email protected]"
ROLE = "roles/storage.legacyBucketReader"
CONDITION_TITLE = "match-prefix"
CONDITION_DESCRIPTION = "Applies to objects matching a prefix"
CONDITION_EXPRESSION = (
'resource.name.startsWith("projects/_/buckets/bucket-name/objects/prefix-a-")'
)
@pytest.fixture(scope="module")
def bucket():
bucket = None
while bucket is None or bucket.exists():
storage_client = storage.Client()
bucket_name = "test-iam-{}".format(uuid.uuid4())
bucket = storage_client.bucket(bucket_name)
bucket.iam_configuration.uniform_bucket_level_access_enabled = True
storage_client.create_bucket(bucket)
yield bucket
time.sleep(3)
bucket.delete(force=True)
@pytest.fixture(scope="function")
def public_bucket():
# New projects don't allow making a bucket publicly accessible, so
# we need to use the old main project for now.
original_value = os.environ['GOOGLE_CLOUD_PROJECT']
os.environ['GOOGLE_CLOUD_PROJECT'] = os.environ['MAIN_GOOGLE_CLOUD_PROJECT']
bucket = None
while bucket is None or bucket.exists():
storage_client = storage.Client()
bucket_name = "test-iam-{}".format(uuid.uuid4())
bucket = storage_client.bucket(bucket_name)
bucket.iam_configuration.uniform_bucket_level_access_enabled = True
storage_client.create_bucket(bucket)
yield bucket
time.sleep(3)
bucket.delete(force=True)
# Set the value back.
os.environ['GOOGLE_CLOUD_PROJECT'] = original_value
def test_view_bucket_iam_members(capsys, bucket):
storage_view_bucket_iam_members.view_bucket_iam_members(bucket.name)
assert re.match("Role: .*, Members: .*", capsys.readouterr().out)
def test_add_bucket_iam_member(bucket):
storage_add_bucket_iam_member.add_bucket_iam_member(bucket.name, ROLE, MEMBER)
policy = bucket.get_iam_policy(requested_policy_version=3)
assert any(
binding["role"] == ROLE and MEMBER in binding["members"]
for binding in policy.bindings
)
def test_add_bucket_conditional_iam_binding(bucket):
storage_add_bucket_conditional_iam_binding.add_bucket_conditional_iam_binding(
bucket.name,
ROLE,
CONDITION_TITLE,
CONDITION_DESCRIPTION,
CONDITION_EXPRESSION,
{MEMBER},
)
policy = bucket.get_iam_policy(requested_policy_version=3)
assert any(
binding["role"] == ROLE
and binding["members"] == {MEMBER}
and binding["condition"]
== {
"title": CONDITION_TITLE,
"description": CONDITION_DESCRIPTION,
"expression": CONDITION_EXPRESSION,
}
for binding in policy.bindings
)
def test_remove_bucket_iam_member(public_bucket):
storage_remove_bucket_iam_member.remove_bucket_iam_member(
public_bucket.name, ROLE, MEMBER)
policy = public_bucket.get_iam_policy(requested_policy_version=3)
assert not any(
binding["role"] == ROLE and MEMBER in binding["members"]
for binding in policy.bindings
)
def test_remove_bucket_conditional_iam_binding(bucket):
storage_remove_bucket_conditional_iam_binding.remove_bucket_conditional_iam_binding(
bucket.name, ROLE, CONDITION_TITLE, CONDITION_DESCRIPTION, CONDITION_EXPRESSION
)
policy = bucket.get_iam_policy(requested_policy_version=3)
condition = {
"title": CONDITION_TITLE,
"description": CONDITION_DESCRIPTION,
"expression": CONDITION_EXPRESSION,
}
assert not any(
(binding["role"] == ROLE and binding.get("condition") == condition)
for binding in policy.bindings
)
def test_set_bucket_public_iam(public_bucket):
role = "roles/storage.objectViewer"
member = "allUsers"
storage_set_bucket_public_iam.set_bucket_public_iam(
public_bucket.name, role, member
)
policy = public_bucket.get_iam_policy(requested_policy_version=3)
assert any(
binding["role"] == role and member in binding["members"]
for binding in policy.bindings
)
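# A hedged sketch (not part of the original tests) of the IAM pattern the
# samples above exercise, using the public google-cloud-storage API; the
# bucket name is a placeholder:
#
#   client = storage.Client()
#   bucket = client.bucket('my-bucket')
#   policy = bucket.get_iam_policy(requested_policy_version=3)
#   policy.bindings.append({'role': ROLE, 'members': {MEMBER}})
#   bucket.set_iam_policy(policy)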
|
the-stack_106_23394 | import sublime
import sublime_plugin
import re
from collections import namedtuple
# py3 import compatibility. Better way to do this?
try:
from .helpers import BaseBlockCommand
except ValueError:
from helpers import BaseBlockCommand # NOQA
# reference:
# http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#sections
ADORNMENTS = r"""[!\"#$%&'\\()*+,\-./:;<=>?@\[\]\^_`{|}~]"""
PATTERN_RE = re.compile(r"^(%s*)\n(.+)\n(%s+)" % (ADORNMENTS, ADORNMENTS), re.MULTILINE)
Header = namedtuple('Header', "level start end adornment title raw idx")
class RstHeaderTree(object):
# based on sphinx's header conventions
DEFAULT_HEADERS = '** = - ^ " + ~ # \' :'.split()
def __init__(self, text):
# add a fictitious break as the first line
# to allow catching a very first header without an overline.
# This implies any position returned (Header.start, Header.end)
# must be decremented by one character
self.headers = self._parse('\n' + text)
self._text_length = len(text)
def _parse(self, text):
"""
Given a chunk of restructuredText, returns a list of tuples
(level, start, end, adornment, title, raw) for each header found.
level: int (zero-based). the "weight" of the header.
start: index where the header starts
end: index where the header ends
adornment: one (just underlined) or two char
(over and underline) string
that represent the adornment,
title: the parsed title
raw : the raw parsed header text, including breaks.
"""
candidates = PATTERN_RE.findall(text)
headers = []
levels = []
idx = 0
for over, title, under in candidates:
# validate.
if ((over == '' or over == under) and len(under) >= len(title)
and len(set(under)) == 1):
# encode the adornment of the header to calculate its level
adornment = under[0] * (2 if over else 1)
if adornment not in levels:
levels.append(adornment)
level = levels.index(adornment)
raw = (over + '\n' if over else '') + title + '\n' + under
start = text.find(raw) - 1 # see comment on __init__
end = start + len(raw)
h = Header(level, start, end, adornment, title, raw, idx)
idx += 1
headers.append(h)
return headers
def belong_to(self, pos):
"""
given a cursor position, return the deeper header
that contains it
"""
match = []
for h in self.headers:
start, end = self.region(h)
if start <= pos <= end:
match.append(h)
try:
return sorted(match, key=lambda h: h.level, reverse=True)[0]
except IndexError:
return None
def region(self, header):
"""
determines the (start, end) region under the given header
A region ends when a header of the same or higher level
(i.e lower number) is found or at the EOF
"""
try:
index = self.headers.index(header)
except ValueError:
return
start = header.start
if index == len(self.headers) - 1: # last header
return (start, self._text_length)
for next_h in self.headers[index + 1:]:
if next_h.level <= header.level:
return (start, next_h.start - 1)
return (start, self._text_length)
def _index(self, header, same_or_high=False):
"""
helper method that returns the absolute index
of the header in the tree or a filteredr tree
If same_or_high is true, only move to headline with the same level
or higher level.
returns (index, headers)
"""
if same_or_high:
headers = [h for h in self.headers
if h.level <= header.level]
else:
headers = self.headers[:]
return headers.index(header), headers
def next(self, header, same_or_high=False):
"""
given a header returns the closer header
(down direction)
"""
index, headers = self._index(header, same_or_high)
try:
return headers[index + 1]
except IndexError:
return None
def prev(self, header, same_or_high=False, offset=-1):
"""same than next, but in reversed direction
"""
index, headers = self._index(header, same_or_high)
if index == 0:
return None
return headers[index + offset]
def levels(self):
""" returns the heading adornment map"""
_levels = RstHeaderTree.DEFAULT_HEADERS.copy()
for h in self.headers:
_levels[h.level] = h.adornment
levels = []
for adornment in _levels:
if adornment not in levels:
levels.append(adornment)
for adornment in RstHeaderTree.DEFAULT_HEADERS:
if adornment not in levels:
if len(adornment) == 2:
levels.insert(0, adornment)
else:
levels.append(adornment)
return levels
@classmethod
def make_header(cls, title, adornment, force_overline=False):
title = title.rstrip()
title_length = len(title.lstrip())
indent_length = len(title) - title_length
strike = adornment[0] * (title_length + indent_length * 2)
if force_overline or len(adornment) == 2:
result = strike + '\n' + title + '\n' + strike + '\n'
else:
result = title + '\n' + strike + '\n'
return result
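# A small, hedged illustration (not part of the original module) of what
# RstHeaderTree produces for a made-up snippet:
#
#   tree = RstHeaderTree("Title\n=====\n\nSection\n-------\n")
#   # tree.headers -> [Header(level=0, ..., adornment='=', title='Title', ...),
#   #                  Header(level=1, ..., adornment='-', title='Section', ...)]
#   RstHeaderTree.make_header('New', '*', force_overline=True)
#   # -> '***\nNew\n***\n'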
class HeaderChangeLevelCommand(sublime_plugin.TextCommand):
"""
increase or decrease the header level,
The level markup is autodetected from the document,
and use sphinx's convention by default.
"""
views = {}
def run(self, edit, offset=-1):
vid = self.view.id()
HeaderChangeLevelEvent.listen.pop(vid, None)
cursor_pos = self.view.sel()[0].begin()
region = sublime.Region(0, self.view.size())
tree = RstHeaderTree(self.view.substr(region))
parent = tree.belong_to(cursor_pos)
is_in_header = parent.start <= cursor_pos <= parent.end
if not is_in_header:
return
idx, levels = HeaderChangeLevelCommand.views.get(vid, (None, None))
if idx != parent.idx:
levels = tree.levels()
HeaderChangeLevelCommand.views[vid] = (parent.idx, levels)
try:
level = levels.index(parent.adornment)
if level + offset < 0:
return
adornment = levels[level + offset]
except IndexError:
return
new_header = RstHeaderTree.make_header(parent.title, adornment)
hregion = sublime.Region(parent.start, parent.end + 1)
try:
self.view.replace(edit, hregion, new_header)
finally:
def callback():
HeaderChangeLevelEvent.listen[vid] = True
sublime.set_timeout(callback, 0)
class HeaderChangeLevelEvent(sublime_plugin.EventListener):
listen = {}
def on_modified(self, view):
vid = view.id()
if HeaderChangeLevelEvent.listen.get(vid):
del HeaderChangeLevelCommand.views[vid]
del HeaderChangeLevelEvent.listen[vid]
class HeadlineMoveCommand(sublime_plugin.TextCommand):
# briefly inspired by the code of Muchenxuan Tong in
# https://github.com/demon386/SmartMarkdown
def run(self, edit, forward=True, same_or_high=True):
"""Move between headlines, forward or backward.
If same_or_high is true, only move to headline with the same level
or higher level.
"""
cursor_pos = self.view.sel()[0].begin()
region = sublime.Region(0, self.view.size())
tree = RstHeaderTree(self.view.substr(region))
parent = tree.belong_to(cursor_pos)
if forward:
h = tree.next(parent, same_or_high)
else:
is_in_header = parent.start <= cursor_pos <= parent.end
offset = -1 if is_in_header else 0
h = tree.prev(parent, same_or_high, offset)
if h:
self.jump_to(h.end - len(h.raw.split('\n')[-1]) - 1)
def jump_to(self, pos):
region = sublime.Region(pos, pos)
self.view.sel().clear()
self.view.sel().add(region)
self.view.show(region)
class SmartFoldingCommand(sublime_plugin.TextCommand):
"""Smart folding is used to fold / unfold headline at the point.
It's designed to bind to TAB key, and if the current line is not
a headline, a \t would be inserted.
"""
def run(self, edit):
cursor_pos = self.view.sel()[0].begin()
region = sublime.Region(0, self.view.size())
tree = RstHeaderTree(self.view.substr(region))
parent = tree.belong_to(cursor_pos)
is_in_header = parent.start <= cursor_pos <= parent.end
if is_in_header:
start, end = tree.region(parent)
start += len(parent.raw) + 1
region = sublime.Region(start, end)
if any([i.contains(region) for i in
self.view.folded_regions()]):
self.view.unfold(region)
else:
self.view.fold(region)
else:
for r in self.view.sel():
self.view.insert(edit, r.a, '\t')
self.view.show(r)
class SmartHeaderCommand(BaseBlockCommand):
def run(self, edit):
for region in self.view.sel():
region, lines, indent = self.get_block_bounds()
head_lines = len(lines)
adornment_char = lines[-1][0]
if (head_lines not in (2, 3) or
head_lines == 3 and lines[-3][0] != adornment_char):
# invalid header
return
title = lines[-2]
force_overline = head_lines == 3
result = RstHeaderTree.make_header(title, adornment_char, force_overline)
self.view.replace(edit, region, result)
|
the-stack_106_23396 | import os, sys, math, time
import numpy as np
from collections import Counter
sys.path.append("../IAD-Generator/iad-generation/")
from csv_utils import read_csv
from sklearn import metrics
from sklearn.linear_model import SGDClassifier
import scipy
import matplotlib
import matplotlib.pyplot as plt
from itr_sklearn import ITR_Extractor
from itr_process import process_data, retrieve_data
from joblib import dump, load
def save_model(clf, name):
dump(clf, name+'.joblib')
def load_model(name):
return load(name+'.joblib')
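# Hedged usage note (not part of the original script): the helpers above do a
# simple joblib round trip; the path is illustrative.
#
#   save_model(clf, '/tmp/itr_model')    # writes /tmp/itr_model.joblib
#   clf = load_model('/tmp/itr_model')   # restores the fitted classifier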
def main(model_type, dataset_dir, csv_filename, dataset_type, dataset_id, layer, num_classes, repeat=1, parse_data=True, num_procs=1):
max_accuracy = 0
for iteration in range(repeat):
print("Processing depth: {:d}, iter: {:d}/{:d}".format(layer, iteration, repeat))
#num_classes = 5
save_dir = os.path.join(dataset_dir, 'svm_{0}_{1}_{2}'.format(model_type, dataset_type, dataset_id))
if (not os.path.exists(save_dir)):
os.makedirs(save_dir)
parse_data = True
if(parse_data):
process_data(dataset_dir, model_type, dataset_type, dataset_id, layer, csv_filename, num_classes, num_procs)
data_in, data_label, eval_in, eval_label = retrieve_data(dataset_dir, model_type, dataset_type, dataset_id, layer)
print("data_in.shape:", data_in.shape)
print("data_label.shape:", data_label.shape)
print("eval_in.shape:", eval_in.shape)
print("eval_label.shape:", eval_label.shape)
#from thundersvm import SVC
#clf = SVC(max_iter=1000, tol=1e-4, probability=True, kernel='linear', decision_function_shape='ovr')
clf = SGDClassifier(max_iter=1000, tol=1e-4, n_jobs=num_procs)
# TRAIN
print("fitting model...")
t_s = time.time()
clf.fit(data_in, data_label)
print("elapsed:", time.time()-t_s)
print("evaluating model...")
t_s = time.time()
pred = clf.predict(eval_in)
cur_accuracy = metrics.accuracy_score(eval_label, pred)
print("elapsed:", time.time()-t_s)
# if model accuracy is good then replace the old model with new save data
if(cur_accuracy > max_accuracy):
save_model(clf, os.path.join(save_dir, "model"))
max_accuracy = cur_accuracy
print("ACCURACY: layer: {:d}, iter: {:d}/{:d}, acc:{:0.4f}, max_acc: {:0.4f}".format(layer, iteration, repeat, cur_accuracy, max_accuracy))
print('------------')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Generate IADs from input files')
#required command line args
parser.add_argument('model_type', help='the type of model to use', choices=['i3d', 'trn', 'tsm'])
parser.add_argument('dataset_dir', help='the directory where the dataset is located')
parser.add_argument('csv_filename', help='a csv file denoting the files in the dataset')
parser.add_argument('dataset_type', help='the dataset type', choices=['frames', 'flow', 'both'])
parser.add_argument('dataset_id', type=int, help='a csv file denoting the files in the dataset')
parser.add_argument('num_classes', type=int, help='the number of classes in the dataset')
parser.add_argument('--num_procs', type=int, default=1, help='number of process to split IAD generation over')
parser.add_argument('--repeat', type=int, default=1, help='number of times to repeat training the model')
parser.add_argument('--parse_data', type=bool, default=True, help='whether to parse the data again or load from file')
FLAGS = parser.parse_args()
if(FLAGS.model_type == 'i3d'):
from gi3d_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT
if(FLAGS.model_type == 'rn50'):
from rn50_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT
if(FLAGS.model_type == 'trn'):
from trn_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT
if(FLAGS.model_type == 'tsm'):
from tsm_wrapper import DEPTH_SIZE, CNN_FEATURE_COUNT
for layer in range(DEPTH_SIZE-1, -1, -1):
main(FLAGS.model_type,
FLAGS.dataset_dir,
FLAGS.csv_filename,
FLAGS.dataset_type,
FLAGS.dataset_id,
layer,
FLAGS.num_classes,
FLAGS.repeat,
FLAGS.parse_data,
FLAGS.num_procs
)
|
the-stack_106_23397 | # Authors: Adam Li <[email protected]>
#
# License: BSD (3-clause)
import os
from mne.annotations import Annotations
from mne.epochs import BaseEpochs
from mne.io.meas_info import create_info
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from mne.io import RawArray
from mne import make_fixed_length_epochs
from mne_connectivity import (Connectivity, EpochConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity,
EpochTemporalConnectivity, SpectralConnectivity,
SpectroTemporalConnectivity,
TemporalConnectivity)
from mne_connectivity.effective import phase_slope_index
from mne_connectivity.io import read_connectivity
from mne_connectivity import envelope_correlation, vector_auto_regression
from mne_connectivity.spectral import spectral_connectivity_epochs
def _make_test_epochs():
sfreq = 50.
n_signals = 3
n_epochs = 10
n_times = 500
rng = np.random.RandomState(42)
data = rng.randn(n_signals, n_epochs * n_times)
# create Epochs
info = create_info(np.arange(n_signals).astype(str).tolist(), sfreq=sfreq,
ch_types='eeg')
onset = [0, 0.5, 3]
duration = [0, 0, 0]
description = ['test1', 'test2', 'test3']
annots = Annotations(onset=onset, duration=duration,
description=description)
raw = RawArray(data, info)
raw = raw.set_annotations(annots)
epochs = make_fixed_length_epochs(raw, duration=1, preload=True)
# make sure Epochs has metadata
epochs.add_annotations_to_metadata()
return epochs
def _prep_correct_connectivity_input(conn_cls, n_nodes=3, symmetric=False,
n_epochs=4, indices=None):
correct_numpy_shape = []
extra_kwargs = dict()
if conn_cls.is_epoched:
correct_numpy_shape.append(n_epochs)
if indices is None:
if symmetric:
correct_numpy_shape.append((n_nodes + 1) * n_nodes // 2)
else:
correct_numpy_shape.append(n_nodes**2)
else:
correct_numpy_shape.append(len(indices[0]))
if conn_cls in (SpectralConnectivity, SpectroTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['freqs'] = np.arange(4)
correct_numpy_shape.append(4)
if conn_cls in (TemporalConnectivity, SpectroTemporalConnectivity,
EpochTemporalConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['times'] = np.arange(3)
correct_numpy_shape.append(3)
return correct_numpy_shape, extra_kwargs
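# Illustrative note (not part of the original tests), derived from the helper
# above: for EpochSpectroTemporalConnectivity with n_nodes=3, n_epochs=4 and
# indices=None, correct_numpy_shape is [4, 9, 4, 3] (epochs, node pairs,
# freqs, times) and extra_kwargs carries 'freqs' (length 4) and 'times'
# (length 3).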
@pytest.mark.parametrize(
'conn_cls', [Connectivity, EpochConnectivity,
SpectralConnectivity,
TemporalConnectivity,
SpectroTemporalConnectivity,
EpochTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity],
)
def test_connectivity_containers(conn_cls):
"""Test connectivity classes."""
n_epochs = 4
n_nodes = 3
data = [
[1, 0, 0],
[3, 4, 5],
[0, 1, 2],
]
bad_numpy_input = np.zeros((3, 3, 4, 5))
bad_indices = ([1, 0], [2])
if conn_cls.is_epoched:
bad_numpy_input = np.zeros((3, 3, 3, 4, 5))
correct_numpy_shape, extra_kwargs = _prep_correct_connectivity_input(
conn_cls, n_nodes=n_nodes, symmetric=False, n_epochs=n_epochs
)
correct_numpy_input = np.ones(correct_numpy_shape)
# test initialization error checks
with pytest.raises(TypeError, match='Connectivity data '
'must be passed in as a '
'numpy array'):
conn_cls(data=data, n_nodes=2, **extra_kwargs)
with pytest.raises(RuntimeError, match='Data*.'):
conn_cls(data=bad_numpy_input, n_nodes=2, **extra_kwargs)
with pytest.raises(ValueError, match='If indices are passed*.'):
conn_cls(data=correct_numpy_input, indices=bad_indices,
n_nodes=2, **extra_kwargs)
with pytest.raises(ValueError, match='Indices can only be*.'):
conn_cls(data=correct_numpy_input, indices='square',
n_nodes=2, **extra_kwargs)
indices = ([0, 1], [1, 0])
conn = conn_cls(data=correct_numpy_input, n_nodes=3, **extra_kwargs)
# test that get_data works as intended
with pytest.raises(ValueError, match="Invalid value for the "
"'output' parameter*."):
conn.get_data(output='blah')
assert conn.shape == tuple(correct_numpy_shape)
assert conn.get_data(output='raveled').shape == tuple(correct_numpy_shape)
assert conn.get_data(output='dense').ndim == len(correct_numpy_shape) + 1
# test renaming nodes error checks
with pytest.raises(ValueError, match="Name*."):
conn.rename_nodes({'100': 'new_name'})
with pytest.raises(ValueError, match="mapping must be*"):
conn.rename_nodes(['0', 'new_name'])
with pytest.raises(ValueError, match="New channel names*"):
conn.rename_nodes({'0': '1'})
# test renaming nodes
orig_names = conn.names
conn.rename_nodes({'0': 'new_name'})
new_names = conn.names
assert all([name_1 == name_2 for name_1, name_2 in
zip(orig_names, new_names)
if name_2 != 'new_name'])
conn.rename_nodes(lambda x: '0' if x == 'new_name' else x)
assert_array_equal(orig_names, conn.names)
# test connectivity instantiation with indices
indexed_numpy_shape, index_kwargs = _prep_correct_connectivity_input(
conn_cls, n_nodes=n_nodes, symmetric=False, n_epochs=n_epochs,
indices=indices
)
indexed_numpy_input = np.ones(indexed_numpy_shape)
conn2 = conn_cls(data=indexed_numpy_input, n_nodes=2, indices=indices,
**index_kwargs)
conn3 = conn_cls(data=indexed_numpy_input, n_nodes=3, indices=indices,
**index_kwargs)
# the number of nodes helps define the full dense output, but
    # when raveled with the same indices, the outputs should match exactly
assert_array_equal(
conn2.get_data(), conn3.get_data())
# test getting data with indices specified
with pytest.raises(ValueError, match='The number of indices'):
conn_cls(data=correct_numpy_input, n_nodes=3, indices=indices,
**extra_kwargs)
# test symmetric input
correct_numpy_shape, extra_kwargs = _prep_correct_connectivity_input(
conn_cls, n_nodes=3, symmetric=True
)
correct_numpy_input = np.ones(correct_numpy_shape)
with pytest.raises(ValueError, match='If "indices" is "symmetric"'):
conn_cls(data=correct_numpy_input, n_nodes=2,
indices='symmetric',
**extra_kwargs)
symm_conn = conn_cls(data=correct_numpy_input, n_nodes=n_nodes,
indices='symmetric',
**extra_kwargs)
assert symm_conn.n_nodes == n_nodes
# raveled shape should be the same
assert_array_equal(symm_conn.get_data(output='raveled').shape,
correct_numpy_shape)
# should be ([n_epochs], n_nodes, n_nodes, ...) dense shape
dense_shape = []
if conn_cls.is_epoched:
dense_shape.append(n_epochs)
dense_shape.extend([n_nodes, n_nodes])
assert all([symm_conn.get_data(
output='dense').shape[idx] == dense_shape[idx]
for idx in range(len(dense_shape))])
@pytest.mark.parametrize(
'conn_cls', [Connectivity, EpochConnectivity,
SpectralConnectivity,
TemporalConnectivity,
SpectroTemporalConnectivity,
EpochTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity],
)
def test_io(conn_cls, tmpdir):
"""Test writing and reading connectivity data."""
correct_numpy_shape = []
extra_kwargs = dict()
if conn_cls.is_epoched:
correct_numpy_shape.append(4)
correct_numpy_shape.append(4)
if conn_cls in (SpectralConnectivity, SpectroTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['freqs'] = np.arange(4)
correct_numpy_shape.append(4)
if conn_cls in (TemporalConnectivity, SpectroTemporalConnectivity,
EpochTemporalConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['times'] = np.arange(3)
correct_numpy_shape.append(3)
correct_numpy_input = np.ones(correct_numpy_shape)
# create the connectivity data structure
conn = conn_cls(data=correct_numpy_input, n_nodes=2, **extra_kwargs)
# temporary conn save
fname = os.path.join(tmpdir, 'connectivity.nc')
conn.save(fname)
# re-read the file in
new_conn = read_connectivity(fname)
# assert these two objects are the same
assert_array_equal(conn.names, new_conn.names)
assert conn.dims == new_conn.dims
for key, val in conn.coords.items():
assert_array_equal(val, new_conn.coords[key])
assert_array_equal(conn.get_data(), new_conn.get_data())
@pytest.mark.parametrize(
'conn_cls', [EpochConnectivity,
EpochTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity],
)
def test_append(conn_cls):
"""Test appending connectivity data."""
correct_numpy_shape = []
extra_kwargs = dict()
if conn_cls.is_epoched:
correct_numpy_shape.append(4)
correct_numpy_shape.append(4)
if conn_cls in (SpectralConnectivity, SpectroTemporalConnectivity,
EpochSpectralConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['freqs'] = np.arange(4)
correct_numpy_shape.append(4)
if conn_cls in (TemporalConnectivity, SpectroTemporalConnectivity,
EpochTemporalConnectivity,
EpochSpectroTemporalConnectivity):
extra_kwargs['times'] = np.arange(50)
correct_numpy_shape.append(50)
correct_numpy_input = np.ones(correct_numpy_shape)
events = np.zeros((correct_numpy_input.shape[0], 3), dtype=int)
events[:, -1] = 1 # event ID
events[:, 0] = np.linspace(0, 50, len(events))
# create the connectivity data structure
conn = conn_cls(data=correct_numpy_input, n_nodes=2, events=events,
**extra_kwargs)
# create a copy of the connectivity
conn_2 = conn.copy()
# append epochs
conn.append(conn_2)
assert conn.n_epochs == conn_2.n_epochs * 2
assert len(conn.events) == conn.n_epochs
@pytest.mark.parametrize(
'conn_func',
[vector_auto_regression, spectral_connectivity_epochs,
envelope_correlation, phase_slope_index]
)
def test_events_handling(conn_func):
"""Test that events and event_id are passed through correctly."""
epochs = _make_test_epochs()
n_epochs = len(epochs)
assert len(epochs.events) == n_epochs
# create the connectivity data structure
conn = conn_func(epochs, verbose=False)
assert len(conn.events) == n_epochs
@pytest.mark.parametrize(
'epochs', [
_make_test_epochs(),
np.random.RandomState(0).random((10, 3, 500))
])
@pytest.mark.parametrize(
'func', [
vector_auto_regression,
spectral_connectivity_epochs,
envelope_correlation,
phase_slope_index
])
def test_metadata_handling(func, tmpdir, epochs):
"""Test the presence of metadata is handled properly.
Test both with the cases of having an array input and
an ``mne.Epochs`` object input.
"""
# for each function, check that Annotations were added to the metadata
# and are handled correctly
conn = func(epochs, verbose=False)
metadata = conn.metadata
if isinstance(epochs, BaseEpochs):
# each metadata frame should have an Annotations column with n_epochs
# number of rows
assert 'annot_onset' in metadata.columns
assert 'annot_duration' in metadata.columns
assert 'annot_description' in metadata.columns
assert len(metadata) == len(epochs)
# temporary conn save
fname = os.path.join(tmpdir, 'connectivity.nc')
conn.save(fname)
new_conn = read_connectivity(fname)
# assert these two objects are the same
assert_array_equal(conn.names, new_conn.names)
assert conn.dims == new_conn.dims
for key, val in conn.coords.items():
assert_array_equal(val, new_conn.coords[key])
assert_array_equal(conn.get_data(), new_conn.get_data())
if isinstance(epochs, BaseEpochs):
assert metadata.equals(new_conn.metadata)
else:
assert isinstance(new_conn.metadata, pd.DataFrame)
assert metadata.empty
|
the-stack_106_23398 | # coding=utf8
"""
webhook.py - Sopel GitHub Module
Copyright 2015 Max Gurela
Copyright 2019 dgw
_______ __ __ __ __
| __|__| |_| |--.--.--.| |--.
| | | | _| | | || _ |
|_______|__|____|__|__|_____||_____|
________ __ __ __
| | | |.-----.| |--.| |--.-----.-----.| |--.-----.
| | | || -__|| _ || | _ | _ || <|__ --|
|________||_____||_____||__|__|_____|_____||__|__|_____|
"""
from __future__ import unicode_literals
from sopel import tools
from sopel.formatting import bold, color
from sopel.tools.time import get_timezone, format_time
from .formatting import get_formatted_response
from .formatting import fmt_repo
from .formatting import fmt_name
from threading import Thread
import bottle
import json
import requests
# Because I'm a horrible person
sopel_instance = None
def setup_webhook(sopel):
global sopel_instance
sopel_instance = sopel
host = sopel.config.github.webhook_host
port = sopel.config.github.webhook_port
base = StoppableWSGIRefServer(host=host, port=port)
server = Thread(target=bottle.run, kwargs={'server': base})
server.setDaemon(True)
server.start()
sopel.memory['gh_webhook_server'] = base
sopel.memory['gh_webhook_thread'] = server
conn = sopel.db.connect()
c = conn.cursor()
try:
c.execute('SELECT * FROM gh_hooks')
except Exception:
create_table(sopel, c)
conn.commit()
conn.close()
def create_table(bot, c):
primary_key = '(channel, repo_name)'
c.execute('''CREATE TABLE IF NOT EXISTS gh_hooks (
channel TEXT,
repo_name TEXT,
enabled BOOL DEFAULT 1,
url_color TINYINT DEFAULT 2,
tag_color TINYINT DEFAULT 6,
repo_color TINYINT DEFAULT 13,
name_color TINYINT DEFAULT 15,
hash_color TINYINT DEFAULT 14,
branch_color TINYINT DEFAULT 6,
PRIMARY KEY {0}
)'''.format(primary_key))
def shutdown_webhook(sopel):
global sopel_instance
sopel_instance = None
if 'gh_webhook_server' in sopel.memory:
print('Stopping webhook server')
sopel.memory['gh_webhook_server'].stop()
sopel.memory['gh_webhook_thread'].join()
print('GitHub webhook shutdown complete')
class StoppableWSGIRefServer(bottle.ServerAdapter):
server = None
def run(self, handler):
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw):
pass
self.options['handler_class'] = QuietHandler
self.server = make_server(self.host, self.port, handler, **self.options)
self.server.serve_forever()
def stop(self):
self.server.shutdown()
self.server.server_close()
def get_targets(repo):
conn = sopel_instance.db.connect()
c = conn.cursor()
c.execute('SELECT * FROM gh_hooks WHERE repo_name = ? AND enabled = 1', (repo.lower(), ))
return c.fetchall()
def process_payload(payload, targets):
if payload['event'] == 'ping':
for row in targets:
sopel_instance.say('[{}] {}: {} (Your webhook is now enabled)'.format(
fmt_repo(payload['repository']['name'], row),
fmt_name(payload['sender']['login'], row),
payload['zen']), row[0])
return
for row in targets:
messages = get_formatted_response(payload, row)
# Write the formatted message(s) to the channel
for message in messages:
sopel_instance.say(message, row[0])
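# Illustrative (abbreviated) payload for the 'ping' branch above -- only the keys this
# module actually reads are shown; a real GitHub delivery carries many more fields:
#     {"event": "ping",
#      "zen": "Keep it logically awesome.",
#      "repository": {"name": "example-repo", "full_name": "octocat/example-repo"},
#      "sender": {"login": "octocat"}}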
@bottle.get("/webhook")
def show_hook_info():
return 'Listening for webhook connections!'
@bottle.post("/webhook")
def webhook():
try:
payload = bottle.request.json
except:
return bottle.abort(400, 'Something went wrong!')
payload['event'] = bottle.request.headers.get('X-GitHub-Event') or 'ping'
targets = get_targets(payload['repository']['full_name'])
# process hook payload in background
payload_handler = Thread(target=process_payload, args=(payload, targets))
payload_handler.start()
# send HTTP response ASAP, hopefully within GitHub's very short timeout
return '{"channels":' + json.dumps([target[0] for target in targets]) + '}'
@bottle.get('/auth')
def handle_auth_response():
code = bottle.request.query.code
state = bottle.request.query.state
repo = state.split(':')[0]
channel = state.split(':')[1]
data = {'client_id': sopel_instance.config.github.client_id,
'client_secret': sopel_instance.config.github.client_secret,
'code': code}
raw = requests.post('https://github.com/login/oauth/access_token', data=data, headers={'Accept': 'application/json'})
try:
res = json.loads(raw.text)
if 'error' in res:
raise ValueError('{err}: {desc}'.format(err=res['error'], desc=res['error_description']))
if 'scope' not in res:
raise ValueError('You\'ve already completed authorization on this repo')
if 'write:repo_hook' not in res['scope']:
raise ValueError('You didn\'t allow read/write on repo hooks!')
access_token = res['access_token']
data = {
"name": "web",
"active": True,
"events": ["*"],
"config": {
"url": sopel_instance.config.github.external_url,
"content_type": "json"
}
}
raw = requests.post('https://api.github.com/repos/{}/hooks?access_token={}'.format(repo, access_token), data=json.dumps(data))
res = json.loads(raw.text)
if 'ping_url' not in res:
if 'errors' in res:
raise ValueError(', '.join([error['message'] for error in res['errors']]))
else:
raise ValueError('Webhook creation failed, try again.')
raw = requests.get(res['ping_url'] + '?access_token={}'.format(access_token))
title = 'Done!'
header = 'Webhook setup complete!'
body = 'That was simple, right?! You should be seeing a completion message in {} any second now'.format(channel)
flair = 'There\'s no way it was that easy… things are never this easy…'
except Exception as e:
title = 'Error!'
header = 'Webhook setup failed!'
body = 'Please try using the link in {} again, something went wrong!'.format(channel)
flair = str(e)
page = '''
<!DOCTYPE html>
<html>
<head>
<title>{title}</title>
<style>
body {{
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}}
</style>
</head>
<body>
<h1>{header}</h1>
<p>{body}</p>
<small><em>{flair}</em></small>
</body>
</html>
'''
return page.format(title=title, header=header, body=body, flair=flair)
|
the-stack_106_23399 | #BattleShip Game
# There is a randomly oriented ship in the grid, either horizontal or vertical.
# Ask the user to target and sink it for the game to be over.
# Provide the user with the accuracy of sinking the ship.
#Process
# 1. Define and initialize grid
# 2. Display Grid with col, row names
# 3. Randomly assign 4 block ship and display
# 4. Make user interface to interact with grid. Validate Input
# 5. Place user input on grid
# 6. Check if the ship is sunk and calculate accuracy
import random
import string
import os
import re
num_row = 10
num_col = 10
grid = []
ship_coordinates = []
#Initialize the grid
def grid_initialize():
for row in range(num_row):
h_list = []
for col in range(num_col):
h_list.append(" ")
grid.append(h_list)
#Display the grid
def grid_display():
#Display Alphabets for the columns of the grid
alphabet = [" ", " A ", " B " ," C "," D ", " E ", " F ", " G ", " H ", " I ", " J " ]
for col in range(num_col + 1):
print(alphabet[col], end=" ")
print()
#Display Row number for the columns of the grid
for row in range(num_row):
if (row + 1 < 10):
print(row + 1, end=" ")
else:
print(row + 1, end=" ")
for col in range(num_col):
print(grid[row][col] + " | ", end=" ")
print("\n " + " ---+" * num_col)
print()
#Randomly assign ship coordinates and orientation
def random_ship():
row_ship_coordinate = random.randint(0,9)
col_ship_coordinate = random.randint(0,9)
ship_orientation = random.randint(0,1)
#if ship_orientation is 0, then horizontol
if (ship_orientation == 0):
if (col_ship_coordinate <= 6):
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate + 1])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate + 2])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate + 3])
else:
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate - 1])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate - 2])
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate - 3])
#if ship_orientation is 1, then vertical
else:
if (row_ship_coordinate <= 6):
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate + 1,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate + 2,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate + 3,col_ship_coordinate])
else:
ship_coordinates.append([row_ship_coordinate,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate - 1,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate - 2,col_ship_coordinate])
ship_coordinates.append([row_ship_coordinate - 3,col_ship_coordinate])
def user_input():
coordinate_given = input("Please enter coordinates where you want to hit as 'A1', 'Z3': ").upper()
    # Check that the input does not contain any special characters
regex = re.compile('[@_!#$%^&*()<>?/\|}{~:]')
# check if input is in wrong format such as '5G'
while (coordinate_given[0].isdigit() == True) or (coordinate_given[1: ].isalpha() == True) or (regex.search(coordinate_given[:]) != None):
coordinate_given = input("Please enter coordinates where you want to hit (as 'A1', 'Z3': ").upper()
# Convert string values of col using 'ord' and conver string value of row using 'int'
coordinate_col = ord(coordinate_given[0])
coordinate_row = int(coordinate_given[1: ])
# Range check: check if input for col is in between A-J and for row 1-10
while ((coordinate_row < 1 or coordinate_row > 10) or (coordinate_col < 65 or coordinate_col > 74)):
coordinate_given = input("Coordinates are out of range. Please enter valid coordinates: ").upper()
coordinate_col = ord(coordinate_given[0])
coordinate_row = int(coordinate_given[1: ])
# Assign given coordinates a cell on the grid
grid_row = coordinate_row - 1
grid_col = coordinate_col - 65
return grid_row, grid_col
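# Examples of the mapping performed by user_input() above (illustrative, not exhaustive):
#   'A1'  -> (grid_row 0, grid_col 0)
#   'C7'  -> (grid_row 6, grid_col 2)
#   'J10' -> (grid_row 9, grid_col 9)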
def place_target():
ship_hit_count = 0
ship_miss_count = 0
    #Run game until all four cells of the ship are sunk
while (ship_hit_count < 4):
target_row, target_col = user_input()
# To avoid re-entering the same coordinate, use if loop.
if (grid[target_row][target_col] == " "):
os.system("cls" if os.name=="nt" else "clear")
if [target_row,target_col] in ship_coordinates:
grid[target_row][target_col] = "X"
ship_hit_count += 1
print("Ship has been hit!!! Congrats, almost there!")
else:
grid[target_row][target_col] = "O"
ship_miss_count += 1
print("You missed the hit :( Keep trying, don't give up!")
grid_display()
else:
print("You have already entered these coordinates previously. Try again!")
accuracy_rate(ship_hit_count, ship_miss_count)
print("Game over! You have successful sunk the ship. You are pro now.")
def accuracy_rate(ship_hit_count, ship_miss_count):
accuracy = ( ship_hit_count / (ship_hit_count + ship_miss_count))
print("Your accuracy of sinking the ship is: ", accuracy)
def main():
grid_initialize()
random_ship()
grid_display()
place_target()
main()
# End Code
|
the-stack_106_23400 | """ SiamRPN metrics """
import numpy as np
from ..filesystem import try_import_colorama
def Iou(rect1, rect2):
"""
    calculate intersection over union (IoU) of two rectangles given as [x1, y1, x2, y2]
Parameters
----------
rect1: list or np.array, rectangle1
rect2: list or np.array, rectangle2
Returns
-------
iou
"""
x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]
tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]
xx1 = np.maximum(tx1, x1)
yy1 = np.maximum(ty1, y1)
xx2 = np.minimum(tx2, x2)
yy2 = np.minimum(ty2, y2)
ww = np.maximum(0, xx2 - xx1)
hh = np.maximum(0, yy2 - yy1)
area = (x2-x1) * (y2-y1)
target_a = (tx2-tx1) * (ty2 - ty1)
inter = ww * hh
iou = inter / (area + target_a - inter)
return iou
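def _iou_example():
    """Illustrative usage sketch, not part of the original module: two unit squares
    offset by 0.5 intersect in a 0.5 x 0.5 area, so IoU = 0.25 / 1.75 ~= 0.143."""
    return Iou([0, 0, 1, 1], [0.5, 0.5, 1.5, 1.5])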
def overlap_ratio(rect1, rect2):
"""Compute overlap ratio between two rects
Parameters
----------
rect1 : nd.array
2d array of N x [x,y,w,h]
rect2 : nd.array
2d array of N x [x,y,w,h]
Return
----------
IOU
"""
left = np.maximum(rect1[:, 0], rect2[:, 0])
right = np.minimum(rect1[:, 0]+rect1[:, 2], rect2[:, 0]+rect2[:, 2])
top = np.maximum(rect1[:, 1], rect2[:, 1])
bottom = np.minimum(rect1[:, 1]+rect1[:, 3], rect2[:, 1]+rect2[:, 3])
intersect = np.maximum(0, right - left) * np.maximum(0, bottom - top)
union = rect1[:, 2]*rect1[:, 3] + rect2[:, 2]*rect2[:, 3] - intersect
iou = intersect / union
iou = np.maximum(np.minimum(1, iou), 0)
return iou
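def _overlap_ratio_example():
    """Illustrative usage sketch, not part of the original module: with [x, y, w, h]
    boxes, a 2x2 box at the origin and a 2x2 box shifted by (1, 1) intersect in a
    1x1 square, giving IoU = 1 / 7 ~= 0.143."""
    return overlap_ratio(np.array([[0, 0, 2, 2]]), np.array([[1, 1, 2, 2]]))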
def success_overlap(gt_bb, result_bb, n_frame):
"""get success_overlap score
Parameters
----------
result_bb : nd.array
2d array of N x [x,y,w,h]
n_frame : int
frame number
Return
----------
success score
"""
thresholds_overlap = np.arange(0, 1.05, 0.05)
success = np.zeros(len(thresholds_overlap))
iou = np.ones(len(gt_bb)) * (-1)
mask = np.sum(gt_bb[:, 2:] > 0, axis=1) == 2
iou[mask] = overlap_ratio(gt_bb[mask], result_bb[mask])
for i, per_threshold in enumerate(thresholds_overlap):
success[i] = np.sum(iou > per_threshold) / float(n_frame)
return success
def success_error(gt_center, result_center, thresholds, n_frame):
"""get success_error score
Parameters
----------
gt_center : np.ndarray
2d array of N x [x,y,w,h]
result_center : np.ndarray
2d array of N x [x,y,w,h]
thresholds : float
error float
n_frame : int
frame number
Return
----------
success_error score
"""
success = np.zeros(len(thresholds))
dist = np.ones(len(gt_center)) * (-1)
mask = np.sum(gt_center > 0, axis=1) == 2
dist[mask] = np.sqrt(np.sum(
np.power(gt_center[mask] - result_center[mask], 2), axis=1))
for i, per_threshold in enumerate(thresholds):
success[i] = np.sum(dist <= per_threshold) / float(n_frame)
return success
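def _success_error_example():
    """Illustrative usage sketch, not part of the original module: a single frame whose
    predicted center is 10 px from ground truth yields 0 for thresholds below 10 px
    and 1 for thresholds of 10 px and above."""
    gt_center = np.array([[50.0, 50.0]])
    result_center = np.array([[60.0, 50.0]])
    return success_error(gt_center, result_center, np.arange(0, 51, 1), 1)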
class OPEBenchmark:
"""
    SiamRPN OPEBenchmark offers eval_success, eval_precision and eval_norm_precision.
    eval_success computes the overlap (IoU) between predicted and ground-truth boxes
    over a range of thresholds; eval_precision computes the distance between the
    predicted and ground-truth box centers.
Parameters
----------
dataset
dataset Benchmark
"""
def __init__(self, dataset):
self.dataset = dataset
def convert_bb_to_center(self, bboxes):
return np.array([(bboxes[:, 0] + (bboxes[:, 2] - 1) / 2),
(bboxes[:, 1] + (bboxes[:, 3] - 1) / 2)]).T
def convert_bb_to_norm_center(self, bboxes, gt_wh):
return self.convert_bb_to_center(bboxes) / (gt_wh+1e-16)
def eval_success(self, eval_trackers=None):
"""eval_success is distance between the center point of the predicted position
and the center position marked in the benchmark
Parameters
----------
eval_trackers: list of tracker name or single tracker name
Return
----------
return: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
success_ret = {}
for tracker_name in eval_trackers:
success_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
tracker_traj = np.array(tracker_traj)
else:
tracker_traj = np.array(video.pred_trajs[tracker_name])
n_frame = len(gt_traj)
if hasattr(video, 'absent'):
gt_traj = gt_traj[video.absent == 1]
tracker_traj = tracker_traj[video.absent == 1]
success_ret_[video.name] = success_overlap(gt_traj, tracker_traj, n_frame)
success_ret[tracker_name] = success_ret_
return success_ret
def eval_precision(self, eval_trackers=None):
"""
        eval_precision measures the distance (in pixels) between the predicted and
        ground-truth box centers, scored over a range of thresholds
Parameters
----------
eval_trackers: list of tracker name or single tracker name
Return
----------
return: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
precision_ret = {}
for tracker_name in eval_trackers:
precision_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
tracker_traj = np.array(tracker_traj)
else:
tracker_traj = np.array(video.pred_trajs[tracker_name])
n_frame = len(gt_traj)
if hasattr(video, 'absent'):
gt_traj = gt_traj[video.absent == 1]
tracker_traj = tracker_traj[video.absent == 1]
gt_center = self.convert_bb_to_center(gt_traj)
tracker_center = self.convert_bb_to_center(tracker_traj)
thresholds = np.arange(0, 51, 1)
precision_ret_[video.name] = success_error(gt_center, tracker_center,
thresholds, n_frame)
precision_ret[tracker_name] = precision_ret_
return precision_ret
def eval_norm_precision(self, eval_trackers=None):
"""
        eval_norm_precision measures the center distance normalized by the ground-truth
        box size, scored over a range of thresholds
Parameters
----------
eval_trackers: list of tracker name or single tracker name
Return
----------
return: dict of results
"""
if eval_trackers is None:
eval_trackers = self.dataset.tracker_names
if isinstance(eval_trackers, str):
eval_trackers = [eval_trackers]
norm_precision_ret = {}
for tracker_name in eval_trackers:
norm_precision_ret_ = {}
for video in self.dataset:
gt_traj = np.array(video.gt_traj)
if tracker_name not in video.pred_trajs:
tracker_traj = video.load_tracker(self.dataset.tracker_path,
tracker_name, False)
tracker_traj = np.array(tracker_traj)
else:
tracker_traj = np.array(video.pred_trajs[tracker_name])
n_frame = len(gt_traj)
if hasattr(video, 'absent'):
gt_traj = gt_traj[video.absent == 1]
tracker_traj = tracker_traj[video.absent == 1]
gt_center_norm = self.convert_bb_to_norm_center(gt_traj, gt_traj[:, 2:4])
tracker_center_norm = self.convert_bb_to_norm_center(tracker_traj, gt_traj[:, 2:4])
thresholds = np.arange(0, 51, 1) / 100
norm_precision_ret_[video.name] = success_error(gt_center_norm,
tracker_center_norm,
thresholds,
n_frame)
norm_precision_ret[tracker_name] = norm_precision_ret_
return norm_precision_ret
def show_result(self, success_ret, precision_ret=None,
norm_precision_ret=None, show_video_level=False, helight_threshold=0.6):
"""pretty print result
Parameters
----------
success_ret: returned dict from function eval
"""
colorama = try_import_colorama()
tracker_auc = {}
for tracker_name in success_ret.keys():
auc = np.mean(list(success_ret[tracker_name].values()))
tracker_auc[tracker_name] = auc
tracker_auc_ = sorted(tracker_auc.items(),
key=lambda x: x[1],
reverse=True)[:20]
tracker_names = [x[0] for x in tracker_auc_]
tracker_name_len = max((max([len(x) for x in success_ret.keys()])+2), 12)
header = ("|{:^"+str(tracker_name_len)+"}|{:^9}|{:^16}|{:^11}|").format("Tracker name",
"Success",
"Norm Precision",
"Precision")
formatter = "|{:^"+str(tracker_name_len)+"}|{:^9.3f}|{:^16.3f}|{:^11.3f}|"
print('-'*len(header))
print(header)
print('-'*len(header))
for tracker_name in tracker_names:
success = tracker_auc[tracker_name]
if precision_ret is not None:
precision = np.mean(list(precision_ret[tracker_name].values()), axis=0)[20]
else:
precision = 0
if norm_precision_ret is not None:
norm_precision = np.mean(list(norm_precision_ret[tracker_name].values()),
axis=0)[20]
else:
norm_precision = 0
print(formatter.format(tracker_name, success, norm_precision, precision))
print('-'*len(header))
if show_video_level and len(success_ret) < 10 \
and precision_ret is not None \
and len(precision_ret) < 10:
print("\n\n")
header1 = "|{:^21}|".format("Tracker name")
header2 = "|{:^21}|".format("Video name")
for tracker_name in success_ret.keys():
header1 += ("{:^21}|").format(tracker_name)
header2 += "{:^9}|{:^11}|".format("success", "precision")
print('-'*len(header1))
print(header1)
print('-'*len(header1))
print(header2)
print('-'*len(header1))
videos = list(success_ret[tracker_name].keys())
for video in videos:
row = "|{:^21}|".format(video)
for tracker_name in success_ret.keys():
success = np.mean(success_ret[tracker_name][video])
precision = np.mean(precision_ret[tracker_name][video])
success_str = "{:^9.3f}".format(success)
if success < helight_threshold:
row += f'{colorama.Fore.RED}{success_str}{colorama.Style.RESET_ALL}|'
else:
row += success_str+'|'
precision_str = "{:^11.3f}".format(precision)
if precision < helight_threshold:
row += f'{colorama.Fore.RED}{precision_str}{colorama.Style.RESET_ALL}|'
else:
row += precision_str+'|'
print(row)
print('-'*len(header1))
|
the-stack_106_23401 | #!/usr/bin/env python
# Contributors:
# Christopher P. Barnes <[email protected]>
# Andrei Sura: github.com/indera
# Mohan Das Katragadda <[email protected]>
# Philip Chase <[email protected]>
# Ruchi Vivek Desai <[email protected]>
# Taeber Rapczak <[email protected]>
# Nicholas Rejack <[email protected]>
# Josh Hanna <[email protected]>
# Copyright (c) 2014-2015, University of Florida
# All rights reserved.
#
# Distributed under the BSD 3-Clause License
# For full text of the BSD 3-Clause License see http://opensource.org/licenses/BSD-3-Clause
'''
@author : Radha
email : [email protected]
This file tests the function update_time_stamp.
We call the function and check whether it updates the timestamps of all the subjects.
If every subject's timestamp is updated, the status value stays 1; otherwise the
status value is flipped to 0, the loop exits, and the assertion fails.
'''
import unittest
import os
from lxml import etree
from redi import redi
file_dir = os.path.dirname(os.path.realpath(__file__))
goal_dir = os.path.join(file_dir, "../")
proj_root = os.path.abspath(goal_dir)+'/'
DEFAULT_DATA_DIRECTORY = os.getcwd()
class TestUpdateTimestamp(unittest.TestCase):
def setUp(self):
# initialize the data with element tree
redi.configure_logging(DEFAULT_DATA_DIRECTORY)
self.test_xml = """<?xml version="1.0" encoding="utf8"?>
<study>
<subject>
<NAME>TSH REFLEX</NAME>
<loinc_code>1552152</loinc_code>
<RESULT>0.74</RESULT>
<REFERENCE_LOW>0.27</REFERENCE_LOW>
<REFERENCE_HIGH>4.20</REFERENCE_HIGH>
<REFERENCE_UNIT>mIU/L</REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
<subject>
<NAME>HEP C RNA, QUANT REAL-TIME</NAME>
<loinc_code>740</loinc_code>
<RESULT>5.8</RESULT>
<REFERENCE_LOW></REFERENCE_LOW>
<REFERENCE_HIGH></REFERENCE_HIGH>
<REFERENCE_UNIT>log IU</REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
<subject>
<NAME>HCV QUANTITATIVE INTERPRETATION</NAME>
<loinc_code>1534483</loinc_code>
<RESULT>Detected</RESULT>
<REFERENCE_LOW></REFERENCE_LOW>
<REFERENCE_HIGH></REFERENCE_HIGH>
<REFERENCE_UNIT></REFERENCE_UNIT>
<DATE_TIME_STAMP>1903-11-27 15:13:00</DATE_TIME_STAMP>
<STUDY_ID>59</STUDY_ID>
</subject>
</study>"""
self.data = etree.ElementTree(etree.fromstring(self.test_xml))
self.input_date_format = "%Y-%m-%d %H:%M:%S"
self.output_date_format = "%Y-%m-%d"
def test_updateTimeStamp(self):
# add blank elements to each subject in data tree
redi.add_elements_to_tree(self.data)
# function to be tested
redi.update_time_stamp(self.data, self.input_date_format, self.output_date_format)
# output raw file to check it
#redi.write_element_tree_to_file(self.data, 'rawData.xml')
#initialize a dictionary for the timestamps
# key,value = timestamp, filled or not?(0/1)
isStampFilled = {}
for subject in self.data.iter('subject'):
ts = subject.find('timestamp').text
            # mark this subject's timestamp as filled (1) or missing (0)
            isStampFilled[ts] = 1 if ts else 0
status=1
for key,value in isStampFilled.items():
if value != status:
status = 0
break
self.assertEqual(status,1)
def tearDown(self):
return()
if __name__ == '__main__':
unittest.main()
|
the-stack_106_23409 | from .whatsapp_object import WhatsappObjectWithId
class NumberStatus(WhatsappObjectWithId):
"""
Class which represents a User phonenumber status in WhatsApp service.
"""
def __init__(self, js_obj, driver=None):
super(NumberStatus, self).__init__(js_obj, driver)
if "status" in js_obj:
self.status = js_obj["status"]
if "isBusiness" in js_obj:
self.is_business = js_obj["isBusiness"]
if "canReceiveMessage" in js_obj:
self.can_receive_message = js_obj["canReceiveMessage"]
def __repr__(self):
return "<NumberStatus - {id} (business={is_business}) - status = {status}>".format(
id=self.id, is_business=self.is_business, status=self.status
)
|
the-stack_106_23410 | # -*- coding: utf-8 -*-
"""Tools used in **Igniter** GUI."""
import os
from typing import Union
from urllib.parse import urlparse, parse_qs
from pathlib import Path
import platform
import certifi
from pymongo import MongoClient
from pymongo.errors import (
ServerSelectionTimeoutError,
InvalidURI,
ConfigurationError,
OperationFailure
)
def should_add_certificate_path_to_mongo_url(mongo_url):
"""Check if should add ca certificate to mongo url.
Since 30.9.2021 cloud mongo requires newer certificates that are not
available on most of workstation. This adds path to certifi certificate
which is valid for it. To add the certificate path url must have scheme
'mongodb+srv' or has 'ssl=true' or 'tls=true' in url query.
"""
parsed = urlparse(mongo_url)
query = parse_qs(parsed.query)
lowered_query_keys = set(key.lower() for key in query.keys())
add_certificate = False
# Check if url 'ssl' or 'tls' are set to 'true'
for key in ("ssl", "tls"):
        if key in query and "true" in query[key]:
add_certificate = True
break
# Check if url contains 'mongodb+srv'
if not add_certificate and parsed.scheme == "mongodb+srv":
add_certificate = True
# Check if url does already contain certificate path
if add_certificate and "tlscafile" in lowered_query_keys:
add_certificate = False
return add_certificate
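# Illustrative inputs and results (hypothetical hosts), following the rules above:
#   should_add_certificate_path_to_mongo_url("mongodb+srv://cluster.example.net") -> True
#   should_add_certificate_path_to_mongo_url("mongodb://host:27017/?ssl=true") -> True
#   should_add_certificate_path_to_mongo_url("mongodb://host:27017") -> False
#   should_add_certificate_path_to_mongo_url("mongodb+srv://cluster.example.net/?tlsCAFile=/ca.pem") -> False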
def validate_mongo_connection(cnx: str) -> (bool, str):
"""Check if provided mongodb URL is valid.
Args:
cnx (str): URL to validate.
Returns:
(bool, str): True if ok, False if not and reason in str.
"""
parsed = urlparse(cnx)
if parsed.scheme not in ["mongodb", "mongodb+srv"]:
return False, "Not mongodb schema"
kwargs = {
"serverSelectionTimeoutMS": 2000
}
# Add certificate path if should be required
if should_add_certificate_path_to_mongo_url(cnx):
kwargs["ssl_ca_certs"] = certifi.where()
try:
client = MongoClient(cnx, **kwargs)
client.server_info()
with client.start_session():
pass
client.close()
except ServerSelectionTimeoutError as e:
return False, f"Cannot connect to server {cnx} - {e}"
except ValueError:
return False, f"Invalid port specified {parsed.port}"
except (ConfigurationError, OperationFailure, InvalidURI) as exc:
return False, str(exc)
else:
return True, "Connection is successful"
def validate_mongo_string(mongo: str) -> (bool, str):
"""Validate string if it is mongo url acceptable by **Igniter**..
Args:
mongo (str): String to validate.
Returns:
(bool, str):
True if valid, False if not and in second part of tuple
the reason why it failed.
"""
if not mongo:
return True, "empty string"
return validate_mongo_connection(mongo)
def validate_path_string(path: str) -> (bool, str):
"""Validate string if it is path to OpenPype repository.
Args:
path (str): Path to validate.
Returns:
(bool, str):
True if valid, False if not and in second part of tuple
the reason why it failed.
"""
if not path:
return False, "empty string"
if not Path(path).exists():
return False, "path doesn't exists"
if not Path(path).is_dir():
return False, "path is not directory"
return True, "valid path"
def get_openpype_global_settings(url: str) -> dict:
"""Load global settings from Mongo database.
We are loading data from database `openpype` and collection `settings`.
There we expect document type `global_settings`.
Args:
url (str): MongoDB url.
Returns:
dict: With settings data. Empty dictionary is returned if not found.
"""
kwargs = {}
if should_add_certificate_path_to_mongo_url(url):
kwargs["ssl_ca_certs"] = certifi.where()
try:
# Create mongo connection
client = MongoClient(url, **kwargs)
# Access settings collection
col = client["openpype"]["settings"]
# Query global settings
global_settings = col.find_one({"type": "global_settings"}) or {}
# Close Mongo connection
client.close()
except Exception:
# TODO log traceback or message
return {}
return global_settings.get("data") or {}
def get_openpype_path_from_db(url: str) -> Union[str, None]:
"""Get OpenPype path from global settings.
Args:
url (str): mongodb url.
Returns:
path to OpenPype or None if not found
"""
global_settings = get_openpype_global_settings(url)
paths = (
global_settings
.get("openpype_path", {})
.get(platform.system().lower())
) or []
# For cases when `openpype_path` is a single path
if paths and isinstance(paths, str):
paths = [paths]
# Loop over paths and return only existing
for path in paths:
if os.path.exists(path):
return path
return None
def load_stylesheet() -> str:
"""Load css style sheet.
Returns:
str: content of the stylesheet
"""
stylesheet_path = Path(__file__).parent.resolve() / "stylesheet.css"
return stylesheet_path.read_text()
|
the-stack_106_23414 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Linh Pham
# stats.wwdt.me is relased under the terms of the Apache License 2.0
"""Location name formatting functions used by the Stats Page"""
from typing import Dict
#region Formatting Functions
def format_location_name(location: Dict):
"""Returns a string with a combination of venue name, city
and state, depending on what information is available"""
if not location:
return None
if "venue" not in location and "city" not in location and "state" not in location:
return None
if location["venue"] and location["city"] and location["state"]:
return "{} ({}, {})".format(location["venue"],
location["city"],
location["state"])
elif location["venue"] and (not location["city"] and not location["state"]):
return location["venue"]
elif location["city"] and location["state"] and not location["venue"]:
return "({}, {})".format(location["city"], location["state"])
elif location["city"] and not location["state"]:
return location["city"]
#endregion
|
the-stack_106_23415 | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
import keras as k
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
import cv2
import tifffile as tiff
from tqdm import tqdm
x_train = []
x_test = []
y_train = []
df_train = pd.read_csv('../input/train.csv')
flatten = lambda l: [item for sublist in l for item in sublist]
labels = list(set(flatten([l.split(' ') for l in df_train['tags'].values])))
label_map = {l: i for i, l in enumerate(labels)}
inv_label_map = {i: l for l, i in label_map.items()}
for f, tags in tqdm(df_train.values, miniters=1000):
img = tiff.imread('../input/train-tif-v2/{}.tif'.format(f))
targets = np.zeros(17)
for t in tags.split(' '):
targets[label_map[t]] = 1
x_train.append(cv2.resize(img, (32, 32)))
y_train.append(targets)
y_train = np.array(y_train, np.uint8)
x_train = np.array(x_train, np.float16) / 255.
print(x_train.shape)
print(y_train.shape)
split = 35000
x_train, x_valid, y_train, y_valid = x_train[:split], x_train[split:], y_train[:split], y_train[split:]
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=(32, 32, 4)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(17, activation='sigmoid'))
model.compile(loss='binary_crossentropy', # We NEED binary here, since categorical_crossentropy l1 norms the output before calculating loss.
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=128,
epochs=20,
verbose=1,
callbacks=[ModelCheckpoint('/output/keras-simple.model', monitor='val_loss', verbose=0, mode='auto', period=1)],
validation_data=(x_valid, y_valid))
p_valid = model.predict(x_valid, batch_size=128)
print(y_valid)
print(p_valid)
from sklearn.metrics import fbeta_score
def f2_score(y_true, y_pred):
# fbeta_score throws a confusing error if inputs are not numpy arrays
y_true, y_pred, = np.array(y_true), np.array(y_pred)
# We need to use average='samples' here, any other average method will generate bogus results
return fbeta_score(y_true, y_pred, beta=2, average='samples')
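# Illustrative sanity check (hypothetical labels, not part of the original script):
# a perfect multi-label prediction scores 1.0, e.g. f2_score([[1, 0, 1]], [[1, 0, 1]]) == 1.0.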
print(f2_score(y_valid, np.array(p_valid) > 0.2))
# load test data
TEST_DIR = '../input/test-tif-v2/'
test_tif = os.listdir(TEST_DIR)
for file in tqdm(test_tif):
img = tiff.imread(TEST_DIR + file)
x_test.append(cv2.resize(img, (32, 32)))
x_test = np.array(x_test, np.float16) / 255.
y_pred = (model.predict(x_test) > 0.2)
output = open('/output/results.csv', 'w')
output.write('image_name,tags\n')
for i, row in enumerate(y_pred):
string = test_tif[i][:-4] + ','
for idx, x in enumerate(row):
if (x):
string += (inv_label_map[idx] + ' ')
output.write(string + '\n')
output.close() |
the-stack_106_23419 | # Imports
from scipy.io import loadmat
from scipy.signal import fftconvolve
import numpy as np
import gc as garbageCollector
########################################################################################################################
# Load signals from a specific file in the source files
# Convenience function to load signals
def loadSignals(recordName, dataPath, dataInDirectory):
if dataInDirectory:
signals = loadmat(dataPath + recordName + '/' + recordName + '.mat')
else:
signals = loadmat(dataPath + recordName + '.mat')
signals = signals['val']
garbageCollector.collect()
return signals
########################################################################################################################
# Loads and prepossesses signals from a specific record
def extractWholeRecord(recordName,
dataPath='PATH/',
dataInDirectory=True):
# Keep all channels except ECG
keepChannels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
signals = loadSignals(recordName, dataPath, dataInDirectory)
signals = np.transpose(signals).astype(np.float64)
# Apply antialiasing FIR filter to each channel and downsample to 50Hz
filtCoeff = np.array([0.00637849379422531, 0.00543091599801427, -0.00255136650039784, -0.0123109503066702,
-0.0137267267561505, -0.000943230632358082, 0.0191919895027550, 0.0287148886882440,
0.0123598773891149, -0.0256928886371578, -0.0570987715759348, -0.0446385294777459,
0.0303553522906817, 0.148402006671856, 0.257171285176269, 0.301282456398562,
0.257171285176269, 0.148402006671856, 0.0303553522906817, -0.0446385294777459,
-0.0570987715759348, -0.0256928886371578, 0.0123598773891149, 0.0287148886882440,
0.0191919895027550, -0.000943230632358082, -0.0137267267561505, -0.0123109503066702,
-0.00255136650039784, 0.00543091599801427, 0.00637849379422531])
for n in range(signals.shape[1]):
signals[::, n] = np.convolve(signals[::, n], filtCoeff, mode='same')
signals = signals[0::4, keepChannels]
garbageCollector.collect()
# Scale SaO2 to sit between -0.5 and 0.5, a good range for input to neural network
signals[::, 11] += -32768.0
signals[::, 11] /= 65535.0
signals[::, 11] -= 0.5
# Normalize all the other channels by removing the mean and the rms in an 18 minute rolling window, using fftconvolve for computational efficiency
    # An 18 minute window is used because baseline breathing is established in a 2 minute window according to AASM standards.
    # Normalizing over 18 minutes ensures a 90% overlap between the beginning and end of the baseline window
kernel_size = (50*18*60)+1
# Remove DC bias and scale for FFT convolution
center = np.mean(signals, axis=0)
scale = np.std(signals, axis=0)
scale[scale == 0] = 1.0
signals = (signals - center) / scale
# Compute and remove moving average with FFT convolution
center = np.zeros(signals.shape)
for n in range(signals.shape[1]):
center[::, n] = fftconvolve(signals[::, n], np.ones(shape=(kernel_size,))/kernel_size, mode='same')
# Exclude SAO2
center[::, 11] = 0.0
center[np.isnan(center) | np.isinf(center)] = 0.0
signals = signals - center
# Compute and remove the rms with FFT convolution of squared signal
scale = np.ones(signals.shape)
for n in range(signals.shape[1]):
temp = fftconvolve(np.square(signals[::, n]), np.ones(shape=(kernel_size,))/kernel_size, mode='same')
# Deal with negative values (mathematically, it should never be negative, but fft artifacts can cause this)
temp[temp < 0] = 0.0
# Deal with invalid values
invalidIndices = np.isnan(temp) | np.isinf(temp)
temp[invalidIndices] = 0.0
maxTemp = np.max(temp)
temp[invalidIndices] = maxTemp
# Finish rms calculation
scale[::, n] = np.sqrt(temp)
# Exclude SAO2
scale[::, 11] = 1.0
scale[(scale == 0) | np.isinf(scale) | np.isnan(scale)] = 1.0 # To correct for record 12 that has a zero amplitude chest signal
signals = signals / scale
garbageCollector.collect()
# Convert to 32 bits
signals = signals.astype(np.float32)
return signals
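def _moving_average_demo():
    """Illustrative sketch, not part of the original pipeline: the same FFT-convolution
    moving average used for the rolling normalization above, on a toy signal with a
    kernel of length 3 instead of 18 minutes of samples."""
    sig = np.arange(10, dtype=np.float64)
    kernel_size = 3
    return fftconvolve(sig, np.ones(shape=(kernel_size,)) / kernel_size, mode='same')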
|
the-stack_106_23422 | import os
with open('./spider/summary.txt', 'r') as sample:
f = open('./spider/summary_pretrain.txt','w+')
for line in sample.readlines():
if ':' not in line:
continue
curLine = line.strip().split(":")
speaker = curLine[0].strip()
sentence = curLine[1].strip()
# if len(speaker.split()) > 10:
# print(speaker)
if len(speaker.split()) < 1:
print(speaker)
if len(sentence.split()) < 1:
continue
f.write(speaker+'\r\n')
f.write(sentence+'\r\n')
f.write('\r\n')
f.close()
print("写入成功") |
the-stack_106_23425 | """Interactive figures in the Jupyter notebook"""
from base64 import b64encode
import json
import io
import os
from IPython.display import display, HTML
from ipywidgets import DOMWidget, widget_serialization
from traitlets import (
Unicode, Bool, CInt, Float, List, Any, Instance, CaselessStrEnum, Enum,
default
)
from matplotlib import rcParams
from matplotlib.figure import Figure
from matplotlib import is_interactive
from matplotlib.backends.backend_webagg_core import (FigureManagerWebAgg,
FigureCanvasWebAggCore,
NavigationToolbar2WebAgg,
TimerTornado)
from matplotlib.backend_bases import (ShowBase, NavigationToolbar2,
FigureCanvasBase, cursors)
here = os.path.dirname(__file__)
with open(os.path.join(here, 'static', 'package.json')) as fid:
js_semver = '^%s' % json.load(fid)['version']
cursors_str = {
cursors.HAND: 'pointer',
cursors.POINTER: 'default',
cursors.SELECT_REGION: 'crosshair',
cursors.MOVE: 'move',
cursors.WAIT: 'wait'
}
class Show(ShowBase):
def __call__(self, block=None):
from matplotlib._pylab_helpers import Gcf
managers = Gcf.get_all_fig_managers()
if not managers:
return
interactive = is_interactive()
for manager in managers:
manager.show()
# plt.figure adds an event which puts the figure in focus
# in the activeQue. Disable this behaviour, as it results in
# figures being put as the active figure after they have been
# shown, even in non-interactive mode.
if hasattr(manager, '_cidgcf'):
manager.canvas.mpl_disconnect(manager._cidgcf)
if not interactive and manager in Gcf._activeQue:
Gcf._activeQue.remove(manager)
show = Show()
def draw_if_interactive():
import matplotlib._pylab_helpers as pylab_helpers
if is_interactive():
manager = pylab_helpers.Gcf.get_active()
if manager is not None:
manager.show()
def connection_info():
"""
Return a string showing the figure and connection status for
the backend. This is intended as a diagnostic tool, and not for general
use.
"""
from matplotlib._pylab_helpers import Gcf
result = []
for manager in Gcf.get_all_fig_managers():
fig = manager.canvas.figure
        result.append('{0} - {1}'.format((fig.get_label() or
"Figure {0}".format(manager.num)),
manager.web_sockets))
if not is_interactive():
result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))
return '\n'.join(result)
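# Illustrative output of connection_info() (labels and socket lists depend on the
# running session; canvas reprs are abbreviated here):
#     Figure 1 - [Canvas(...)]
#     Figures pending show: 0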
class Toolbar(DOMWidget, NavigationToolbar2WebAgg):
_model_module = Unicode('jupyter-matplotlib').tag(sync=True)
_model_module_version = Unicode(js_semver).tag(sync=True)
_model_name = Unicode('ToolbarModel').tag(sync=True)
_view_module = Unicode('jupyter-matplotlib').tag(sync=True)
_view_module_version = Unicode(js_semver).tag(sync=True)
_view_name = Unicode('ToolbarView').tag(sync=True)
toolitems = List().tag(sync=True)
orientation = Enum(['horizontal', 'vertical'], default_value='vertical').tag(sync=True)
button_style = CaselessStrEnum(
values=['primary', 'success', 'info', 'warning', 'danger', ''], default_value='',
help="""Use a predefined styling for the button.""").tag(sync=True)
collapsed = Bool(True).tag(sync=True)
_current_action = Enum(values=['pan', 'zoom', ''], default_value='').tag(sync=True)
def __init__(self, canvas, *args, **kwargs):
DOMWidget.__init__(self, *args, **kwargs)
NavigationToolbar2WebAgg.__init__(self, canvas, *args, **kwargs)
self.on_msg(self.canvas._handle_message)
def export(self):
buf = io.BytesIO()
self.canvas.figure.savefig(buf, format='png', dpi='figure')
# Figure width in pixels
pwidth = self.canvas.figure.get_figwidth() * self.canvas.figure.get_dpi()
# Scale size to match widget on HiPD monitors
width = pwidth / self.canvas._dpi_ratio
data = "<img src='data:image/png;base64,{0}' width={1}/>"
data = data.format(b64encode(buf.getvalue()).decode('utf-8'), width)
display(HTML(data))
@default('toolitems')
def _default_toolitems(self):
icons = {
'home': 'home',
'back': 'arrow-left',
'forward': 'arrow-right',
'zoom_to_rect': 'square-o',
'move': 'arrows',
'download': 'floppy-o',
'export': 'file-picture-o'
}
download_item = ('Download', 'Download plot', 'download', 'save_figure')
toolitems = (NavigationToolbar2.toolitems + (download_item,))
return [(text, tooltip, icons[icon_name], method_name)
for text, tooltip, icon_name, method_name
in toolitems
if icon_name in icons]
class Canvas(DOMWidget, FigureCanvasWebAggCore):
_model_module = Unicode('jupyter-matplotlib').tag(sync=True)
_model_module_version = Unicode(js_semver).tag(sync=True)
_model_name = Unicode('MPLCanvasModel').tag(sync=True)
_view_module = Unicode('jupyter-matplotlib').tag(sync=True)
_view_module_version = Unicode(js_semver).tag(sync=True)
_view_name = Unicode('MPLCanvasView').tag(sync=True)
toolbar = Instance(Toolbar, allow_none=True).tag(sync=True, **widget_serialization)
toolbar_visible = Bool(True).tag(sync=True)
toolbar_position = Enum(['top', 'bottom', 'left', 'right'], default_value='left').tag(sync=True)
header_visible = Bool(True).tag(sync=True)
footer_visible = Bool(True).tag(sync=True)
resizable = Bool(True).tag(sync=True)
_width = CInt().tag(sync=True)
_height = CInt().tag(sync=True)
_figure_label = Unicode('Figure').tag(sync=True)
_message = Unicode().tag(sync=True)
_cursor = Unicode('pointer').tag(sync=True)
_image_mode = Unicode('full').tag(sync=True)
_rubberband_x = CInt(0).tag(sync=True)
_rubberband_y = CInt(0).tag(sync=True)
_rubberband_width = CInt(0).tag(sync=True)
_rubberband_height = CInt(0).tag(sync=True)
_closed = Bool(True)
# Must declare the superclass private members.
_png_is_old = Bool()
_force_full = Bool()
_current_image_mode = Unicode()
_dpi_ratio = Float(1.0)
_is_idle_drawing = Bool()
_is_saving = Bool()
_button = Any()
_key = Any()
_lastx = Any()
_lasty = Any()
def __init__(self, figure, *args, **kwargs):
DOMWidget.__init__(self, *args, **kwargs)
FigureCanvasWebAggCore.__init__(self, figure, *args, **kwargs)
self.on_msg(self._handle_message)
def _handle_message(self, object, content, buffers):
# Every content has a "type".
if content['type'] == 'closing':
self._closed = True
elif content['type'] == 'initialized':
_, _, w, h = self.figure.bbox.bounds
self.manager.resize(w, h)
else:
self.manager.handle_json(content)
def send_json(self, content):
# Change in the widget state?
if content['type'] == 'cursor':
self._cursor = cursors_str[content['cursor']]
elif content['type'] == 'message':
self._message = content['message']
elif content['type'] == 'figure_label':
self._figure_label = content['label']
elif content['type'] == 'resize':
self._width = content['size'][0]
self._height = content['size'][1]
# Send resize message anyway
self.send({'data': json.dumps(content)})
elif content['type'] == 'image_mode':
self._image_mode = content['mode']
else:
# Default: send the message to the front-end
self.send({'data': json.dumps(content)})
def send_binary(self, data):
self.send({'data': '{"type": "binary"}'}, buffers=[data])
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
class FigureManager(FigureManagerWebAgg):
ToolbarCls = Toolbar
def __init__(self, canvas, num):
FigureManagerWebAgg.__init__(self, canvas, num)
self.web_sockets = [self.canvas]
def show(self):
if self.canvas._closed:
self.canvas._closed = False
display(self.canvas)
else:
self.canvas.draw_idle()
def destroy(self):
self.canvas.close()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
from matplotlib._pylab_helpers import Gcf
def closer(event):
Gcf.destroy(num)
canvas = Canvas(figure)
if 'nbagg.transparent' in set(rcParams.keys()) and rcParams['nbagg.transparent']:
figure.patch.set_alpha(0)
manager = FigureManager(canvas, num)
if is_interactive():
manager.show()
figure.canvas.draw_idle()
canvas.mpl_connect('close_event', closer)
return manager
|
the-stack_106_23427 | class TrieNode:
def __init__(self, w):
self.label = w
self.children = {}
self.index, self.parent, self.depth = None, None, None
def add_child(self, child):
self.children[child.label[0]] = child
child.parent = self
def get_all_leaves(self, f):
if len(self.children) == 0:
return [f(self)]
return [value for _, child in sorted(self.children.items())
for value in child.get_all_leaves(f)]
def set_index(self, index = 0):
for _, child in sorted(self.children.items()):
index = child.set_index(index)
self.index = index
return index + 1
def set_depth(self, depth = 0):
self.depth = depth
for _, child in self.children.items():
child.set_depth(depth + len(child.label))
    def find_node(self, word, m):
        # Return the node reached by matching the first m characters of `word`
        # along edge labels, or None if no such path exists.
        if m == 0:
return self
if word[0] not in self.children:
return None
child = self.children[word[0]]
label = child.label
if m < len(label):
return child if child.label.startswith(word) else None
return child.find_node(
word[len(label):], m - len(label)) if word.startswith(label) else None
def __hash__(self):
return object.__hash__(self)
def __eq__(self, other):
if (self.label != other.label
or self.children.keys() != other.children.keys()):
return False
for key in self.children:
if self.children[key] != other.children[key]:
return False
return True
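def _trie_example():
    """Illustrative usage sketch, not part of the original module: a root with a single
    edge labelled "ab"; find_node matches the first m characters of a word along edge
    labels and returns the node it lands on, or None."""
    root = TrieNode("")
    root.add_child(TrieNode("ab"))
    assert root.find_node("ab", 2) is root.children["a"]
    assert root.find_node("a", 1) is root.children["a"]
    assert root.find_node("xy", 2) is None
    return root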
|
the-stack_106_23428 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import importlib.resources
import logging
import os
import shlex
import subprocess
import sys
import tempfile
from contextlib import asynccontextmanager, contextmanager
from dataclasses import dataclass
from itertools import chain
from pathlib import Path
from typing import AsyncContextManager, ContextManager, Iterable, Optional, Union
from fs_image.vm.guest_agent import QemuError, QemuGuestAgent
from fs_image.vm.share import Share, process_shares
logger = logging.getLogger("vm")
@dataclass(frozen=True)
class KernelResources(object):
# these paths vary per kernel
vmlinuz: Path
initrd: Path
modules: Path
# these are invariant and a part of //fs_image/vm:vm
qemu: Path
qemu_bioses: Path
@contextmanager
def kernel_resources() -> ContextManager[KernelResources]:
try:
# QEMU BIOSes are a FB-specific resource
with importlib.resources.path(
"fs_image.vm", "qemu_bioses"
) as qemu_bioses:
bios_dir = qemu_bioses
except FileNotFoundError:
bios_dir = None
with importlib.resources.path(
"fs_image.vm", "vmlinuz"
) as vmlinuz, importlib.resources.path(
"fs_image.vm", "initrd"
) as initrd, importlib.resources.path(
"fs_image.vm", "modules"
) as modules, importlib.resources.path(
"fs_image.vm", "qemu"
) as qemu:
yield KernelResources(
vmlinuz=vmlinuz,
initrd=initrd,
modules=modules,
qemu=qemu,
qemu_bioses=bios_dir,
)
@asynccontextmanager
async def kernel_vm(
image: Path,
fbcode: Optional[Path] = None,
verbose: bool = False,
interactive: bool = False,
shares: Optional[Iterable[Union[Share, Path]]] = None,
dry_run: Optional[bool] = False,
up_timeout: Optional[int] = 2 * 60,
ncpus: Optional[int] = 1,
) -> AsyncContextManager[QemuGuestAgent]:
# An image should always be provided; either by vmtest or run_vm
assert image
sockfile = tempfile.NamedTemporaryFile(
prefix="qemu_guest_agent_",
suffix=".sock",
# qemu will delete this socket file
delete=False,
).name
# this ephemeral disk file will be deleted when it gets garbage collected,
# which will happen after QEMU finishes, whether it succeeds or fails
rwdevice = tempfile.NamedTemporaryFile(
prefix="vm_",
suffix="_rw.img",
# If available, create this temporary disk image in a temporary
# directory that we know will be on disk, instead of /tmp which may be
# a space-constrained tmpfs whichcan cause sporadic failures depending
# on how much VMs decide to write to the root partition multiplied by
# however many VMs are running concurrently.
# If DISK_TEMP is not set, Python will follow the normal mechanism to
# determine where to create this file as described in:
# https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
dir=os.getenv("DISK_TEMP"),
)
# TODO: should this size be configurable (or is it possible to dynamically grow)?
rwdevice.truncate(1 * 1024 * 1024 * 1024)
shares = process_shares(shares)
# Mount directories that are specific to Facebook
try:
from fs_image.facebook.vm.share_fbcode_runtime import (
gen_fb_share_fbcode_runtime as _gen_fb_share_fbcode_runtime,
)
shares.extend(_gen_fb_share_fbcode_runtime())
except ImportError: # pragma: no cover
pass
if fbcode is not None:
# also share fbcode at the same mount point from the host
# so that absolute symlinks in fbcode work when in @mode/dev
shares += [
Share(
host_path=fbcode,
mount_tag="fbcode",
location=fbcode,
agent_mount=True,
)
]
with kernel_resources() as kernel:
args = [
"-no-reboot",
"-display",
"none",
"-serial",
"mon:stdio",
"-cpu",
"max",
"-smp",
str(ncpus),
"-m",
"4G",
"-device",
"virtio-rng-pci",
"-net",
"none",
"-device",
"virtio-serial",
"-kernel",
str(kernel.vmlinuz),
"-initrd",
str(kernel.initrd),
"-append",
(
"console=ttyS0,115200"
" root=/dev/vda"
" noapic"
" panic=-1"
" cgroup_no_v1=all"
" systemd.unified_cgroup_hierarchy=1"
" rootflags=subvol=volume"
" rw"
" rd.emergency=poweroff"
" rd.debug"
),
"-drive",
f"file={image},if=virtio,format=raw,readonly=on",
"-drive",
f"file={rwdevice.name},if=virtio,format=raw,readonly=off",
"-chardev",
f"socket,path={sockfile},server,nowait,id=qga0",
"-device",
"virtio-serial",
"-device",
"virtserialport,chardev=qga0,name=org.qemu.guest_agent.0",
]
# Only set directory for the BIOS if qemu_bioses are provided
if kernel.qemu_bioses:
args.extend(["-L", str(kernel.qemu_bioses)])
if os.access("/dev/kvm", os.R_OK | os.W_OK):
args += ["-enable-kvm"]
else:
print(
"KVM not available - falling back to slower, emulated CPU: "
+ "see https://our.intern.facebook.com/intern/qa/5312/how-do-i-enable-kvm-on-my-devvm",
file=sys.stderr,
)
# this modules directory is mounted by init.sh at boot, to avoid having
# to install kernels in the root fs and avoid expensive copying of
# ~400M worth of modules during boot
shares += [
Share(
host_path=kernel.modules,
mount_tag="modules",
agent_mount=False,
),
]
args += __qemu_share_args(shares)
if dry_run:
print(str(kernel.qemu) + " " + " ".join(shlex.quote(a) for a in args))
sys.exit(0)
if interactive:
proc = await asyncio.create_subprocess_exec(str(kernel.qemu), *args)
elif verbose:
# don't connect stdin if we are simply in verbose mode and not interactive
proc = await asyncio.create_subprocess_exec(
str(kernel.qemu), *args, stdin=subprocess.PIPE
)
else:
proc = await asyncio.create_subprocess_exec(
str(kernel.qemu),
*args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
ga = QemuGuestAgent(sockfile, connect_timeout=up_timeout)
try:
for share in [s for s in shares if s.agent_mount]:
await ga.mount_share(tag=share.mount_tag, mountpoint=share.location)
yield ga
except QemuError as err:
print(f"Qemu failed with error: {err}", flush=True, file=sys.stderr)
finally:
if interactive:
await proc.wait()
if proc.returncode is None:
proc.terminate()
await proc.wait()
def __qemu_share_args(shares: Iterable[Share]) -> Iterable[str]:
return chain.from_iterable(
(
"-virtfs",
f"local,path={share.host_path},security_model=none,readonly,mount_tag={share.mount_tag}",
)
for share in shares
)
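# Illustrative note (added, not part of the original module): a single
# Share(host_path="/data/modules", mount_tag="modules") would expand to the pair
#   -virtfs local,path=/data/modules,security_model=none,readonly,mount_tag=modules
# i.e. one 9p export per share that the guest can later mount by its tag.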
|
the-stack_106_23432 | # -*- coding: utf-8 -*-
'''
The ssh client wrapper system contains the routines that are used to alter
how executions are run in the salt-ssh system. This allows for state routines
to be easily rewritten to execute in a way that makes them do the same tasks
as ZeroMQ salt, but via ssh.
'''
# Import python libs
from __future__ import absolute_import, print_function
import copy
# Import salt libs
import salt.loader
import salt.utils.data
import salt.utils.json
import salt.client.ssh
# Import 3rd-party libs
from salt.ext import six
class FunctionWrapper(object):
'''
Create an object that acts like the salt function dict and makes function
calls remotely via the SSH shell system
'''
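# Illustrative usage sketch (hypothetical id_/host values, not from the
# original module): the wrapper is indexed like the __salt__ dict and the
# returned caller runs the function over SSH, e.g.
#   wrapped = FunctionWrapper(opts, 'minion1', '198.51.100.7')
#   result = wrapped['test.ping']()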
def __init__(
self,
opts,
id_,
host,
wfuncs=None,
mods=None,
fsclient=None,
cmd_prefix=None,
aliases=None,
minion_opts=None,
**kwargs):
super(FunctionWrapper, self).__init__()
self.cmd_prefix = cmd_prefix
self.wfuncs = wfuncs if isinstance(wfuncs, dict) else {}
self.opts = opts
self.mods = mods if isinstance(mods, dict) else {}
self.kwargs = {'id_': id_,
'host': host}
self.fsclient = fsclient
self.kwargs.update(kwargs)
self.aliases = aliases
if self.aliases is None:
self.aliases = {}
self.minion_opts = minion_opts
def __contains__(self, key):
'''
We need to implement a __contains__ method, otherwise when someone
does a contains comparison python assumes this is a sequence, and
calls __getitem__ with keys 0 and up until IndexError
'''
try:
self[key] # pylint: disable=W0104
return True
except KeyError:
return False
def __getitem__(self, cmd):
'''
Return the function call to simulate the salt local lookup system
'''
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. Create a new
# FunctionWrapper which contains the prefix 'cmd' (again, for the
# salt.cmd.run example)
kwargs = copy.deepcopy(self.kwargs)
id_ = kwargs.pop('id_')
host = kwargs.pop('host')
return FunctionWrapper(self.opts,
id_,
host,
wfuncs=self.wfuncs,
mods=self.mods,
fsclient=self.fsclient,
cmd_prefix=cmd,
aliases=self.aliases,
minion_opts=self.minion_opts,
**kwargs)
if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the code block
# above. Reconstruct the original cmd in the form 'cmd.run' and
# then evaluate as normal
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
return self.wfuncs[cmd]
if cmd in self.aliases:
return self.aliases[cmd]
def caller(*args, **kwargs):
'''
The remote execution function
'''
argv = [cmd]
argv.extend([salt.utils.json.dumps(arg) for arg in args])
argv.extend(
['{0}={1}'.format(salt.utils.stringutils.to_str(key),
salt.utils.json.dumps(val))
for key, val in six.iteritems(kwargs)]
)
single = salt.client.ssh.Single(
self.opts,
argv,
mods=self.mods,
wipe=True,
fsclient=self.fsclient,
minion_opts=self.minion_opts,
**self.kwargs
)
stdout, stderr, retcode = single.cmd_block()
if stderr.count('Permission Denied'):
return {'_error': 'Permission Denied',
'stdout': stdout,
'stderr': stderr,
'retcode': retcode}
try:
ret = salt.utils.json.loads(stdout)
if len(ret) < 2 and 'local' in ret:
ret = ret['local']
ret = ret.get('return', {})
except ValueError:
ret = {'_error': 'Failed to return clean data',
'stderr': stderr,
'stdout': stdout,
'retcode': retcode}
return ret
return caller
def __setitem__(self, cmd, value):
'''
Set aliases for functions
'''
if '.' not in cmd and not self.cmd_prefix:
# Form of salt.cmd.run in Jinja -- it's expecting a subdictionary
# containing only 'cmd' module calls, in that case. We don't
# support assigning directly to prefixes in this way
raise KeyError('Cannot assign to module key {0} in the '
'FunctionWrapper'.format(cmd))
if self.cmd_prefix:
# We're in an inner FunctionWrapper as created by the first code
# block in __getitem__. Reconstruct the original cmd in the form
# 'cmd.run' and then evaluate as normal
cmd = '{0}.{1}'.format(self.cmd_prefix, cmd)
if cmd in self.wfuncs:
self.wfuncs[cmd] = value
# Here we assume `value` is a `caller` function from __getitem__.
# We save it as an alias and then can return it when referenced
# later in __getitem__
self.aliases[cmd] = value
def get(self, cmd, default):
'''
Mirrors behavior of dict.get
'''
if cmd in self:
return self[cmd]
else:
return default
|
the-stack_106_23433 | # -*- coding: utf-8 -*-
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
from setuptools import setup
with open("README.rst") as readme:
long_description = readme.read()
setup(
name='elementpath',
version='1.4.3',
packages=['elementpath'],
author='Davide Brunato',
author_email='[email protected]',
url='https://github.com/sissaschool/elementpath',
keywords=['XPath', 'XPath2', 'Pratt-parser', 'ElementTree', 'lxml'],
license='MIT',
description='XPath 1.0/2.0 parsers and selectors for ElementTree and lxml',
long_description=long_description,
extras_require={
'dev': ['tox', 'coverage', 'lxml', 'xmlschema~=1.1.0', 'Sphinx']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries'
]
)
|
the-stack_106_23435 | import numpy as np
import os
import pickle
import pytest
import re
import time
import shutil
from copy import deepcopy
from numpy import allclose, isclose
from flare import struc, env, gp
from flare.parameters import Parameters
from flare.mgp import MappedGaussianProcess
from flare.lammps import lammps_calculator
from flare.utils.element_coder import _Z_to_mass, _Z_to_element, _element_to_Z
from flare.ase.calculator import FLARE_Calculator
from flare.ase.atoms import FLARE_Atoms
from ase.calculators.lammpsrun import LAMMPS
from .fake_gp import get_gp, get_random_structure
from .mgp_test import clean, compare_triplet, predict_atom_diag_var
body_list = ["2", "3"]
multi_list = [True, False]
force_block_only = False
curr_path = os.getcwd()
@pytest.mark.skipif(
not os.environ.get("lmp", False),
reason=(
"lmp not found "
"in environment: Please install LAMMPS "
"and set the $lmp env. "
"variable to point to the executatble."
),
)
@pytest.fixture(scope="module")
def all_gp():
allgp_dict = {}
np.random.seed(123)
for bodies in body_list:
for multihyps in multi_list:
gp_model = get_gp(
bodies,
"mc",
multihyps,
cellabc=[1.5, 1, 2],
force_only=force_block_only,
noa=5,
)
gp_model.parallel = True
gp_model.n_cpus = 2
allgp_dict[f"{bodies}{multihyps}"] = gp_model
yield allgp_dict
del allgp_dict
@pytest.fixture(scope="module")
def all_mgp():
allmgp_dict = {}
for bodies in ["2", "3", "2+3"]:
for multihyps in [False, True]:
allmgp_dict[f"{bodies}{multihyps}"] = None
yield allmgp_dict
del allmgp_dict
@pytest.fixture(scope="module")
def all_lmp():
all_lmp_dict = {}
species = ["H", "He"]
specie_symbol_list = " ".join(species)
masses = [
f"{i} {_Z_to_mass[_element_to_Z[species[i]]]}" for i in range(len(species))
]
parameters = {
"command": os.environ.get("lmp"), # set up executable for ASE
"newton": "off",
"pair_style": "mgp",
"mass": masses,
}
# set up input params
for bodies in body_list:
for multihyps in multi_list:
# create ASE calc
label = f"{bodies}{multihyps}"
files = [f"{label}.mgp"]
by = "yes" if bodies == "2" else "no"
ty = "yes" if bodies == "3" else "no"
parameters["pair_coeff"] = [
f"* * {label}.mgp {specie_symbol_list} {by} {ty}"
]
lmp_calc = LAMMPS(
label=label,
keep_tmp_files=True,
tmp_dir="./tmp/",
parameters=parameters,
files=files,
specorder=species,
)
all_lmp_dict[f"{bodies}{multihyps}"] = lmp_calc
yield all_lmp_dict
del all_lmp_dict
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_init(bodies, multihyps, all_mgp, all_gp):
"""
test the init function
"""
clean()
gp_model = all_gp[f"{bodies}{multihyps}"]
# grid parameters
grid_params = {}
if "2" in bodies:
grid_params["twobody"] = {"grid_num": [160], "lower_bound": [0.02]}
if "3" in bodies:
grid_params["threebody"] = {"grid_num": [31, 32, 33], "lower_bound": [0.02] * 3}
lammps_location = f"{bodies}{multihyps}"
data = gp_model.training_statistics
try:
mgp_model = MappedGaussianProcess(
grid_params=grid_params,
unique_species=data["species"],
n_cpus=1,
lmp_file_name=lammps_location,
var_map="simple",
)
except:
mgp_model = MappedGaussianProcess(
grid_params=grid_params,
unique_species=data["species"],
n_cpus=1,
lmp_file_name=lammps_location,
var_map=None,
)
all_mgp[f"{bodies}{multihyps}"] = mgp_model
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_build_map(all_gp, all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
gp_model = all_gp[f"{bodies}{multihyps}"]
mgp_model = all_mgp[f"{bodies}{multihyps}"]
mgp_model.build_map(gp_model)
# with open(f'grid_{bodies}_{multihyps}.pickle', 'wb') as f:
# pickle.dump(mgp_model, f)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_write_model(all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
mgp_model = all_mgp[f"{bodies}{multihyps}"]
mgp_model.write_model(f"my_mgp_{bodies}_{multihyps}")
mgp_model.write_model(f"my_mgp_{bodies}_{multihyps}", format="pickle")
# Ensure that user is warned when a non-mean_only
# model is serialized into a Dictionary
with pytest.warns(Warning):
mgp_model.var_map = "pca"
mgp_model.as_dict()
mgp_model.var_map = "simple"
mgp_model.as_dict()
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_load_model(all_mgp, bodies, multihyps):
"""
test the mapping for mc_simple kernel
"""
name = f"my_mgp_{bodies}_{multihyps}.json"
all_mgp[f"{bodies}{multihyps}"] = MappedGaussianProcess.from_file(name)
os.remove(name)
name = f"my_mgp_{bodies}_{multihyps}.pickle"
all_mgp[f"{bodies}{multihyps}"] = MappedGaussianProcess.from_file(name)
os.remove(name)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_cubic_spline(all_gp, all_mgp, bodies, multihyps):
"""
test the predict for mc_simple kernel
"""
mgp_model = all_mgp[f"{bodies}{multihyps}"]
delta = 1e-4
if "3" in bodies:
body_name = "threebody"
elif "2" in bodies:
body_name = "twobody"
nmap = len(mgp_model.maps[body_name].maps)
print("nmap", nmap)
for i in range(nmap):
maxvalue = np.max(np.abs(mgp_model.maps[body_name].maps[i].mean.__coeffs__))
if maxvalue > 0:
comp_code = mgp_model.maps[body_name].maps[i].species_code
if "3" in bodies:
c_pt = np.array([[0.3, 0.4, 0.5]])
c, cderv = (
mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
)
cderv = cderv.reshape([-1])
for j in range(3):
a_pt = deepcopy(c_pt)
b_pt = deepcopy(c_pt)
a_pt[0][j] += delta
b_pt[0][j] -= delta
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
num_derv = (a - b) / (2 * delta)
print("spline", comp_code, num_derv, cderv[j])
assert np.isclose(num_derv, cderv[j], rtol=1e-2)
elif "2" in bodies:
center = np.sum(mgp_model.maps[body_name].maps[i].bounds) / 2.0
a_pt = np.array([[center + delta]])
b_pt = np.array([[center - delta]])
c_pt = np.array([[center]])
a = mgp_model.maps[body_name].maps[i].mean(a_pt)[0]
b = mgp_model.maps[body_name].maps[i].mean(b_pt)[0]
c, cderv = (
mgp_model.maps[body_name].maps[i].mean(c_pt, with_derivatives=True)
)
cderv = cderv.reshape([-1])[0]
num_derv = (a - b) / (2 * delta)
print("spline", num_derv, cderv)
assert np.isclose(num_derv, cderv, rtol=1e-2)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_predict(all_gp, all_mgp, bodies, multihyps):
"""
test the predict for mc_simple kernel
"""
gp_model = all_gp[f"{bodies}{multihyps}"]
mgp_model = all_mgp[f"{bodies}{multihyps}"]
# # debug
# filename = f'grid_{bodies}_{multihyps}.pickle'
# with open(filename, 'rb') as f:
# mgp_model = pickle.load(f)
nenv = 6
cell = 1.0 * np.eye(3)
cutoffs = gp_model.cutoffs
unique_species = gp_model.training_statistics["species"]
struc_test, f = get_random_structure(cell, unique_species, nenv)
test_envi = env.AtomicEnvironment(
struc_test, 0, cutoffs, cutoffs_mask=gp_model.hyps_mask
)
if "2" in bodies:
kernel_name = "twobody"
elif "3" in bodies:
kernel_name = "threebody"
# compare_triplet(mgp_model.maps['threebody'], gp_model, test_envi)
mgp_f, mgp_e_var, mgp_s, mgp_e = mgp_model.predict(test_envi)
assert Parameters.compare_dict(
gp_model.hyps_mask, mgp_model.maps[kernel_name].hyps_mask
)
if multihyps:
gp_e, gp_e_var = gp_model.predict_local_energy_and_var(test_envi)
gp_f, gp_f_var = gp_model.predict_force_xyz(test_envi)
else:
gp_e, gp_f, gp_s, gp_e_var, _, _ = gp_model.predict_efs(test_envi)
gp_s = -gp_s[[0, 3, 5, 4, 2, 1]]
# check stress
assert np.allclose(mgp_s, gp_s, rtol=1e-2)
# check mgp is within 2 meV/A of the gp
print("mgp_en, gp_en", mgp_e, gp_e)
assert np.allclose(mgp_e, gp_e, rtol=2e-3), (
f"{bodies} body" f" energy mapping is wrong"
)
# check forces
print("isclose?", mgp_f - gp_f, gp_f)
assert np.allclose(mgp_f, gp_f, atol=1e-3), f"{bodies} body force mapping is wrong"
if mgp_model.var_map == "simple":
print(bodies, multihyps)
for i in range(struc_test.nat):
test_envi = env.AtomicEnvironment(
struc_test, i, cutoffs, cutoffs_mask=gp_model.hyps_mask
)
mgp_pred = mgp_model.predict(test_envi)
mgp_var = mgp_pred[1]
gp_var = predict_atom_diag_var(test_envi, gp_model, kernel_name)
print("mgp_var, gp_var", mgp_var, gp_var)
assert np.allclose(mgp_var, gp_var, rtol=1e-2)
print("struc_test positions", struc_test.positions, struc_test.species_labels)
@pytest.mark.skipif(
not os.environ.get("lmp", False),
reason=(
"lmp not found "
"in environment: Please install LAMMPS "
"and set the $lmp env. "
"variable to point to the executatble."
),
)
@pytest.mark.parametrize("bodies", body_list)
@pytest.mark.parametrize("multihyps", multi_list)
def test_lmp_predict(all_lmp, all_gp, all_mgp, bodies, multihyps):
"""
test the lammps implementation
"""
# pytest.skip()
prefix = f"{bodies}{multihyps}"
mgp_model = all_mgp[prefix]
gp_model = all_gp[prefix]
lmp_calculator = all_lmp[prefix]
ase_calculator = FLARE_Calculator(gp_model, mgp_model, par=False, use_mapping=True)
# create test structure
np.random.seed(1)
cell = np.diag(np.array([1, 1, 1])) * 4
nenv = 10
unique_species = gp_model.training_statistics["species"]
cutoffs = gp_model.cutoffs
struc_test, f = get_random_structure(cell, unique_species, nenv)
# build ase atom from struc
ase_atoms_flare = struc_test.to_ase_atoms()
ase_atoms_flare = FLARE_Atoms.from_ase_atoms(ase_atoms_flare)
ase_atoms_flare.calc = ase_calculator
ase_atoms_lmp = deepcopy(struc_test).to_ase_atoms()
ase_atoms_lmp.calc = lmp_calculator
try:
lmp_en = ase_atoms_lmp.get_potential_energy()
flare_en = ase_atoms_flare.get_potential_energy()
lmp_stress = ase_atoms_lmp.get_stress()
flare_stress = ase_atoms_flare.get_stress()
lmp_forces = ase_atoms_lmp.get_forces()
flare_forces = ase_atoms_flare.get_forces()
except Exception as e:
os.chdir(curr_path)
print(e)
raise e
os.chdir(curr_path)
# check that lammps agrees with mgp to within 1 meV/A
print("energy", lmp_en - flare_en, flare_en)
assert np.isclose(lmp_en, flare_en, atol=1e-3)
print("force", lmp_forces - flare_forces, flare_forces)
assert np.isclose(lmp_forces, flare_forces, atol=1e-3).all()
print("stress", lmp_stress - flare_stress, flare_stress)
assert np.isclose(lmp_stress, flare_stress, atol=1e-3).all()
# check the lmp var
# mgp_std = np.sqrt(mgp_pred[1])
# print("isclose? diff:", lammps_stds[atom_num]-mgp_std, "mgp value", mgp_std)
# assert np.isclose(lammps_stds[atom_num], mgp_std, rtol=1e-2)
clean(prefix=prefix)
|
the-stack_106_23436 | import os
import glob
import h5py
import numpy as np
from torch.utils.data import Dataset
def download():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
zipfile = os.path.basename(www)
str_download_add = 'wget %s' % www
str_download_add += ' --no-check-certificate'
os.system(str_download_add)
os.system('unzip %s' % (zipfile))
os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
os.system('rm %s' % (zipfile))
def load_data(partition):
download()
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(BASE_DIR, 'data')
all_data = []
all_label = []
for h5_name in glob.glob(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048', 'ply_data_%s*.h5'%partition)):
f = h5py.File(h5_name, 'r')
data = f['data'][:].astype('float32')
label = f['label'][:].astype('int64')
f.close()
all_data.append(data)
all_label.append(label)
all_data = np.concatenate(all_data, axis=0)
all_label = np.concatenate(all_label, axis=0)
return all_data, all_label
def random_point_dropout(pc, max_dropout_ratio=0.875):
''' pc: Nx3 point cloud (adapted from a batched BxNx3 version) '''
# for b in range(batch_pc.shape[0]):
dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875
drop_idx = np.where(np.random.random((pc.shape[0]))<=dropout_ratio)[0]
# print ('use random drop', len(drop_idx))
if len(drop_idx)>0:
pc[drop_idx,:] = pc[0,:] # set to the first point
return pc
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2./3., high=3./2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=0.01, clip=0.02):
N, C = pointcloud.shape
pointcloud += np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
return pointcloud
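# Illustrative sketch (added): the three augmentations above act on a single
# (N, 3) float32 cloud and can be chained the way __getitem__ does in training:
#   cloud = np.random.rand(1024, 3).astype('float32') # dummy points
#   cloud = random_point_dropout(cloud) # overwrite a random subset with point 0
#   cloud = translate_pointcloud(cloud) # random per-axis scale and shift
#   cloud = jitter_pointcloud(cloud) # clipped Gaussian noise (not used below)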
class ModelNet40(Dataset):
def __init__(self, num_points, partition='train'):
self.data, self.label = load_data(partition)
self.num_points = num_points
self.partition = partition
def __getitem__(self, item):
pointcloud = self.data[item][:self.num_points]
label = self.label[item]
if self.partition == 'train':
pointcloud = random_point_dropout(pointcloud) # enabled for DGCNN; not used in our approach
pointcloud = translate_pointcloud(pointcloud)
np.random.shuffle(pointcloud)
return pointcloud, label
def __len__(self):
return self.data.shape[0]
|
the-stack_106_23445 | #--**coding:utf-8**--
# Author : Mark
# time : 2021/7/13 16:02
# File : follow_nosignaljuntioncrossing_vehicle.py
import random
import py_trees
import carla
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.scenarioatomics.atomic_behaviors import (ActorTransformSetter,
ActorDestroy,
KeepVelocity,
StopVehicle,
SyncArrival,
WaypointFollower)
from srunner.scenariomanager.scenarioatomics.atomic_criteria import CollisionTest
from srunner.scenariomanager.scenarioatomics.atomic_trigger_conditions import (InTriggerDistanceToVehicle,
InTriggerDistanceToNextIntersection,
DriveDistance,
StandStill,
InTriggerRegion)
from srunner.scenariomanager.timer import TimeOut
from srunner.tools.scenario_helper import get_waypoint_in_distance
from srunner.scenarios.basic_scenario import BasicScenario
class Follow_NoSignalJunctionCrossing_Vehicle(BasicScenario):
"""
依然使用跟车场景
"""
# ego vehicle parameters
_ego_vehicle_max_velocity = 20
_ego_vehicle_driven_distance = 105
timeout = 120
def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True,
timeout=1000):
"""
Set up all relevant parameters and create the scenario
"""
self._map = CarlaDataProvider.get_map()
self._reference_waypoint = self._map.get_waypoint(config.trigger_points[0].location)
self._other_actor_transform = None
self.timeout = timeout
self._first_vehicle_speed = 15
self._first_vehicle_location = 25
# self._second_vehicle_velocity = 15
# self._other_actor_target_velocity = 15
self._other_actor_stop_in_front_intersection = 0
self._other_actor_max_brake = 1.0
self._other_actor_target_velocity = 15 # speed of the npc vehicle
super(Follow_NoSignalJunctionCrossing_Vehicle, self).__init__("Follow_NoSignalJunctionCrossing_Vehicle",
ego_vehicles,
config,
world,
debug_mode,
criteria_enable=criteria_enable)
if randomize:
self._ego_other_distance_start = random.randint(4, 8) # if randomize is set, pick a random initial distance
# Example code how to randomize start location
# distance = random.randint(20, 80)
# new_location, _ = get_location_in_distance(self.ego_vehicles[0], distance)
# waypoint = CarlaDataProvider.get_map().get_waypoint(new_location)
# waypoint.transform.location.z += 39
# self.other_actors[0].set_transform(waypoint.transform)
def _initialize_actors(self, config):
'''
Custom initialization
'''
# car-following part first, test1
# first_vehicle_waypoint, _ = get_waypoint_in_distance(self._reference_waypoint, self._first_vehicle_location)
# self._other_actor_transform = carla.Transform(
# carla.Location(first_vehicle_waypoint.transform.location.x,
# first_vehicle_waypoint.transform.location.y,
# first_vehicle_waypoint.transform.location.z + 1),
# first_vehicle_waypoint.transform.rotation)
#
# first_vehicle_transform = carla.Transform(
# carla.Location(self._other_actor_transform.location.x,
# self._other_actor_transform.location.y,
# self._other_actor_transform.location.z - 500),
# self._other_actor_transform.rotation)
# first_vehicle = CarlaDataProvider.request_new_actor('vehicle.nissan.patrol', first_vehicle_transform)
# first_vehicle.set_simulate_physics(enabled=False)
# self.other_actors.append(first_vehicle)
'''
The position set up for the first scenario below turned out to be wrong.
'''
# car-following part first, test2
# self._other_actor_transform = config.other_actors[0].transform # position and orientation of the actor
# first_vehicle_transform = carla.Transform(
# carla.Location(config.other_actors[0].transform.location.x,
# config.other_actors[0].transform.location.y,
# config.other_actors[0].transform.location.z - 500),
# config.other_actors[0].transform.rotation)
# first_vehicle = CarlaDataProvider.request_new_actor(config.other_actors[0].model, first_vehicle_transform)
# first_vehicle.set_simulate_physics(enabled=False)
# self.other_actors.append(first_vehicle)
# then the junction crossing, test1 test2
# self._other_actor_transform = config.other_actors[1].transform # position and orientation of the actor
# second_vehicle_transform = carla.Transform(
# carla.Location(config.other_actors[1].transform.location.x,
# config.other_actors[1].transform.location.y,
# config.other_actors[1].transform.location.z - 500),
# config.other_actors[1].transform.rotation)
# second_vehicle = CarlaDataProvider.request_new_actor(config.other_actors[1].model, second_vehicle_transform)
# second_vehicle.set_simulate_physics(enabled=False)
# self.other_actors.append(second_vehicle)
# # add actors from xml file test 3
for actor in config.other_actors:
vehicle = CarlaDataProvider.request_new_actor(actor.model, actor.transform)
self.other_actors.append(vehicle)
vehicle.set_simulate_physics(enabled=False)
# following vehicle, tesla test3.1
# first_vehicle_waypoint, _ = get_waypoint_in_distance(self._reference_waypoint, self._first_vehicle_location)
# self._other_actor_transform = carla.Transform(
# carla.Location(first_vehicle_waypoint.transform.location.x,
# first_vehicle_waypoint.transform.location.y,
# first_vehicle_waypoint.transform.location.z + 1),
# first_vehicle_waypoint.transform.rotation)
#
# first_vehicle_transform = carla.Transform(
# carla.Location(self._other_actor_transform.location.x,
# self._other_actor_transform.location.y,
# self._other_actor_transform.location.z - 500),
# self._other_actor_transform.rotation)
# first_vehicle = CarlaDataProvider.request_new_actor('vehicle.nissan.patrol', first_vehicle_transform)
# first_vehicle.set_simulate_physics(enabled=False)
# self.other_actors.append(first_vehicle)
# following vehicle, tesla test3.2
self._other_actor_transform1 = config.other_actors[0].transform # position and orientation of the actor transform
first_vehicle_transform = carla.Transform(
carla.Location(config.other_actors[0].transform.location.x,
config.other_actors[0].transform.location.y,
config.other_actors[0].transform.location.z - 500),
config.other_actors[0].transform.rotation)
first_vehicle = CarlaDataProvider.request_new_actor(config.other_actors[0].model, first_vehicle_transform)
first_vehicle.set_simulate_physics(enabled=False)
self.other_actors.append(first_vehicle)
# intersection vehicle test3
self._other_actor_transform2 = config.other_actors[1].transform # position and orientation of the actor transform
second_vehicle_transform = carla.Transform(
carla.Location(config.other_actors[1].transform.location.x,
config.other_actors[1].transform.location.y,
config.other_actors[1].transform.location.z - 500),
config.other_actors[1].transform.rotation)
second_vehicle = CarlaDataProvider.request_new_actor(config.other_actors[1].model, second_vehicle_transform)
second_vehicle.set_simulate_physics(enabled=False)
self.other_actors.append(second_vehicle)
def _create_behavior(self):
"""
The scenario defined below is a "car-following scenario". After it is invoked, it waits for the
user-controlled vehicle to enter the start region, then drives the other actor towards the obstacle.
Once the obstacle has cleared the road, the other actor drives on towards the next intersection.
Finally, the user-controlled vehicle has to get close enough to the other actor to end the scenario.
If this does not happen within 60 seconds, a timeout stops the scenario.
"""
# stage 1: car following
start_transform = ActorTransformSetter(self.other_actors[0], self._other_actor_transform1)
driving_to_next_intersection = py_trees.composites.Parallel( # drive towards the intersection
"DrivingTowardsIntersection",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
driving_to_next_intersection.add_child(WaypointFollower(self.other_actors[0], self._first_vehicle_speed))
driving_to_next_intersection.add_child(InTriggerDistanceToNextIntersection(
self.other_actors[0], self._other_actor_stop_in_front_intersection))
# stop vehicle using the StopVehicle class from atomic_behaviors
stop = StopVehicle(self.other_actors[0], self._other_actor_max_brake)
endcondition = py_trees.composites.Parallel("Waiting for end position",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ALL)
endcondition_part1 = InTriggerDistanceToVehicle(self.other_actors[0],
self.ego_vehicles[0],
distance=20,
name="FinalDistance")
endcondition_part2 = StandStill(self.ego_vehicles[0], name="StandStill", duration=1)
endcondition.add_child(endcondition_part1)
endcondition.add_child(endcondition_part2)
# stage 2: unsignalized junction
start_other_trigger = InTriggerRegion( # ego trigger region
self.ego_vehicles[0],
-80, -70,
-75, -60)
sync_arrival = SyncArrival( # arrive at the intersection at the same time
self.other_actors[1], self.ego_vehicles[0],
carla.Location(x=-74.63, y=-136.34))
pass_through_trigger = InTriggerRegion( # region the ego passes through
self.ego_vehicles[0],
-90, -70,
-124, -119)
keep_velocity_other = KeepVelocity( # keep the npc vehicle at a constant velocity
self.other_actors[1],
self._other_actor_target_velocity)
stop_other_trigger = InTriggerRegion( # trigger region for the npc vehicle
self.other_actors[1],
-45, -35,
-140, -130)
stop_other = StopVehicle( # stop the npc vehicle
self.other_actors[1],
self._other_actor_max_brake)
end_condition = InTriggerRegion( # end region for the ego vehicle
self.ego_vehicles[0],
-90, -70,
-170, -156
)
endcondition_part3 = end_condition
endcondition.add_child(endcondition_part3)
sync_arrival_parallel = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
keep_velocity_other_parallel = py_trees.composites.Parallel(
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
# Build behavior tree
root = py_trees.composites.Parallel("All Behaviors Trees",
policy=py_trees.common.ParallelPolicy.SUCCESS_ON_ONE)
sequence1 = py_trees.composites.Sequence("Sequence1 Behavior")
sequence1.add_child(start_transform)
sequence1.add_child(driving_to_next_intersection)
sequence1.add_child(stop)
sequence1.add_child(ActorDestroy(self.other_actors[0]))
sequence2 = py_trees.composites.Sequence("Sequence2 Behavior")
sequence2.add_child(ActorTransformSetter(self.other_actors[1], self._other_actor_transform2))
sequence2.add_child(start_other_trigger)
sequence2.add_child(sync_arrival_parallel)
sequence2.add_child(keep_velocity_other_parallel)
sequence2.add_child(stop_other)
sequence2.add_child(ActorDestroy(self.other_actors[1]))
sync_arrival_parallel.add_child(sync_arrival) # simultaneous arrival added as a child of the Parallel node
sync_arrival_parallel.add_child(pass_through_trigger) # pass-through trigger region added as a child of the Parallel node
keep_velocity_other_parallel.add_child(keep_velocity_other) # constant npc velocity added as a child of the Parallel node
keep_velocity_other_parallel.add_child(stop_other_trigger) # npc reaching its trigger region added as a child of the Parallel node
root.add_child(sequence1)
root.add_child(sequence2)
root.add_child(endcondition)
return root
def _create_test_criteria(self):
"""
Create a list of all test criteria, which will be used later by the behavior tree
"""
criteria = []
collision_criterion = CollisionTest(self.ego_vehicles[0])
criteria.append(collision_criterion)
return criteria
def __del__(self):
"""
Remove all actors
"""
self.remove_all_actors()
|
the-stack_106_23446 | import re
import logging
from localstack.utils.common import to_str
from localstack.services.generic_proxy import ProxyListener
LOG = logging.getLogger(__name__)
def fix_creation_date(method, path, response):
try:
content = to_str(response._content)
except Exception:
LOG.info('Unable to convert EC2 response to string: %s' % response._content)
return
response._content = re.sub(r'>\s*([0-9-]+) ([0-9:.]+)Z?\s*</creationTimestamp>',
r'>\1T\2Z</creationTimestamp>', content, flags=re.DOTALL | re.MULTILINE)
response.headers['Content-Length'] = str(len(response._content))
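# Illustrative example (added) of the rewrite performed above: a moto-style
# timestamp such as
#   <creationTimestamp> 2021-01-01 12:00:00 </creationTimestamp>
# becomes the ISO-8601 form clients expect:
#   <creationTimestamp>2021-01-01T12:00:00Z</creationTimestamp>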
def fix_error_tag(response):
# fix error root element from moto
response._content = re.sub(r'<(/?)ErrorResponse', r'<\1Response', to_str(response.content or ''))
class ProxyListenerEC2(ProxyListener):
def return_response(self, method, path, data, headers, response):
if response.content:
fix_creation_date(method, path, response)
fix_error_tag(response)
# instantiate listener
UPDATE_EC2 = ProxyListenerEC2()
|
the-stack_106_23447 | import csv
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from core.utils import to_mg
from .models import Category, Glucose
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mpld3
DATE_FORMAT = '%m/%d/%Y'
TIME_FORMAT = '%I:%M %p'
def import_glucose_from_csv(user, csv_file):
"""
Import glucose CSV data.
We'll process all rows first and create Glucose model objects from them
and perform a bulk create. This way, no records will be inserted unless
all records are good.
Also note that we're using splitlines() to make sure 'universal newlines'
is used.
Assumed order: value, category, record_date, record_time, notes
"""
csv_data = []
reader = csv.reader(csv_file.read().splitlines(), delimiter=',',
quotechar='"')
for row in reader:
csv_data.append([item.strip() for item in row])
glucose_objects = []
# Check if headers exists. Skip the first entry if true.
header_check = ['value', 'category', 'date', 'time']
first_row = [i.lower().strip() for i in csv_data[0]]
if all(i in first_row for i in header_check):
csv_data = csv_data[1:]
for row in csv_data:
# Let's do an extra check to make sure the row is not empty.
if row:
try:
category = Category.objects.get(name__iexact=row[1].strip())
except ObjectDoesNotExist:
category = Category.objects.get(name__iexact='No Category'.strip())
# Since we always store the value in mg/dL format in the db, we need
# to make sure we convert it here if the user's setting is set to
# mmol/L.
if user.settings.glucose_unit.name == 'mmol/L':
value = int(to_mg(row[0]))
else:
value = int(row[0])
glucose_objects.append(Glucose(
user=user,
value=value,
category=category,
record_date=datetime.strptime(row[2], DATE_FORMAT),
record_time=datetime.strptime(row[3], TIME_FORMAT),
notes=row[4],
))
Glucose.objects.bulk_create(glucose_objects)
def get_initial_category(user):
"""
Retrieve the default category from the user settings.
If the default category is None (labeled as 'Auto' in the settings page),
automatically pick the category based on time of day.
"""
user_settings = user.settings
default_category = user_settings.default_category
if not default_category:
now = datetime.now(tz=user_settings.time_zone)
breakfast_start = now.replace(hour=4, minute=0)
breakfast_end = now.replace(hour=11, minute=0)
lunch_start = now.replace(hour=11, minute=0)
lunch_end = now.replace(hour=16, minute=0)
dinner_start = now.replace(hour=16, minute=0)
dinner_end = now.replace(hour=22, minute=0)
if now > breakfast_start and now < breakfast_end:
category_name = 'Breakfast'
elif now > lunch_start and now < lunch_end:
category_name = 'Lunch'
elif now > dinner_start and now < dinner_end:
category_name = 'Dinner'
else:
category_name = 'Bedtime'
default_category = Category.objects.get(name=category_name)
return default_category
def diff1(array):
"""
:param array: input x
:return: processed data which is the 1-order difference
"""
return [j-i for i, j in zip(array[:-1], array[1:])]
def diff2(array):
"""
:param array: input x
:return: processed data which is the 2-order difference
"""
return [j - i for i, j in zip(array[:-2], array[2:])]
def setup_equation(array, height):
"""
:param array: input x
:param height: input h
:return: output y
"""
diff_1 = diff1(array)
diff_2 = diff2(array)
# construct coefficients matrix and bias term
para = np.zeros((len(height), len(height)))
offset = np.zeros(len(height))
para[0][0] = diff_2[0]
para[0][1] = -diff_1[0]
offset[0] = diff_2[0] * height[0]
for i in range(1, len(height)-1):
para[i, i-1] = -diff_2[i] + diff_1[i]
para[i, i] = diff_2[i]
para[i, i+1] = -diff_1[i]
offset[i] = diff_2[i] * height[i]
para[-1][-2] = -diff_1[-1]
para[-1][-1] = diff_2[-1]
offset[-1] = diff_2[-1] * height[-1]
return para, offset
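# Reading of the construction above (added note): ``para`` is a tridiagonal
# coefficient matrix built from the first/second differences of x, and
# ``offset`` is the matching right-hand side, so algo() below recovers the
# interior values of y by solving para @ y[1:-1] = offset with np.linalg.solve.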
def algo():
df = pd.read_excel('dat.xlsx', 'Sheet1', header=None)
x = df.as_matrix()
df = pd.read_excel('dat.xlsx', 'Sheet2', header=None)
h = df.as_matrix()
# solve the equation
A, b = setup_equation(x, h)
result = np.linalg.solve(A, b)
y = np.concatenate(([0], result, [0]))
x = x.tolist()
y = y.tolist()
return x, y
# plot the result
# plt.plot(x, y)
# plt.show()
def generate_figure():
fig = plt.figure()
plt.axes([0.1, 0.15, 0.8, 0.75])
plt.plot(range(10))
return fig
|
the-stack_106_23451 | """
Write a program to find the node at which the intersection of two singly linked
lists begins.
For example, the following two linked lists:
A: a1 → a2
↘
c1 → c2 → c3
↗
B: b1 → b2 → b3
begin to intersect at node c1.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function
returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
Credits:
Special thanks to @stellari for adding this problem and creating all test
cases.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
if headA is None or headB is None:
return None
p, q = headA, headB
while p != q:
if p is None:
p = headB
else:
p = p.next
if q is None:
q = headA
else:
q = q.next
# if no intersection, they will reach the end at same time
return p
|
the-stack_106_23453 | from io import StringIO
from django.core.management import call_command
from .base import UnitTest
from unittest.mock import patch, call
from tests.helpers import JsonData
from django.core.management.base import CommandError
from status.models import Block
class DownloadBlocksTest(UnitTest):
'''Unit test command "download_blocks"'''
@patch("update.management.commands.download_blocks.get_block_api")
def test_with_one_created_block(self, mock_block_data):
first_block_result = JsonData.first_block_result
Block.objects.create(**first_block_result)
out = StringIO()
call_command("download_blocks", "1", stdout=out)
mock_block_data.assert_called_with(first_block_result["height"] - 1)
self.assertEqual(
out.getvalue(), "Waiting time: 10\nComplete download 1 blocks\n"
)
@patch("update.management.commands.download_blocks.get_block_api")
@patch("update.management.commands.download_blocks.get_latest_block_height")
def test_command_output_without_blocks(self, mock_get_height, mock_block_data):
height = 100000
mock_get_height.return_value = height
out = StringIO()
call_command("download_blocks", "3", stdout=out)
mock_block_data.assert_has_calls([call(99998), call(99999), call(100000)])
self.assertEqual(
out.getvalue(),
"Waiting time: 30\n2 blocks remain\n1 block remains\nComplete download 3 blocks\n",
)
def test_errors(self):
with self.assertRaisesRegex(
CommandError, "Range of number of blocks must be from 1 to 10, not 11"
):
call_command("download_blocks", "11")
with self.assertRaisesRegex(
CommandError, "Range of number of blocks must be from 1 to 10, not 0"
):
call_command("download_blocks", "0")
with self.assertRaisesRegex(
CommandError, "Range of number of blocks must be from 1 to 10, not -1"
):
call_command("download_blocks", "-1")
@patch("update.management.commands.download_blocks.get_block_api")
@patch("update.management.commands.download_blocks.get_latest_block_height")
def test_height_min(self, mock_get_height, mock_block_data):
mock_get_height.return_value = 3
with self.assertRaisesRegex(
CommandError, "Block height must be greater than 0"
):
call_command("download_blocks", "4")
out = StringIO()
call_command("download_blocks", "3", stdout=out)
mock_block_data.assert_has_calls([call(1), call(2), call(3)])
|
the-stack_106_23454 | from keras import backend as K
from keras.engine import InputSpec, Layer
from keras import initializers, regularizers, constraints
# From a PR that is not pulled into Keras
# https://github.com/fchollet/keras/pull/3677
# I updated the code to work on Keras 2.x
class MinibatchDiscrimination(Layer):
"""Concatenates to each sample information about how different the input
features for that sample are from features of other snapshots in the same
minibatch, as described in Salimans et. al. (2016). Useful for preventing
GANs from collapsing to a single output. When using this layer, generated
snapshots and reference snapshots should be in separate batches.
# Example
```python
# apply a convolution 1d of length 3 to a sequence with 10 timesteps,
# with 64 output filters
model = Sequential()
model.add(Convolution1D(64, 3, border_mode='same', input_shape=(10, 32)))
# now model.output_shape == (None, 10, 64)
# flatten the output so it can be fed into a minibatch discrimination layer
model.add(Flatten())
# now model.output_shape == (None, 640)
# add the minibatch discrimination layer
model.add(MinibatchDiscrimination(5, 3))
# now model.output_shape = (None, 645)
```
# Arguments
nb_kernels: Number of discrimination kernels to use
(dimensionality concatenated to output).
kernel_dim: The dimensionality of the space where closeness of snapshots
is calculated.
init: name of initialization function for the weights of the layer
(see [initializations](../initializations.md)),
or alternatively, Theano function to use for weights initialization.
This parameter is only relevant if you don't pass a `weights` argument.
weights: list of numpy arrays to set as initial weights.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the main weights matrix.
activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
applied to the network output.
W_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
input_dim: Number of channels/dimensions in the input.
Either this argument or the keyword argument `input_shape` must be
provided when using this layer as the first layer in a model.
# Input shape
2D tensor with shape: `(snapshots, input_dim)`.
# Output shape
2D tensor with shape: `(snapshots, input_dim + nb_kernels)`.
# References
- [Improved Techniques for Training GANs](https://arxiv.org/abs/1606.03498)
"""
def __init__(self, nb_kernels, kernel_dim, init='glorot_uniform', weights=None,
W_regularizer=None, activity_regularizer=None,
W_constraint=None, input_dim=None, **kwargs):
self.init = initializers.get(init)
self.nb_kernels = nb_kernels
self.kernel_dim = kernel_dim
self.input_dim = input_dim
self.W_regularizer = regularizers.get(W_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.initial_weights = weights
self.input_spec = [InputSpec(ndim=2)]
if self.input_dim:
kwargs['input_shape'] = (self.input_dim,)
super(MinibatchDiscrimination, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 2
input_dim = input_shape[1]
self.input_spec = [InputSpec(dtype=K.floatx(),
shape=(None, input_dim))]
self.W = self.add_weight(shape=(self.nb_kernels, input_dim, self.kernel_dim),
initializer=self.init,
name='kernel',
regularizer=self.W_regularizer,
trainable=True,
constraint=self.W_constraint)
# Set built to true.
super(MinibatchDiscrimination, self).build(input_shape)
def call(self, x, mask=None):
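# Shape walk-through (descriptive comments added): x is (batch, input_dim) and
# activation becomes (batch, nb_kernels, kernel_dim); diffs compares every pair
# of samples per kernel, abs_diffs reduces that to an L1 distance over
# kernel_dim, and minibatch_features sums exp(-distance) over the batch to give
# one closeness feature per kernel, concatenated onto x.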
activation = K.reshape(K.dot(x, self.W), (-1, self.nb_kernels, self.kernel_dim))
diffs = K.expand_dims(activation, 3) - K.expand_dims(K.permute_dimensions(activation, [1, 2, 0]), 0)
abs_diffs = K.sum(K.abs(diffs), axis=2)
minibatch_features = K.sum(K.exp(-abs_diffs), axis=2)
return K.concatenate([x, minibatch_features], 1)
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) == 2
return input_shape[0], input_shape[1]+self.nb_kernels
def get_config(self):
config = {'nb_kernels': self.nb_kernels,
'kernel_dim': self.kernel_dim,
'init': self.init.__name__,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
'input_dim': self.input_dim}
base_config = super(MinibatchDiscrimination, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
the-stack_106_23455 | """
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='+.[(;=Q07q6tB9UblTzVS{k>59g{t/AhOX?@8O$rq=_H%GxGGT')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', '10.0.0.1', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('wagtail.contrib.styleguide', )
ALLOWED_HOSTS = ('127.0.0.1', '10.0.0.101', 'localhost', 'www.dev.bluehut.ca', '192.168.0.106', '*')
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
|
the-stack_106_23456 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XLNet model.
"""
from dataclasses import dataclass
from typing import List, Optional, Tuple
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from .activations import gelu_new, swish
from .configuration_xlnet import XLNetConfig
from .file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_callable,
replace_return_docstrings,
)
from .modeling_utils import (
PoolerAnswerClass,
PoolerEndLogits,
PoolerStartLogits,
PreTrainedModel,
SequenceSummary,
apply_chunking_to_forward,
)
from .utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "XLNetConfig"
_TOKENIZER_FOR_DOC = "XLNetTokenizer"
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xlnet-base-cased",
"xlnet-large-cased",
# See all XLNet models at https://huggingface.co/models?filter=xlnet
]
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
"""A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
"""Load tf checkpoints in a pytorch model"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
for name, pointer in tf_to_pt_map.items():
logger.info("Importing {}".format(name))
if name not in tf_weights:
logger.info("{} not in tf pre-trained weights, skipping".format(name))
continue
array = tf_weights[name]
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
logger.info("Transposing")
array = np.transpose(array)
if isinstance(pointer, list):
# Here we will split the TF weights
assert (
len(pointer) == array.shape[0]
), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched"
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert (
p_i.shape == arr_i.shape
), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched"
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
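# Added note: for a score tensor of shape [batch, head, qlen, klen(+extra)] the
# reshape-and-slice trick above realigns entry (i, j) so the last axis indexes
# the relative offset between query and key positions, then trims it back to
# klen columns -- the "relative shift" used in Transformer-XL.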
def rel_attn_core(
self,
q_head,
k_head_h,
v_head_h,
k_head_r,
seg_mat=None,
attn_mask=None,
head_mask=None,
output_attentions=False,
):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(
self,
h,
g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
# type casting for fp16 support
k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if output_attentions:
outputs = outputs + (attn_prob,)
return outputs
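# Illustrative sketch (not part of the library): a minimal shape demo of `rel_shift_bnij` above. The
# position-based scores carry one extra trailing width of relative positions; the shift realigns each
# query row and keeps the first `klen` columns. The tensor sizes here are toy values for the demo only.
def _rel_shift_demo():
    qlen, klen = 3, 3
    bd = torch.arange(12, dtype=torch.float).view(1, 1, qlen, klen + 1)  # [bsz, n_head, qlen, klen + 1]
    shifted = XLNetRelativeAttention.rel_shift_bnij(bd, klen=klen)
    assert shifted.shape == (1, 1, qlen, klen)
    return shifted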
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
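# Illustrative sketch (not part of the library): running the position-wise feed-forward block on a toy
# tensor. It assumes XLNetConfig accepts these keyword arguments with toy values; the block maps
# d_model -> d_inner -> d_model and adds a residual connection followed by LayerNorm.
def _feed_forward_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=1, n_head=2, d_inner=32, ff_activation="gelu")
    ff = XLNetFeedForward(config)
    hidden = torch.randn(5, 2, config.d_model)  # [qlen, bsz, d_model], the layout used inside the model
    out = ff(hidden)
    assert out.shape == hidden.shape
    return out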
class XLNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.rel_attn = XLNetRelativeAttention(config)
self.ff = XLNetFeedForward(config)
self.dropout = nn.Dropout(config.dropout)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(
self,
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
outputs = self.rel_attn(
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=mems,
target_mapping=target_mapping,
head_mask=head_mask,
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g
)
output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h)
        outputs = (output_h, output_g) + outputs[2:]  # Add attentions again if they are there
return outputs
def ff_chunk(self, output_x):
output_x = self.ff(output_x)
return output_x
class XLNetPreTrainedModel(PreTrainedModel):
"""An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
load_tf_weights = load_tf_weights_in_xlnet
base_model_prefix = "transformer"
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, XLNetLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, XLNetRelativeAttention):
for param in [
module.q,
module.k,
module.v,
module.o,
module.r,
module.r_r_bias,
module.r_s_bias,
module.r_w_bias,
module.seg_embed,
]:
param.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, XLNetModel):
module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
@dataclass
class XLNetModelOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetModel`.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then
``num_predict`` corresponds to ``sequence_length``.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetLMHeadModelOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetLMHeadModel`.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then
``num_predict`` corresponds to ``sequence_length``.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForSequenceClassificationOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetForSequenceClassification`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForTokenClassificationOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetForTokenClassificationOutput`.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForMultipleChoiceOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetForMultipleChoice`.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForQuestionAnsweringSimpleOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetForQuestionAnsweringSimple`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class XLNetForQuestionAnsweringOutput(ModelOutput):
"""
Output type of :class:`~transformers.XLNetForQuestionAnswering`.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states.
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
`use_cache` has to be set to `True` to make use of `mems`.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
use_cache (:obj:`bool`):
If `use_cache` is True, `mems` are returned and can be used to speed up decoding (see `mems`). Defaults to `True`.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
mlen: Mask length
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(self.device)
return ret
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if self.mem_len is None or self.mem_len == 0:
# If `use_cache` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time
# and returns all of the past and current hidden states.
cutoff = 0
else:
# If `use_cache` is active and `mem_len` is defined, the model returns the last `mem_len` hidden
# states. This is the preferred setting for training and long-form generation.
cutoff = -self.mem_len
if prev_mem is None:
            # no previous memory: the (possibly truncated) current hidden states become the cache
new_mem = curr_out[cutoff:]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:]
return new_mem.detach()
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(self.device)
return pos_emb
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="xlnet-base-cased",
output_type=XLNetModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
# the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
# but we want a unified interface in the library with the batch size on the first dimension
# so we move here the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = self.dtype
device = self.device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
word_emb_k = self.word_embedding(input_ids)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = [] if output_attentions else None
hidden_states = [] if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
if use_cache:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
output = output.permute(1, 0, 2).contiguous()
if not use_cache:
new_mems = None
if output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
if output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
if not return_dict:
return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None)
return XLNetModelOutput(
last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions
)
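# Illustrative sketch (not part of the library): using `mems` for chunked/sequential processing, as
# described in the docstrings above. A tiny randomly-initialized model is used, and the XLNetConfig
# keyword arguments are assumed toy values. The hidden states cached from the first chunk are fed back
# through `mems`, so the second call only needs the new token ids.
def _mems_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=2, n_head=2, d_inner=32, mem_len=8)
    model = XLNetModel(config).eval()
    first_chunk = torch.randint(0, config.vocab_size, (1, 4))
    second_chunk = torch.randint(0, config.vocab_size, (1, 4))
    with torch.no_grad():
        out1 = model(first_chunk, use_cache=True, return_dict=True)
        # out1.mems holds one cached hidden-state tensor per layer, shape [cached_len, bsz, d_model]
        out2 = model(second_chunk, mems=out1.mems, use_cache=True, return_dict=True)
    return out2.last_hidden_state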
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
        # At every pass, the attention values for the new token and the two last generated tokens
        # are computed; the rest is reloaded from the `past` cache. A purely auto-regressive model would
        # use offset = 1; offset = 2 seems to give slightly better results.
offset = 2
if past:
input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1)
else:
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
target_mapping[0, 0, -1] = 1.0
inputs = {
"input_ids": input_ids,
"perm_mask": perm_mask,
"target_mapping": target_mapping,
"use_cache": kwargs["use_cache"],
}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past)
return inputs
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
Labels for masked language modeling.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted and depend on `target_mapping`. Note that in order to perform standard auto-regressive language modeling, a `<mask>` token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored, the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
Examples::
from transformers import XLNetTokenizer, XLNetLMHeadModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased', return_dict=True)
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
            # In the same way, XLNetLMHeadModel can be trained with standard auto-regressive language modeling.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
assert labels.shape[0] == 1, 'only one word will be predicted'
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
loss = outputs.loss
next_token_logits = outputs.logits # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_loss(transformer_outputs[0])
loss = None
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetLMHeadModelOutput(
loss=loss,
logits=logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
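# Illustrative sketch (not part of the library): the perm_mask / target_mapping recipe from the docstring
# above, but on a tiny randomly-initialized model so it runs without downloading pretrained weights.
# The XLNetConfig keyword arguments are assumed toy values; the resulting logits are meaningless.
def _lm_head_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=2, n_head=2, d_inner=32)
    model = XLNetLMHeadModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 6))
    # No token may attend to the last position, and only that last position is predicted.
    perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
    perm_mask[:, :, -1] = 1.0
    target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float)
    target_mapping[0, 0, -1] = 1.0
    with torch.no_grad():
        logits = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, return_dict=True).logits
    assert logits.shape == (1, 1, config.vocab_size)
    return logits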
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="xlnet-base-cased",
output_type=XLNetForSequenceClassificationOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetForSequenceClassificationOutput(
loss=loss,
logits=logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
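# Illustrative sketch (not part of the library): a single classification forward/backward step on a tiny
# randomly-initialized model. The XLNetConfig keyword arguments (including `num_labels`, inherited from
# the base config) are assumed to accept these toy values.
def _sequence_classification_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=2, n_head=2, d_inner=32, num_labels=3)
    model = XLNetForSequenceClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 5))
    labels = torch.tensor([0, 2])
    outputs = model(input_ids, labels=labels, return_dict=True)
    outputs.loss.backward()  # cross-entropy loss, since num_labels > 1
    return outputs.logits    # shape [batch_size, num_labels]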
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="xlnet-base-cased",
output_type=XLNetForTokenClassificationOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetForTokenClassificationOutput(
loss=loss,
logits=logits,
mems=outputs.mems,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
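# Illustrative sketch (not part of the library): per-token tagging with the token-classification head.
# Padding positions are excluded from the loss via `attention_mask`, as in the forward above.
# The XLNetConfig keyword arguments are assumed toy values.
def _token_classification_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=2, n_head=2, d_inner=32, num_labels=5)
    model = XLNetForTokenClassification(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 6))
    attention_mask = torch.tensor([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]])  # last two tokens are padding
    labels = torch.randint(0, config.num_labels, (1, 6))
    outputs = model(input_ids, attention_mask=attention_mask, labels=labels, return_dict=True)
    assert outputs.logits.shape == (1, 6, config.num_labels)
    return outputs.loss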
@add_start_docstrings(
"""XLNet Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="xlnet-base-cased",
output_type=XLNetForMultipleChoiceOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
token_type_ids=None,
input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
if not return_dict:
output = (reshaped_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return XLNetForMultipleChoiceOutput(
loss=loss,
logits=reshaped_logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
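# Illustrative sketch (not part of the library): the input layout expected by the multiple-choice head.
# Inputs are [batch_size, num_choices, seq_len]; the model flattens the choice dimension, encodes every
# choice independently and reshapes the projected scores back to [batch_size, num_choices].
# The XLNetConfig keyword arguments are assumed toy values.
def _multiple_choice_demo():
    config = XLNetConfig(vocab_size=32, d_model=16, n_layer=2, n_head=2, d_inner=32)
    model = XLNetForMultipleChoice(config)
    batch_size, num_choices, seq_len = 2, 4, 5
    input_ids = torch.randint(0, config.vocab_size, (batch_size, num_choices, seq_len))
    labels = torch.tensor([1, 3])  # index of the correct choice for each example
    outputs = model(input_ids, labels=labels, return_dict=True)
    assert outputs.logits.shape == (batch_size, num_choices)
    return outputs.loss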
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="xlnet-base-cased",
output_type=XLNetForQuestionAnsweringSimpleOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds an extra dimension to the position tensors; remove it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return XLNetForQuestionAnsweringSimpleOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
mems=outputs.mems,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
1.0 means token should be masked. 0.0 mean token is not masked.
Returns:
Example::
>>> from transformers import XLNetTokenizer, XLNetForQuestionAnswering
>>> import torch
>>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
>>> model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased', return_dict=True)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
use_cache = self.training or (use_cache if use_cache is not None else self.config.use_cache)
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # keep mems, hidden states and attentions if they are present
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
if not return_dict:
return (total_loss,) + transformer_outputs[1:]
else:
return XLNetForQuestionAnsweringOutput(
loss=total_loss,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
if not return_dict:
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
return outputs + transformer_outputs[1:]
else:
return XLNetForQuestionAnsweringOutput(
start_top_log_probs=start_top_log_probs,
start_top_index=start_top_index,
end_top_log_probs=end_top_log_probs,
end_top_index=end_top_index,
cls_logits=cls_logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
|
the-stack_106_23458 |
import typing as t
from .abc import Node
from .exceptions import ParserException, ConstructionException
T = t.TypeVar('T')
backslash = '\\'
split_dept = {'(': ')', '[': ']', '{': '}', '\'': '\'', '"': '"'}
def tokenize(string: str, sep: str) -> t.Iterator[str]:
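    """Split ``string`` on ``sep``, ignoring separators nested inside (), [], {} or quotes.
    A backslash escapes the following character; ParserException is raised when a bracket
    or quote is left unclosed at the end of the string.
    """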
assert len(sep) == 1
start = 0
dept = []
last_backslash = False
for pos, char in enumerate(string):
if char == sep and not dept:
yield string[start:pos]
start = pos + 1
if not last_backslash:
if dept and char == dept[-1]:
dept.pop()
elif char in split_dept and '"' not in dept and "'" not in dept:
dept.append(split_dept[char])
if char == backslash and not last_backslash:
last_backslash = True
else:
last_backslash = False
if start - 1 != len(string):
yield string[start:]
if dept:
raise ParserException(f'unclosed {dept[-1]!r}')
def get(generator: t.Iterator[T], amount: int = None, func=next) \
-> t.Union[T, t.Sequence[T]]:
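    """Fetch a single item from ``generator`` (when ``amount`` is None) or a list of ``amount`` items, using ``func``."""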
if amount is None:
return func(generator)
r = []
for x in range(amount):
r.append(func(generator))
return r
def ensure_nodes(object: t.Any, *names: str):
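    """Raise ConstructionException unless every attribute named in ``names`` is a truthy Node instance."""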
for name in names:
value = getattr(object, name, None)
if not value or not isinstance(value, Node):
raise ConstructionException(
f'expected {name!r} to be Node, not {value!r}'
)
def clean(string):
return string.rstrip('0').rstrip('.')
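# Illustrative example (not part of the original module): separators inside brackets or quotes are ignored,
# so list(tokenize("f(a, b), [1, 2]", ",")) yields ["f(a, b)", " [1, 2]"].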
|
the-stack_106_23460 | from apas.util.logging import LogHandler
import pathlib
import json
import os
class ConfigHandler:
HEADER = "{0: <20}".format("(ConfigHandler):")
config_dict = None
secrets_dict = None
VERBOSITY_OPTIONS = {"Detailed": True, "Standard": False}
EMPTY_SECRETS_FILE_CONTENT = {
"AMAZON_ACCESS_KEY": "",
"AMAZON_SECRET_KEY": "",
"AMAZON_ASSOC_TAG": "",
}
DEFAULT_CONFIG_FILE_CONTENT = {
"RETRY_LIMIT": 3,
"COOLDOWN_TIME_FOR_RETRY": 5,
"VERBOSE_OUTPUT": "Standard",
"PRODUCT_LIMIT": 5,
"BrowseNodeCrawler": {
"books": {
"INCLUDED_MAIN_CATEGORIES": [],
"SPECIFIC_SUB_CATEGORIES": []
},
"kindle_books": {
"INCLUDED_MAIN_CATEGORIES": [],
"SPECIFIC_SUB_CATEGORIES": []
}
}
}
ROOT_DIR = None
CONFIG_FILE_PATH = None
SECRETS_FILE_PATH = None
def __init__(self, start_up_handler):
self.start_up_handler = start_up_handler
self.ROOT_DIR = self.start_up_handler.ROOT_DIR
self.CONFIG_FILE_PATH = str(pathlib.Path.joinpath(self.ROOT_DIR, "config", "config.json"))
self.SECRETS_FILE_PATH = str(pathlib.Path.joinpath(self.ROOT_DIR, "config", "secrets.json"))
def load_config_from_file(self):
if not os.path.isfile(self.CONFIG_FILE_PATH):
msg = f"{self.HEADER} Could not find config.json-File. Creating a new config.json with default values."
if self.start_up_handler.in_start_up_phase():
self.start_up_handler.log_start_up_message(msg=msg)
else:
print(msg)
with open(self.CONFIG_FILE_PATH, "w", encoding="UTF-8") as config_file:
try:
config_file.write(json.dumps(self.DEFAULT_CONFIG_FILE_CONTENT))
except (IOError, json.JSONDecodeError) as e:
raise Exception(
f"{self.HEADER} ERROR: An error occurred while trying to create a new empty config.json-file! "
f"Msg: {str(e)}"
)
with open(self.CONFIG_FILE_PATH, "r", encoding="UTF-8") as config_file:
try:
self.config_dict = json.loads(config_file.read(), encoding="UTF-8")
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: Config.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
try:
self.config_dict["VERBOSE_OUTPUT"] = self.VERBOSITY_OPTIONS[
self.config_dict["VERBOSE_OUTPUT"]
]
except KeyError:
self.config_dict["VERBOSE_OUTPUT"] = False
def load_secrets_from_file(self):
if not os.path.isfile(self.SECRETS_FILE_PATH):
msg = f"{self.HEADER} Could not find config.json-File. Creating a new secrets.json with empty values."
if self.start_up_handler.in_start_up_phase():
self.start_up_handler.log_start_up_message(msg=msg)
else:
print(msg)
with open(self.SECRETS_FILE_PATH, "w", encoding="UTF-8") as new_secrets_file:
try:
new_secrets_file.write(json.dumps(self.EMPTY_SECRETS_FILE_CONTENT))
except IOError:
raise Exception(
f"{self.HEADER} ERROR: Could not create a new empty secrets.json-file!"
)
with open(self.SECRETS_FILE_PATH, "r", encoding="UTF-8") as secrets_file:
try:
self.secrets_dict = json.loads(secrets_file.read(), encoding="UTF-8")
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: Config.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
def secrets_dict_is_empty(self):
for key, value in self.secrets_dict.items():
if value == "" or value is None:
return True
return False
def update_config_file(self, new_config_dict: dict) -> None:
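        """Merge ``new_config_dict`` into config.json on disk; the BrowseNodeCrawler section is merged
        per sub-key so that updating e.g. "books" does not discard the "kindle_books" settings."""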
with open(self.CONFIG_FILE_PATH, "r", encoding="UTF-8") as config_file:
try:
config_dict = json.loads(config_file.read(), encoding="UTF-8")
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: Config.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
if 'BrowseNodeCrawler' in new_config_dict:
            # BrowseNodeCrawler-Config is updated separately.
            # Why separate? The config could concern books OR kindle. A simple update of kindle would delete
            # the values of books and vice-versa.
            node_crawler_config = new_config_dict['BrowseNodeCrawler']
            del new_config_dict['BrowseNodeCrawler']  # Delete it from the dict -> it will be treated separately
config_dict['BrowseNodeCrawler'].update(node_crawler_config)
if new_config_dict: # If there are keys left, update them
# Should actually be empty (separate updates of each section).
config_dict.update(new_config_dict)
else:
config_dict.update(new_config_dict)
with open(self.CONFIG_FILE_PATH, "w", encoding="UTF-8") as config_file:
try:
config_file.write(json.dumps(config_dict))
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: Config.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
if self.config_dict["VERBOSE_OUTPUT"]:
LogHandler.log_message(
f"{self.HEADER} Successfully updated config.json. New Values: {str(config_dict)}"
)
else:
LogHandler.log_message(f"{self.HEADER} Successfully updated config.json.")
def update_secrets_file(self, new_secrets_dict: dict) -> None:
with open(self.SECRETS_FILE_PATH, "r", encoding="UTF-8") as secrets_file:
try:
secrets_dict = json.loads(secrets_file.read(), encoding="UTF-8")
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: secrets.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
secrets_dict.update(new_secrets_dict)
with open(self.SECRETS_FILE_PATH, "w", encoding="UTF-8") as secrets_file:
try:
secrets_file.write(json.dumps(secrets_dict))
except json.JSONDecodeError:
raise Exception(
f"{self.HEADER} ERROR: secrets.json is not a valid json-File! Please copy the "
f"content of the file in the following online-Tool to check the file's syntax "
"'https://jsonlint.com/'"
)
if self.config_dict["VERBOSE_OUTPUT"]:
LogHandler.log_message(
f"{self.HEADER} Successfully updated secrets.json. New Values: {str(new_secrets_dict)}"
)
else:
LogHandler.log_message(f"{self.HEADER} Successfully updated secrets.json.") |
the-stack_106_23461 | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=protected-access
from collections import namedtuple
from platform import python_implementation
from unittest import mock
from opentelemetry.instrumentation.system_metrics import (
SystemMetricsInstrumentor,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.test.test_base import TestBase
def _mock_netconnection():
NetConnection = namedtuple(
"NetworkConnection", ["family", "type", "status"]
)
Type = namedtuple("Type", ["value"])
return [
NetConnection(
family=1,
status="ESTABLISHED",
type=Type(value=2),
),
NetConnection(
family=1,
status="ESTABLISHED",
type=Type(value=1),
),
]
class _SystemMetricsResult:
def __init__(self, attributes, value) -> None:
self.attributes = attributes
self.value = value
class TestSystemMetrics(TestBase):
def setUp(self):
super().setUp()
self.implementation = python_implementation().lower()
self._patch_net_connections = mock.patch(
"psutil.net_connections", _mock_netconnection
)
self._patch_net_connections.start()
def tearDown(self):
super().tearDown()
self._patch_net_connections.stop()
SystemMetricsInstrumentor().uninstrument()
def test_system_metrics_instrument(self):
reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[reader])
system_metrics = SystemMetricsInstrumentor()
system_metrics.instrument(meter_provider=meter_provider)
metric_names = []
for resource_metrics in reader.get_metrics_data().resource_metrics:
for scope_metrics in resource_metrics.scope_metrics:
for metric in scope_metrics.metrics:
metric_names.append(metric.name)
self.assertEqual(len(metric_names), 17)
observer_names = [
"system.cpu.time",
"system.cpu.utilization",
"system.memory.usage",
"system.memory.utilization",
"system.swap.usage",
"system.swap.utilization",
"system.disk.io",
"system.disk.operations",
"system.disk.time",
"system.network.dropped_packets",
"system.network.packets",
"system.network.errors",
"system.network.io",
"system.network.connections",
f"runtime.{self.implementation}.memory",
f"runtime.{self.implementation}.cpu_time",
f"runtime.{self.implementation}.gc_count",
]
for observer in metric_names:
self.assertIn(observer, observer_names)
observer_names.remove(observer)
def _assert_metrics(self, observer_name, reader, expected):
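        # Walk every exported data point; for each expected (attributes, value) pair matching
        # ``observer_name`` check the value, then assert the match count equals len(expected).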
assertions = 0
# pylint: disable=too-many-nested-blocks
for resource_metrics in reader.get_metrics_data().resource_metrics:
for scope_metrics in resource_metrics.scope_metrics:
for metric in scope_metrics.metrics:
for data_point in metric.data.data_points:
for expect in expected:
if (
dict(data_point.attributes)
== expect.attributes
and metric.name == observer_name
):
self.assertEqual(
data_point.value,
expect.value,
)
assertions += 1
self.assertEqual(len(expected), assertions)
def _test_metrics(self, observer_name, expected):
reader = InMemoryMetricReader()
meter_provider = MeterProvider(metric_readers=[reader])
system_metrics = SystemMetricsInstrumentor()
system_metrics.instrument(meter_provider=meter_provider)
self._assert_metrics(observer_name, reader, expected)
# This patch is added here to stop psutil from raising an exception
# because we're patching cpu_times
# pylint: disable=unused-argument
@mock.patch("psutil.cpu_times_percent")
@mock.patch("psutil.cpu_times")
def test_system_cpu_time(self, mock_cpu_times, mock_cpu_times_percent):
CPUTimes = namedtuple("CPUTimes", ["idle", "user", "system", "irq"])
mock_cpu_times.return_value = [
CPUTimes(idle=1.2, user=3.4, system=5.6, irq=7.8),
CPUTimes(idle=1.2, user=3.4, system=5.6, irq=7.8),
]
expected = [
_SystemMetricsResult(
{
"cpu": 1,
"state": "idle",
},
1.2,
),
_SystemMetricsResult(
{
"cpu": 1,
"state": "user",
},
3.4,
),
_SystemMetricsResult(
{
"cpu": 1,
"state": "system",
},
5.6,
),
_SystemMetricsResult(
{
"cpu": 1,
"state": "irq",
},
7.8,
),
_SystemMetricsResult(
{
"cpu": 2,
"state": "idle",
},
1.2,
),
_SystemMetricsResult(
{
"cpu": 2,
"state": "user",
},
3.4,
),
_SystemMetricsResult(
{
"cpu": 2,
"state": "system",
},
5.6,
),
_SystemMetricsResult(
{
"cpu": 2,
"state": "irq",
},
7.8,
),
]
self._test_metrics("system.cpu.time", expected)
@mock.patch("psutil.cpu_times_percent")
def test_system_cpu_utilization(self, mock_cpu_times_percent):
CPUTimesPercent = namedtuple(
"CPUTimesPercent", ["idle", "user", "system", "irq"]
)
mock_cpu_times_percent.return_value = [
CPUTimesPercent(idle=1.2, user=3.4, system=5.6, irq=7.8),
CPUTimesPercent(idle=1.2, user=3.4, system=5.6, irq=7.8),
]
expected = [
_SystemMetricsResult({"cpu": 1, "state": "idle"}, 1.2 / 100),
_SystemMetricsResult({"cpu": 1, "state": "user"}, 3.4 / 100),
_SystemMetricsResult({"cpu": 1, "state": "system"}, 5.6 / 100),
_SystemMetricsResult({"cpu": 1, "state": "irq"}, 7.8 / 100),
_SystemMetricsResult({"cpu": 2, "state": "idle"}, 1.2 / 100),
_SystemMetricsResult({"cpu": 2, "state": "user"}, 3.4 / 100),
_SystemMetricsResult({"cpu": 2, "state": "system"}, 5.6 / 100),
_SystemMetricsResult({"cpu": 2, "state": "irq"}, 7.8 / 100),
]
self._test_metrics("system.cpu.utilization", expected)
@mock.patch("psutil.virtual_memory")
def test_system_memory_usage(self, mock_virtual_memory):
VirtualMemory = namedtuple(
"VirtualMemory", ["used", "free", "cached", "total"]
)
mock_virtual_memory.return_value = VirtualMemory(
used=1, free=2, cached=3, total=4
)
expected = [
_SystemMetricsResult({"state": "used"}, 1),
_SystemMetricsResult({"state": "free"}, 2),
_SystemMetricsResult({"state": "cached"}, 3),
]
self._test_metrics("system.memory.usage", expected)
@mock.patch("psutil.virtual_memory")
def test_system_memory_utilization(self, mock_virtual_memory):
VirtualMemory = namedtuple(
"VirtualMemory", ["used", "free", "cached", "total"]
)
mock_virtual_memory.return_value = VirtualMemory(
used=1, free=2, cached=3, total=4
)
expected = [
_SystemMetricsResult({"state": "used"}, 1 / 4),
_SystemMetricsResult({"state": "free"}, 2 / 4),
_SystemMetricsResult({"state": "cached"}, 3 / 4),
]
self._test_metrics("system.memory.utilization", expected)
@mock.patch("psutil.swap_memory")
def test_system_swap_usage(self, mock_swap_memory):
SwapMemory = namedtuple("SwapMemory", ["used", "free", "total"])
mock_swap_memory.return_value = SwapMemory(used=1, free=2, total=3)
expected = [
_SystemMetricsResult({"state": "used"}, 1),
_SystemMetricsResult({"state": "free"}, 2),
]
self._test_metrics("system.swap.usage", expected)
@mock.patch("psutil.swap_memory")
def test_system_swap_utilization(self, mock_swap_memory):
SwapMemory = namedtuple("SwapMemory", ["used", "free", "total"])
mock_swap_memory.return_value = SwapMemory(used=1, free=2, total=3)
expected = [
_SystemMetricsResult({"state": "used"}, 1 / 3),
_SystemMetricsResult({"state": "free"}, 2 / 3),
]
self._test_metrics("system.swap.utilization", expected)
@mock.patch("psutil.disk_io_counters")
def test_system_disk_io(self, mock_disk_io_counters):
DiskIO = namedtuple(
"DiskIO",
[
"read_count",
"write_count",
"read_bytes",
"write_bytes",
"read_time",
"write_time",
"read_merged_count",
"write_merged_count",
],
)
mock_disk_io_counters.return_value = {
"sda": DiskIO(
read_count=1,
write_count=2,
read_bytes=3,
write_bytes=4,
read_time=5,
write_time=6,
read_merged_count=7,
write_merged_count=8,
),
"sdb": DiskIO(
read_count=9,
write_count=10,
read_bytes=11,
write_bytes=12,
read_time=13,
write_time=14,
read_merged_count=15,
write_merged_count=16,
),
}
expected = [
_SystemMetricsResult({"device": "sda", "direction": "read"}, 3),
_SystemMetricsResult({"device": "sda", "direction": "write"}, 4),
_SystemMetricsResult({"device": "sdb", "direction": "read"}, 11),
_SystemMetricsResult({"device": "sdb", "direction": "write"}, 12),
]
self._test_metrics("system.disk.io", expected)
@mock.patch("psutil.disk_io_counters")
def test_system_disk_operations(self, mock_disk_io_counters):
DiskIO = namedtuple(
"DiskIO",
[
"read_count",
"write_count",
"read_bytes",
"write_bytes",
"read_time",
"write_time",
"read_merged_count",
"write_merged_count",
],
)
mock_disk_io_counters.return_value = {
"sda": DiskIO(
read_count=1,
write_count=2,
read_bytes=3,
write_bytes=4,
read_time=5,
write_time=6,
read_merged_count=7,
write_merged_count=8,
),
"sdb": DiskIO(
read_count=9,
write_count=10,
read_bytes=11,
write_bytes=12,
read_time=13,
write_time=14,
read_merged_count=15,
write_merged_count=16,
),
}
expected = [
_SystemMetricsResult({"device": "sda", "direction": "read"}, 1),
_SystemMetricsResult({"device": "sda", "direction": "write"}, 2),
_SystemMetricsResult({"device": "sdb", "direction": "read"}, 9),
_SystemMetricsResult({"device": "sdb", "direction": "write"}, 10),
]
self._test_metrics("system.disk.operations", expected)
@mock.patch("psutil.disk_io_counters")
def test_system_disk_time(self, mock_disk_io_counters):
DiskIO = namedtuple(
"DiskIO",
[
"read_count",
"write_count",
"read_bytes",
"write_bytes",
"read_time",
"write_time",
"read_merged_count",
"write_merged_count",
],
)
mock_disk_io_counters.return_value = {
"sda": DiskIO(
read_count=1,
write_count=2,
read_bytes=3,
write_bytes=4,
read_time=5,
write_time=6,
read_merged_count=7,
write_merged_count=8,
),
"sdb": DiskIO(
read_count=9,
write_count=10,
read_bytes=11,
write_bytes=12,
read_time=13,
write_time=14,
read_merged_count=15,
write_merged_count=16,
),
}
expected = [
_SystemMetricsResult(
{"device": "sda", "direction": "read"}, 5 / 1000
),
_SystemMetricsResult(
{"device": "sda", "direction": "write"}, 6 / 1000
),
_SystemMetricsResult(
{"device": "sdb", "direction": "read"}, 13 / 1000
),
_SystemMetricsResult(
{"device": "sdb", "direction": "write"}, 14 / 1000
),
]
self._test_metrics("system.disk.time", expected)
@mock.patch("psutil.net_io_counters")
def test_system_network_dropped_packets(self, mock_net_io_counters):
NetIO = namedtuple(
"NetIO",
[
"dropin",
"dropout",
"packets_sent",
"packets_recv",
"errin",
"errout",
"bytes_sent",
"bytes_recv",
],
)
mock_net_io_counters.return_value = {
"eth0": NetIO(
dropin=1,
dropout=2,
packets_sent=3,
packets_recv=4,
errin=5,
errout=6,
bytes_sent=7,
bytes_recv=8,
),
"eth1": NetIO(
dropin=9,
dropout=10,
packets_sent=11,
packets_recv=12,
errin=13,
errout=14,
bytes_sent=15,
bytes_recv=16,
),
}
expected = [
_SystemMetricsResult(
{"device": "eth0", "direction": "receive"}, 1
),
_SystemMetricsResult(
{"device": "eth0", "direction": "transmit"}, 2
),
_SystemMetricsResult(
{"device": "eth1", "direction": "receive"}, 9
),
_SystemMetricsResult(
{"device": "eth1", "direction": "transmit"}, 10
),
]
self._test_metrics("system.network.dropped_packets", expected)
@mock.patch("psutil.net_io_counters")
def test_system_network_packets(self, mock_net_io_counters):
NetIO = namedtuple(
"NetIO",
[
"dropin",
"dropout",
"packets_sent",
"packets_recv",
"errin",
"errout",
"bytes_sent",
"bytes_recv",
],
)
mock_net_io_counters.return_value = {
"eth0": NetIO(
dropin=1,
dropout=2,
packets_sent=3,
packets_recv=4,
errin=5,
errout=6,
bytes_sent=7,
bytes_recv=8,
),
"eth1": NetIO(
dropin=9,
dropout=10,
packets_sent=11,
packets_recv=12,
errin=13,
errout=14,
bytes_sent=15,
bytes_recv=16,
),
}
expected = [
_SystemMetricsResult(
{"device": "eth0", "direction": "receive"}, 4
),
_SystemMetricsResult(
{"device": "eth0", "direction": "transmit"}, 3
),
_SystemMetricsResult(
{"device": "eth1", "direction": "receive"}, 12
),
_SystemMetricsResult(
{"device": "eth1", "direction": "transmit"}, 11
),
]
self._test_metrics("system.network.packets", expected)
@mock.patch("psutil.net_io_counters")
def test_system_network_errors(self, mock_net_io_counters):
NetIO = namedtuple(
"NetIO",
[
"dropin",
"dropout",
"packets_sent",
"packets_recv",
"errin",
"errout",
"bytes_sent",
"bytes_recv",
],
)
mock_net_io_counters.return_value = {
"eth0": NetIO(
dropin=1,
dropout=2,
packets_sent=3,
packets_recv=4,
errin=5,
errout=6,
bytes_sent=7,
bytes_recv=8,
),
"eth1": NetIO(
dropin=9,
dropout=10,
packets_sent=11,
packets_recv=12,
errin=13,
errout=14,
bytes_sent=15,
bytes_recv=16,
),
}
expected = [
_SystemMetricsResult(
{"device": "eth0", "direction": "receive"}, 5
),
_SystemMetricsResult(
{"device": "eth0", "direction": "transmit"}, 6
),
_SystemMetricsResult(
{"device": "eth1", "direction": "receive"}, 13
),
_SystemMetricsResult(
{"device": "eth1", "direction": "transmit"}, 14
),
]
self._test_metrics("system.network.errors", expected)
@mock.patch("psutil.net_io_counters")
def test_system_network_io(self, mock_net_io_counters):
NetIO = namedtuple(
"NetIO",
[
"dropin",
"dropout",
"packets_sent",
"packets_recv",
"errin",
"errout",
"bytes_sent",
"bytes_recv",
],
)
mock_net_io_counters.return_value = {
"eth0": NetIO(
dropin=1,
dropout=2,
packets_sent=3,
packets_recv=4,
errin=5,
errout=6,
bytes_sent=7,
bytes_recv=8,
),
"eth1": NetIO(
dropin=9,
dropout=10,
packets_sent=11,
packets_recv=12,
errin=13,
errout=14,
bytes_sent=15,
bytes_recv=16,
),
}
expected = [
_SystemMetricsResult(
{"device": "eth0", "direction": "receive"}, 8
),
_SystemMetricsResult(
{"device": "eth0", "direction": "transmit"}, 7
),
_SystemMetricsResult(
{"device": "eth1", "direction": "receive"}, 16
),
_SystemMetricsResult(
{"device": "eth1", "direction": "transmit"}, 15
),
]
self._test_metrics("system.network.io", expected)
@mock.patch("psutil.net_connections")
def test_system_network_connections(self, mock_net_connections):
NetConnection = namedtuple(
"NetworkConnection", ["family", "type", "status"]
)
Type = namedtuple("Type", ["value"])
mock_net_connections.return_value = [
NetConnection(
family=1,
status="ESTABLISHED",
type=Type(value=2),
),
NetConnection(
family=1,
status="ESTABLISHED",
type=Type(value=1),
),
]
expected = [
_SystemMetricsResult(
{
"family": 1,
"protocol": "udp",
"state": "ESTABLISHED",
"type": Type(value=2),
},
1,
),
_SystemMetricsResult(
{
"family": 1,
"protocol": "tcp",
"state": "ESTABLISHED",
"type": Type(value=1),
},
1,
),
]
self._test_metrics("system.network.connections", expected)
@mock.patch("psutil.Process.memory_info")
def test_runtime_memory(self, mock_process_memory_info):
PMem = namedtuple("PMem", ["rss", "vms"])
mock_process_memory_info.configure_mock(
**{"return_value": PMem(rss=1, vms=2)}
)
expected = [
_SystemMetricsResult({"type": "rss"}, 1),
_SystemMetricsResult({"type": "vms"}, 2),
]
self._test_metrics(f"runtime.{self.implementation}.memory", expected)
@mock.patch("psutil.Process.cpu_times")
def test_runtime_cpu_time(self, mock_process_cpu_times):
PCPUTimes = namedtuple("PCPUTimes", ["user", "system"])
mock_process_cpu_times.configure_mock(
**{"return_value": PCPUTimes(user=1.1, system=2.2)}
)
expected = [
_SystemMetricsResult({"type": "user"}, 1.1),
_SystemMetricsResult({"type": "system"}, 2.2),
]
self._test_metrics(f"runtime.{self.implementation}.cpu_time", expected)
@mock.patch("gc.get_count")
def test_runtime_get_count(self, mock_gc_get_count):
mock_gc_get_count.configure_mock(**{"return_value": (1, 2, 3)})
expected = [
_SystemMetricsResult({"count": "0"}, 1),
_SystemMetricsResult({"count": "1"}, 2),
_SystemMetricsResult({"count": "2"}, 3),
]
self._test_metrics(f"runtime.{self.implementation}.gc_count", expected)
|
the-stack_106_23463 | from convokit import Corpus, CorpusObject, Transformer
from typing import Callable
from sklearn.feature_extraction.text import CountVectorizer as CV
class BoWTransformer(Transformer):
"""
Bag-of-Words Transformer for annotating a Corpus's objects with the bag-of-words vectorization
of some textual element.
- For utterances, this would be the utterance text.
- For conversations, this would be joined texts of all the utterances in the conversation
- For speakers, this would be the joined texts of all the utterances by the speaker
Compatible with any type of vectorizer (e.g. bag-of-words, TF-IDF, etc)
Runs on the Corpus's Speakers, Utterances, or Conversations (as specified by obj_type)
:param obj_type: "speaker", "utterance", or "conversation"
    :param vectorizer: a sklearn vectorizer object; default is CountVectorizer(min_df=10, max_df=.5, ngram_range=(1, 1),
binary=False, max_features=15000)
:param vector_name: the name of the metadata key to store the vector under
:param text_func: an optional (lambda) function to extract the textual element from the Corpus object, see
defaults above.
"""
def __init__(self, obj_type: str, vectorizer=None, vector_name="bow_vector",
text_func: Callable[[CorpusObject], str] = None):
if vectorizer is None:
print("Initializing default unigram CountVectorizer...", end="")
self.vectorizer = CV(decode_error='ignore', min_df=10, max_df=.5,
ngram_range=(1, 1), binary=False, max_features=15000)
print("Done.")
else:
self.vectorizer = vectorizer
self.obj_type = obj_type
self.vector_name = vector_name
if text_func is None:
if obj_type == "utterance":
self.text_func = lambda utt: utt.text
elif obj_type == "conversation":
self.text_func = lambda convo: " ".join(utt.text for utt in convo.iter_utterances())
elif obj_type == "speaker":
self.text_func = lambda speaker: " ".join(utt.text for utt in speaker.iter_utterances())
else:
raise ValueError("Invalid corpus object type. Use 'utterance', 'conversation', or 'speaker'")
else:
self.text_func = text_func
def fit(self, corpus: Corpus, y=None, selector: Callable[[CorpusObject], bool] = lambda x: True):
"""
Fit the Transformer's internal vectorizer on the Corpus objects' texts, with an optional selector that filters for objects to be fit on.
:param corpus: the target Corpus
:param selector: a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
:return: the fitted BoWTransformer
"""
# collect texts for vectorization
docs = []
for obj in corpus.iter_objs(self.obj_type, selector):
docs.append(self.text_func(obj))
self.vectorizer.fit(docs)
return self
def transform(self, corpus: Corpus, selector: Callable[[CorpusObject], bool] = lambda x: True) -> Corpus:
"""
Annotate the corpus objects with the vectorized representation of the object's text, with an optional
selector that filters for objects to be transformed. Objects that are not selected will get a metadata value
of 'None' instead of the vector.
:param corpus: the target Corpus
:param selector: a (lambda) function that takes a Corpus object and returns True or False (i.e. include / exclude). By default, the selector includes all objects of the specified type in the Corpus.
:return: the target Corpus annotated
"""
for obj in corpus.iter_objs(self.obj_type):
if selector(obj):
obj.meta[self.vector_name] = self.vectorizer.transform([self.text_func(obj)])
else:
obj.meta[self.vector_name] = None
return corpus
def fit_transform(self, corpus: Corpus, y=None, selector: Callable[[CorpusObject], bool] = lambda x: True) -> Corpus:
self.fit(corpus, selector=selector)
return self.transform(corpus, selector=selector)
def get_vocabulary(self):
"""
Get the vocabulary of the vectorizer object
"""
return self.vectorizer.get_feature_names()
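# Minimal usage sketch (not part of the original module; the corpus name and network download are
# assumptions): fit on all utterances and read the stored vector back from utterance metadata.
if __name__ == "__main__":
    from convokit import download
    demo_corpus = Corpus(filename=download("subreddit-Cornell"))
    bow = BoWTransformer(obj_type="utterance")
    demo_corpus = bow.fit_transform(demo_corpus)
    first_utt = next(demo_corpus.iter_utterances())
    print(first_utt.meta["bow_vector"].shape)  # a 1 x vocabulary-size sparse matrix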
|
the-stack_106_23465 | from collections import namedtuple, OrderedDict
from input import parse
import tensorflow as tf
import numpy as np
import json
import copy
import gensim
import subprocess
import re
import logging
def parsing(file, wordvecpath, numpypath, ckptpath,Yp, Yd, H, X, XL, Hin, keep_prob):
logging.info('parsing started')
f = open(file, 'r')
with open('./tmpdata/deprel.json', 'r') as fp:
deps = json.load(fp)
ndeprel = len(deps)
with open('./tmpdata/all.json', 'r') as fp:
dictionary2 = json.load(fp)
mode = gensim.models.Word2Vec.load(wordvecpath)
vecdims = mode.layer1_size
vecdims = vecdims + 11 + 2 + 2
model = mode.wv
Arcs = namedtuple('Arcs', ['headid', 'headform', 'tailid', 'tailform', 'deprel'])
Transition = namedtuple('Transition', ['transition', 'label'])
writefile = open('output.conll', 'w')
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess,ckptpath)
inH = np.load(numpypath)
sid=0
buffer1 = []
stack = []
arcs = []
data = f.read()
for sent in parse(data):
del buffer1[:]
del stack[:]
del arcs[:]
buffer1 = copy.deepcopy(sent)
buffer1.append(OrderedDict(
[("id", 0), ("form", 'root'), ("lemma", 'root'), ("upostag", 'root'), ("xpostag", 'root'), ("feats", 'root'), ("head", -1),
("deprel", 'root'), ("deps", 'root'), ("misc", 'root'), ]))
while buffer1:
transi, label, inH = oracle(stack, buffer1, arcs, dictionary2, model,sent,sess,inH, vecdims,X,XL,Hin,keep_prob, H, Yp,Yd)
# print(label)
trans = Transition(transi, label)
if trans.transition == 0: # SHIFT
stack.insert(0, buffer1[0])
del buffer1[0]
elif trans.transition == 1: # REDUCE
if stack : del stack[0]
elif trans.transition == 2: # LERFT ARC
if stack :
arcs.append(Arcs(buffer1[0]['id'], buffer1[0]['form'], stack[0]['id'], stack[0]['form'], trans.label))
del stack[0]
else:
stack.insert(0, buffer1[0])
del buffer1[0]
elif trans.transition == 3: # RIGHT ARC
if stack and buffer1:
arcs.append(Arcs(stack[0]['id'], stack[0]['form'], buffer1[0]['id'], buffer1[0]['form'], trans.label))
stack.insert(0, buffer1[0])
del buffer1[0]
else :
stack.insert(0, buffer1[0])
del buffer1[0]
else :
stack.insert(0, buffer1[0])
del buffer1[0]
# print(arcs)
# print(sent)
attacharclabel(sent, arcs)
# print(sent)
for s in sent:
reverseparse(s['id'], s['form'], s['lemma'], s['upostag'], s['xpostag'], s['feats'], s['head'], s['deprel'], s['deps'], s['misc'], writefile)
writefile.write("\n")
sid+=1
logging.info('parsing sentence : '+str(sid))
sess.close()
writefile.close()
# removelastline("output.conll")
logging.info('parsing complete')
las,uas,la = evaluate()
return las,uas,la
def oracle(stack, buffer1, arcs, dictionary2, model,sent,sess,inH, vecdims,X,XL,Hin,keep_prob, H, Yp,Yd):
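    # Build the feature vector for the current configuration (top two stack items, first three
    # buffer items, their feats/POS ids and positions, plus four dependency-label features) and
    # run one RNN step to predict the transition, its dependency label and the new hidden state.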
mones = [-1] * vecdims
ones = [1] * (vecdims - 4)
zeros = [0] * (vecdims - 15)
dep = [-1] * 4
sentenc = np.array([])
words = ["_", "_", "_", "_", "_"]
if stack:
words.pop(0)
words.insert(0, stack[0])
dep[0] = iofdeprel(rightchild(stack[0], arcs))
dep[1] = iofdeprel(leftchild(stack[0], arcs))
if len(stack) > 1:
words.pop(1)
words.insert(1, stack[1])
if buffer1:
words.pop(2)
words.insert(2, buffer1[0])
dep[2] = iofdeprel(rightchild(buffer1[0], arcs))
dep[3] = iofdeprel(leftchild(buffer1[0], arcs))
if len(buffer1) > 1:
words.pop(3)
words.insert(3, buffer1[1])
if len(buffer1) > 2:
words.pop(4)
words.insert(4, buffer1[2])
for w in words:
if w == '_':
sentenc = np.hstack((sentenc, mones))
elif w['form'] == 'root':
sentenc = np.hstack((sentenc, ones, D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] in model.vocab:
sentenc = np.hstack((sentenc, model[w['form']], featureids(w['feats'], dictionary2),D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
elif w['form'] is not None:
sentenc = np.hstack((sentenc, zeros, featureids(w['feats'], dictionary2), D(w['upostag'], dictionary2), D(w['xpostag'], dictionary2), w['id'], len(sent)))
else:
sentenc = np.hstack((sentenc, mones))
sentenc = np.hstack((sentenc, dep))
line = sentenc.reshape([1, -1, vecdims*5+4])
t,depre, inH = sess.run([Yp,Yd,H], feed_dict={X: line, XL: [1], Hin: inH, keep_prob:1.0})
dl = riofdeprel(int(depre))
# print (dl,t,depre, inH)
return int(t), dl, inH
def D(key, dic):
if dic.get(key): return dic[key]
return -1;
def reverseparse(id, form, lemma, upostag, xpostag, feats, head, deprel, deps, misc, f):
filewrite(f, str(id))
filewrite(f, str(form))
filewrite(f, str(lemma))
filewrite(f, str(upostag))
filewrite(f, str(xpostag))
str1=""
    for feat in feats.items():
if feat[1]:
str1 += (feat[0] + '-' + feat[1] + '|')
str1=str1[:-1]
filewrite(f, str(str1))
filewrite(f, str(head))
filewrite(f, str(deprel))
filewrite(f, str(deps))
f.write(str(misc))
f.write("\n")
def filewrite(f, str):
if str : f.write(str)
else: f.write('_')
f.write('\t')
def riofdeprel(id):
with open('./tmpdata/deprel.json', 'r') as fp:
dic = json.load(fp)
dic = dict((v, k) for k, v in dic.items())
if id in dic:
return dic[id]
else:
return ""
def rightchild(stackc, arcs):
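    # Return the deprel of the right-most dependent of ``stackc`` found in ``arcs`` ("" if none).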
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid > stackc['id']:
if id==-1 :
id=a.tailid
deprel=a.deprel
else :
if id < a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def leftchild(stackc, arcs):
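    # Return the deprel of the left-most dependent of ``stackc`` found in ``arcs`` ("" if none).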
id=-1
deprel=""
for a in arcs :
if a.headid == stackc['id'] and a.tailid < stackc['id'] :
            if id == -1:  # no left child recorded yet (mirrors rightchild)
id = a.tailid
deprel = a.deprel
else :
if id > a.tailid :
id = a.tailid
deprel = a.deprel
return deprel
def headd(stackc, arcs):
for a in arcs:
if a.headid == stackc['head']:
return a.headid, a.deprel
return None,""
def featureids(feats1, dic):
f=[]
for k in feats1 :
if k is not None : f.append(D(feats1[k], dic))
else: f.append(-1)
return f
def iofdeprel(deprel):
with open('./tmpdata/deprel.json', 'r') as fp:
dic = json.load(fp)
if deprel in dic:
return dic[deprel]
else:
return -1
def attacharclabel(sent, arcs):
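    # Default every token to head 0 / 'root', then overwrite head and deprel from the predicted arcs.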
for s in sent:
s['head'] =0
s['deprel']='root'
for a in arcs:
sent[a.tailid-1]['head']=a.headid
sent[a.tailid-1]['deprel'] = a.deprel
return sent
def removelastline(f):
readfile = open(f)
lines = readfile.readlines()
readfile.close()
w = open(f,'w')
w.writelines([i for i in lines[:-1]])
w.close()
return
|
the-stack_106_23466 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
from datetime import date
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("nondefaced_detector"))
import nondefaced_detector
# -- Project information -----------------------------------------------------
project = "nondefaced-detector"
copyright = "2021-{}, Developers of nondefaced-detector".format(date.today().year)
author = "Developers of nondefaced-detector"
currentdir = os.path.abspath(os.path.dirname(__file__))
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.autosectionlabel",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"numpydoc",
"sphinx_rtd_theme",
]
intersphinx_mapping = {
"https://docs.python.org/3.5": None,
"https://docs.scipy.org/doc/numpy": None,
"https://docs.scipy.org/doc/scipy/reference": None,
"https://matplotlib.org/": None,
"https://scikit-learn.org/0.17": None,
"https://nipy.org/nibabel/": None,
"https://pandas.pydata.org/pandas-docs/stable/": None,
"https://neurosynth.readthedocs.io/en/latest/": None,
}
intersphinx_timeout = 5
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "2.2.0"
# Generate stubs
autosummary_generate = True
autodoc_default_flags = ["members", "inherited-members"]
add_module_names = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The short X.Y version.
version = nondefaced_detector.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If false, no module index is generated.
html_domain_indices = False
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "nondefaced_detector", "nondefaced-detector Documentation", [author], 1)
]
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"nondefaced_detector",
"nondefaced-detector Documentation",
author,
"nondefaced_detector",
"One line description of project.",
"Miscellaneous",
)
]
# If false, no module index is generated.
texinfo_domain_indices = False
|
the-stack_106_23467 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: ProxyApi.py
Description :
Author : JHao
date: 2016/12/4
-------------------------------------------------
Change Activity:
2016/12/4:
-------------------------------------------------
"""
__author__ = 'JHao'
import sys
from werkzeug.wrappers import Response
from flask import Flask, render_template, jsonify, request
from DB.mysql_connector import OpenMySQL as conn
from Config.ConfigGetter import config
from Manager.ProxyManager import ProxyManager
sys.path.append('../')
app = Flask(__name__)
class JsonResponse(Response):
@classmethod
def force_type(cls, response, environ=None):
if isinstance(response, (dict, list)):
response = jsonify(response)
return super(JsonResponse, cls).force_type(response, environ)
app.response_class = JsonResponse
app.jinja_env.auto_reload = True
api_list = {
    'get': u'get a usable proxy',
    # 'refresh': u'refresh proxy pool',
    'get_all': u'get all proxies from the proxy pool',
    'delete?proxy=127.0.0.1:8080': u'delete an unusable proxy',
'get_status': u'proxy statistics'
}
db = conn()
def get_data():
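    # Read (date_time, count) rows from proxy_time_table and reshape them into parallel lists for the chart view.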
data = db.select_all(sql="""select date_time, count from proxy_time_table;""", db="proxy")
if data:
datetime = []
count = []
dic = dict()
for i in data:
datetime.append(str(i.get("date_time")))
count.append(str(i.get("count")))
dic["date_time"] = datetime
dic["count"] = count
return dic
@app.route('/')
def index():
return api_list
@app.route('/chart/')
def chart():
data = get_data()
date_time = data.get("date_time")
count = data.get("count")
return render_template("./show.html", date_time=date_time, count=count)
@app.route('/get/')
def get():
proxy = ProxyManager().get()
return proxy if proxy else 'no proxy!'
@app.route('/refresh/')
def refresh():
    # TODO: refresh is run periodically by a daemon process; calling it directly through the API performs poorly, so it is not used for now
# ProxyManager().refresh()
pass
return 'success'
@app.route('/get_all/')
def getAll():
proxies = ProxyManager().getAll()
return proxies
@app.route('/delete/', methods=['GET'])
def delete():
proxy = request.args.get('proxy')
ProxyManager().delete(proxy)
return 'success'
@app.route('/get_status/')
def getStatus():
status = ProxyManager().getNumber()
return status
def run():
app.run(host=config.host_ip, port=config.host_port)
if __name__ == '__main__':
run()
|
the-stack_106_23471 | # Copyright 2019 Kakao Brain
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
"""Static skip connection layout of ``@skippable`` modules."""
from typing import Dict, Iterable, List, Tuple
from torch import nn
from .namespace import Namespace
__all__: List[str] = []
class SkipLayout:
"""Represents a skip connection layout across partitions."""
# Skip routes indexed by 'ns, name': {(ns, name): (prev_j, next_j), ...}
by_ns_name: Dict[Tuple[Namespace, str], Tuple[int, int]]
# Skip routes indexed by partition number 'j': [[next_j]: [(prev_j, ns, name), ...], ...]
by_partition: List[List[Tuple[int, Namespace, str]]]
def __init__(self, num_partitions: int, skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]],) -> None:
# The skip routes are already indexed by 'ns, name'.
self.by_ns_name = skip_routes
# Index skip routes by partition number 'j'.
self.by_partition = [[] for _ in range(num_partitions)]
for (ns, name), (prev_j, next_j) in skip_routes.items():
self.by_partition[next_j].append((prev_j, ns, name))
for p in self.by_partition:
p.sort()
def copy_policy(self, next_j: int) -> Iterable[Tuple[int, Namespace, str]]:
"""Generates skip routes for the given destination partition number.
The skip routes are sorted by source partition number in ascending
order.
Yields:
Each tuple of (source partition number, namespace, name).
"""
for prev_j, ns, name in self.by_partition[next_j]:
if prev_j == next_j:
# This skip tensor will be popped at the same partition where
# it is stashed. In this case, copy is not required.
continue
yield (prev_j, ns, name)
def requires_copy(self, ns: Namespace, name: str) -> bool:
"""Whether the given namespace and name requires partition-to-partition
copy or not.
"""
prev_j, next_j = self.by_ns_name.get((ns, name), (-1, -1))
return prev_j != next_j
def inspect_skip_layout(partitions: List[nn.Sequential]) -> SkipLayout:
"""Inspects the skip connection layout in the given partitions."""
# NOTE(sublee): Hide circular import inside this subroutine. Circular
# import is not ideal but placing this logic near to SkipLayout may
# increase cohesion of code.
from .skippable import Skippable
skip_routes: Dict[Tuple[Namespace, str], Tuple[int, int]] = {}
stashed_at: Dict[Tuple[Namespace, str], int] = {}
for j, partition in enumerate(partitions):
def inspect_layer(layer):
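            # Record the partition where each skip tensor is stashed and, when it is popped,
            # register the (stash partition -> pop partition) route.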
if not isinstance(layer, Skippable):
return
for ns, name in layer.stashable():
stashed_at[(ns, name)] = j
for ns, name in layer.poppable():
prev_j = stashed_at.pop((ns, name))
skip_routes[(ns, name)] = (prev_j, j)
if isinstance(partition, nn.Sequential):
for layer in partition:
inspect_layer(layer)
else:
inspect_layer(partition)
return SkipLayout(len(partitions), skip_routes)
|
the-stack_106_23472 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 22:18:02 2020
@author: Soundarya Ganesh
"""
import sys
import random
import threading
import json
from socket import *
import time
import numpy as np
from time import *
from datetime import *
import os
w_id = sys.argv[2]
class Task:
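    """A task assigned by the master, identified by (job_id, task_id) with its remaining run time in seconds."""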
def __init__(self, job_id, task_id, remaining_time):
self.job_id = job_id
self.task_id = task_id
self.remaining_time = remaining_time
def reduce(self):
self.remaining_time -= 1
def time_check(self):
remaining_time = self.remaining_time
if (remaining_time == 0):
return 1
else:
return 0
def func_connect_est(path2file, flag, host, port): # 0-incoming 1-outgoing
with open(path2file, "a+") as fp:
if(flag == 0):
print_lock.acquire()
fp.write(str(datetime.now(
)) + "\tIN: Connection Established :host is {0}, port used is {1}]\n".format(host, port))
print_lock.release()
elif(flag == 1):
print_lock.acquire()
fp.write(str(datetime.now(
)) + "\tOUT: Connection Established :host is {0}, port used is {1}]\n".format(host, port))
print_lock.release()
def func_log_worker_start(path2file, i, port, slot):
with open(path2file, "a+") as f:
print_lock.acquire()
f.write(str(datetime.now()) + ":\tWorker has started = [worker_id:{0}, port:{1}, slots:{2}]\n".format(i,
port,slot))
print_lock.release()
def func_log_update_exec(path2file,flag, job_id, task_id):
with open(path2file, "a+") as f:
if(flag == 0):
print_lock.acquire()
f.write(str(datetime.now()) + ":\tUpdate sent to master = [job_id:{0}, task_id:{1}] completed\n".format(job_id, task_id))
print_lock.release()
if(flag==1):
print_lock.acquire()
f.write(str(datetime.now()) + ":\tFinished executing task = [job_id:{0}, task_id:{1}]\n".format(job_id, task_id))
print_lock.release()
if(flag==2):
print_lock.acquire()
f.write(str(datetime.now()) + ":\tStarted executing task = [job_id:{0}, task_id:{1}]\n".format(job_id,task_id))
print_lock.release()
def func_log_receive(path2file, job_id, task_id, duration): #check
with open(path2file, "a+") as f:
print_lock.acquire()
f.write(str(datetime.now()) + ":\tReceived task = [job_id:{0}, task_id:{1}, duration:{2}]\n".format(
job_id,
task_id,
duration))
print_lock.release()
def start_execute_task(w_id, task_data):
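    # Log the task start and place it into the first free slot of this worker's execution pool.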
global job_id, task_id, remaining_time
task = Task(task_data["job_id"],
task_data["task_id"], task_data["duration"])
job_id = task_data["job_id"]
task_id = task_data["task_id"]
remaining_time = task_data["duration"]
n=len(execn_pool)
path2file= "proj_log/worker_" + str(w_id) + ".txt"
func_log_update_exec(path2file,2, task_data["job_id"], task_data["task_id"])
for i in range(n):
if (isinstance(execn_pool[w_id-1][i], int) and (execn_pool[w_id-1][i] == 0 and num_free_slots[w_id-1] > 0)):
execn_pool[w_id-1][i] = task
num_free_slots[w_id-1] -= 1
break
def func_receive_task_start(w_id):
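    # Listen on this worker's port for task-launch messages from the master and queue each task for execution.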
skt = socket(AF_INET, SOCK_STREAM)
with skt:
skt.bind(("localhost", ports[w_id-1]))
skt.listen(1024)
while (1):
connectn, addr = skt.accept()
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_connect_est(path2file, 0, addr[0], addr[1])
with connectn:
task_start_data = connectn.recv(1024).decode()
if task_start_data:
task = json.loads(task_start_data)
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_log_receive(path2file, task["job_id"], task["task_id"], task["duration"])
workerLock.acquire()
start_execute_task(w_id, task)
workerLock.release()
def worker_task_execute(w_id):
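    # Once per second, decrement the remaining time of every running task; when a task reaches zero,
    # notify the master on port 5001 and free the slot.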
while (1):
if(slots[w_id-1] == 0):
continue
for i in range(slots[w_id-1]):
if (isinstance(execn_pool[w_id-1][i], int) and execn_pool[w_id-1][i] == 0):
continue
elif (execn_pool[w_id-1][i].time_check()):
task = execn_pool[w_id-1][i]
job_id = task.job_id
task_id = task.task_id
store_t_data = {
"worker_id": w_id,
"job_id": job_id,
"task_id": task_id
}
path2file= "proj_log/worker_"+str(w_id)+".txt"
func_log_update_exec(path2file,1, job_id, task_id)
workerLock.acquire()
#remove_task(w_id, i)
execn_pool[w_id-1][i] = 0
num_free_slots[w_id-1] += 1
workerLock.release()
t_data = json.dumps(store_t_data)
skt = socket(AF_INET, SOCK_STREAM)
with skt:
skt.connect(('localhost', 5001))
path2file="proj_log/worker_"+str(w_id)+".txt"
func_connect_est(path2file, 1, "localhost", "5001")
skt.send(t_data.encode())
path2file="proj_log/worker_"+str(w_id)+".txt"
func_log_update_exec(path2file,0, job_id, task_id)
else:
workerLock.acquire()
execn_pool[w_id-1][i].reduce()
workerLock.release()
sleep(1)
workerLock = threading.Lock()
print_lock = threading.Lock()
slots = list()
ports = list()
num_free_slots = list()
execn_pool = list()
with open("config.json") as f:
config = json.load(f)
for worker in config['workers']:
ports.append(worker['port'])
slots.append(worker['slots'])
num_free_slots.append(worker['slots'])
execn_pool.append([0 for i in range(worker['slots'])])
count = len(ports)
for i in range(count):
path2file="proj_log/worker_" + str(w_id)+ ".txt"
func_log_worker_start(path2file,i+1,ports[i],slots[i])
if __name__ == "__main__":
try:
os.mkdir('proj_log')
except:
pass
f = open("proj_log/worker_"+w_id+".txt", "w")
f.close()
if (len(sys.argv) != 3):
sys.exit()
worker_port = int(sys.argv[1])
worker_id = int(sys.argv[2])
ports[worker_id - 1] = worker_port
thread_receive_task_start = threading.Thread(
target=func_receive_task_start, args=(worker_id,))
thread_task_execute = threading.Thread(
target=worker_task_execute, args=(worker_id,))
thread_receive_task_start.start()
thread_task_execute.start()
thread_receive_task_start.join()
thread_task_execute.join()
|
the-stack_106_23474 | """
api.video
api.video is an API that encodes on the go to facilitate immediate playback, enhancing viewer streaming experiences across multiple devices and platforms. You can stream live or on-demand online videos within minutes. # noqa: E501
Contact: [email protected]
"""
import re # noqa: F401
import sys # noqa: F401
from apivideo.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class PlayerSessionEvent(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'type': (str,), # noqa: E501
'emitted_at': (datetime,), # noqa: E501
'at': (int,), # noqa: E501
'_from': (int,), # noqa: E501
'to': (int,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'type': 'type', # noqa: E501
'emitted_at': 'emittedAt', # noqa: E501
'at': 'at', # noqa: E501
'_from': 'from', # noqa: E501
'to': 'to', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""PlayerSessionEvent - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
type (str): Possible values are: ready, play, pause, resume, seek.backward, seek.forward, end. [optional] # noqa: E501
emitted_at (datetime): When an event occurred, presented in ISO-8601 format.. [optional] # noqa: E501
at (int): [optional] # noqa: E501
_from (int): [optional] # noqa: E501
to (int): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
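# Minimal usage sketch (hypothetical values; in normal use instances are created
# by the generated client when deserializing analytics responses):
#     event = PlayerSessionEvent(type="play", at=5)
#     event.type  # -> "play"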
|
the-stack_106_23475 | # Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
    matrix, instead of constructing a sparse matrix for every row, which is
    expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True. Needed to retrieve
        the final subclusters.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
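        # dist_matrix holds ||centroid_i||^2 - 2 * <centroid_i, c>, i.e. the squared
        # distance ||centroid_i - c||^2 up to the constant ||c||^2, so the argmin
        # below still selects the closest subcluster.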
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
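        # The expression below simplifies to new_ss / new_n - ||new_centroid||^2,
        # i.e. the mean squared distance of the merged samples from their centroid.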
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- `sklearn.cluster` Estimator : If a model is provided, the model is
fit treating the subclusters as new samples and the initial data is
mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
# Cannot vectorize. Enough to convince to use cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray, shape(n_samples)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
|
the-stack_106_23479 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with an equivalent malleability clone
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
# All nodes should start with 12,500 XBX:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 XBX serialized is 00902f5009000000
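        # (400 XBX * 1e8 base units = 40000000000 = 0x09502f9000, little-endian over 8 bytes)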
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or
rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500XBX for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 12190 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 XBX for 2 matured,
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 12190 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 290 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 12190
+ fund_foo_tx["fee"]
- 290
+ fund_bar_tx["fee"]
+ 1000)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
|
the-stack_106_23481 | import argparse # noqa
from typing import List, Tuple, Union
import vapoursynth as vs
from lvsfunc.misc import source
from lvsfunc.types import Range
from vardautomation import FileInfo, PresetAAC, PresetWEB, VPath
from project_module import encoder as enc
from project_module import flt # noqa
core = vs.core
make_wraw: bool = False # Create a workraw
enc_type = 'Premux' if not make_wraw else 'wraw'
EP_NUM = __file__[-5:-3]
# Sources
JP_clip = FileInfo(f'sources/{EP_NUM}/[NC-Raws] 迦希女王不会放弃! - {EP_NUM} [B-Global][WEB-DL][1080p][AVC AAC][ENG_TH_SRT][MKV].mkv',
idx=lambda x: source(x, force_lsmas=True, cachedir=''),
preset=[PresetWEB, PresetAAC])
JP_clip.name_file_final = VPath(f"{enc_type.lower()}/Jahy_{EP_NUM} ({enc_type}).mkv")
JP_clip.name_clip_output = VPath(JP_clip.name + '.265')
JP_clip.do_qpfile = True
# Common variables
# OP/ED frames
opstart: Union[int, bool] = 912
edstart: Union[int, bool] = 28576
freeze_ranges: List[List[int]] = [ # [start_frame, end_frame, frame]
[opstart, opstart+18, opstart],
[opstart+87, opstart+96, opstart+87],
[opstart+201, opstart+207, opstart],
[opstart+238, opstart+244, opstart],
]
hardsub_sign: List[Range] = [ # Leftover hardsubbed signs that need a stronger mask
]
replace_scenes: List[Range] = [ # List of scenes to replace
]
def pre_freeze() -> vs.VideoNode:
"""Performing some freezeframing in the OP at the Typesetter's request"""
from adjust import Tweak
from vsutil import insert_clip
src = JP_clip.clip_cut
if opstart:
freeze = core.std.FreezeFrames(
src,
[s[0] for s in freeze_ranges],
[e[1] for e in freeze_ranges],
[f[2] for f in freeze_ranges]
)
to_adjust = freeze[opstart+87]
adjust = Tweak(to_adjust, hue=-18)
adjust = adjust * (freeze_ranges[2][1] - freeze_ranges[2][0] + 1)
insert = insert_clip(freeze, adjust, freeze_ranges[2][0])
else:
insert = src
return insert
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
"""Regular VapourSynth filterchain"""
import EoEfunc as eoe
import havsfunc as haf
import lvsfunc as lvf
import vardefunc as vdf
from adptvgrnMod import adptvgrnMod
from ccd import ccd
from muvsfunc import SSIM_downsample
from vsutil import depth, get_y, iterate
src = pre_freeze().std.AssumeFPS(fpsnum=24000, fpsden=1001)
src = depth(src, 16)
# TO-DO: Figure out how they post-sharpened it. Probably some form of unsharpening?
src_y = depth(get_y(src), 32)
descale = lvf.kernels.Bicubic(b=0, c=3/4).descale(src_y, 1440, 810)
double = vdf.scale.nnedi3cl_double(descale, pscrn=1)
rescale = depth(SSIM_downsample(double, 1920, 1080), 16)
scaled = vdf.misc.merge_chroma(rescale, src)
denoise = core.knlm.KNLMeansCL(scaled, d=1, a=3, s=4, h=0.4, channels='Y')
stab = haf.GSMC(denoise, radius=2, planes=[0])
cdenoise = ccd(stab, threshold=5, matrix='709')
decs = vdf.noise.decsiz(cdenoise, sigmaS=4, min_in=208 << 8, max_in=232 << 8)
dehalo = haf.YAHR(decs, blur=2, depth=32)
halo_mask = lvf.mask.halo_mask(decs, rad=3, brz=0.3, thma=0.42)
dehalo_masked = core.std.MaskedMerge(decs, dehalo, halo_mask)
dehalo_min = core.std.Expr([dehalo_masked, decs], "x y min")
aa = lvf.aa.nneedi3_clamp(dehalo_min, strength=1.5)
# Some scenes have super strong aliasing that I really don't wanna scenefilter until BDs. Thanks, Silver Link!
aa_strong = lvf.sraa(dehalo_min, rfactor=1.35)
aa_spliced = lvf.rfs(aa, aa_strong, [(3453, 3542), (19348, 19442)])
upscale = lvf.kernels.Bicubic(b=0, c=3/4).scale(descale, 1920, 1080)
credit_mask = lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08)
credit_mask = iterate(credit_mask, core.std.Deflate, 3)
credit_mask = iterate(credit_mask, core.std.Inflate, 3)
credit_mask = iterate(credit_mask, core.std.Maximum, 2)
merge_credits = core.std.MaskedMerge(aa_spliced, src, depth(credit_mask, 16))
deband = flt.masked_f3kdb(merge_credits, rad=18, thr=32, grain=[24, 0])
grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.15, luma_scaling=10,
size=1.25, sharp=80, static=True, grain_chroma=False)
return grain
def wraw_filterchain() -> vs.VideoNode:
"""Workraw filterchain with minimal filtering"""
from vardefunc.deband import dumb3kdb
from vsutil import depth
src: vs.VideoNode = pre_freeze()
src = depth(src, 16)
deband = dumb3kdb(src, radius=16, threshold=30, grain=16)
grain: vs.VideoNode = core.grain.Add(deband, 0.15)
return grain
if __name__ == '__main__':
FILTERED = filterchain() if not make_wraw else wraw_filterchain()
enc.Encoder(JP_clip, FILTERED).run(wraw=make_wraw, make_comp=False, clean_up=True, ep_num=EP_NUM) # type: ignore
elif __name__ == '__vapoursynth__':
FILTERED = filterchain()
if not isinstance(FILTERED, vs.VideoNode):
for i, CLIP_FILTERED in enumerate(FILTERED, start=1):
CLIP_FILTERED.set_output(i)
else:
FILTERED.set_output(1)
else:
JP_clip.clip_cut.set_output(0)
FILTERED = filterchain() if not make_wraw else wraw_filterchain()
if not isinstance(FILTERED, vs.VideoNode):
for i, clip_filtered in enumerate(FILTERED, start=1):
if clip_filtered:
clip_filtered.set_output(i)
else:
FILTERED.set_output(1)
|
the-stack_106_23483 |
# Version: 0.15
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
  an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
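For example, in a checkout that is two commits past the "0.11" tag (the same
situation as the "pep440" example above), the returned dictionary might look
roughly like this:
    {'version': '0.11+2.g1076c97',
     'full-revisionid': '1076c978a8d3cfc70f408fe5974aa6c092c949ac',
     'dirty': False,
     'error': None}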
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section strings, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
pass
def get_root():
# we require that all commands are run from the project root, i.e. the
# directory that contains setup.py, setup.cfg, and versioneer.py .
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
pass
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
            # shell=True because with 3.4.4 this does not always work with shell=False
p = subprocess.Popen(' '.join([c] + args), shell=True, cwd=cwd,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
            # shell=True because with 3.4.4 this does not always work with shell=False
p = subprocess.Popen(' '.join([c] + args), shell=True, cwd=cwd,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
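# Illustrative sketch (not part of versioneer): with tag_prefix "v", a
# 'git describe' output of "v1.2.0-14-g0ab3cde-dirty" is parsed above into
#     pieces = {"closest-tag": "1.2.0", "distance": 14, "short": "0ab3cde",
#               "dirty": True, "long": "<full sha from rev-parse>", "error": None}
# while a repository with no tags yields {"closest-tag": None,
# "distance": <total commit count>, ...} instead.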
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
pass
def get_versions(verbose=False):
# returns dict with two keys: 'version' and 'full'
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
return get_versions()["version"]
def get_cmdclass():
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
# self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
|
the-stack_106_23485 | import traci
import numpy as np
import timeit
import torch
from torch.autograd import Variable
# phase codes based on environment.net.xml
PHASE_NS_GREEN = 0 # action 0 code 00
PHASE_NS_YELLOW = 1
PHASE_NSL_GREEN = 2 # action 1 code 01
PHASE_NSL_YELLOW = 3
PHASE_EW_GREEN = 4 # action 2 code 10
PHASE_EW_YELLOW = 5
PHASE_EWL_GREEN = 6 # action 3 code 11
PHASE_EWL_YELLOW = 7
class Simulation:
def __init__(self, Model, TrafficGen, sumo_cmd, max_steps, green_duration, yellow_duration, num_states, num_actions):
self.dqn = Model
self.traffic_gen = TrafficGen
self.step = 0
self.sumo_cmd = sumo_cmd
self.max_steps = max_steps
self.green_duration = green_duration
self.yellow_duration = yellow_duration
self.num_states = num_states
self.num_actions = num_actions
def run(self, episode):
# for the purpose of recording trajectories
f = open("data.txt", "a")
"""
Runs the testing simulation
"""
start_time = timeit.default_timer()
# first, generate the route file for this simulation and set up sumo
self.traffic_gen.generate_routefile(seed=episode)
traci.start(self.sumo_cmd)
print("Simulating...")
# inits
self.step = 0
self.waiting_times = {}
old_total_wait = 0
old_action = -1
sum_reward = 0
sum_waiting = 0
while self.step < self.max_steps:
# get current state of the intersection
current_state = self.get_state()
# calculate reward of previous action: (change in cumulative waiting time between actions)
            # waiting time = seconds waited by a car since it spawned in the environment.
current_total_wait = self.collect_waiting_times()
reward = old_total_wait - current_total_wait
# choose the light phase to activate, based on the current state of the intersection
action = self.choose_action(current_state)
            q_values = self.return_q_values(current_state)
# record the state action pair in the file
            softmax = torch.nn.Softmax(dim=-1)  # explicit dim; implicit-dim Softmax is deprecated
q_list = softmax(q_values[0])
f.write(str(list(current_state) + q_list.tolist()) + "," + str([action.tolist()]) + "\n")
# if the chosen phase is different from the last phase, activate the yellow phase
if self.step != 0 and old_action != action:
self.set_yellow_phase(old_action)
self.simulate(self.yellow_duration)
# execute the phase selected before
self.set_green_phase(action)
self.simulate(self.green_duration)
# saving variables for later & accumulate reward
old_action = action
old_total_wait = current_total_wait
# saving only the meaningful reward to better see if the agent is behaving correctly
if reward < 0:
sum_reward += reward
sum_waiting += current_total_wait
avg_reward = sum_reward / self.max_steps
avg_waiting = sum_waiting / self.max_steps
traci.close()
simulation_time = round(timeit.default_timer() - start_time, 1)
f.close()
return simulation_time, avg_reward, avg_waiting
def simulate(self, steps_todo):
"""
Proceed with the simulation in sumo
"""
# do not do more steps than the maximum allowed number of steps
if (self.step + steps_todo) >= self.max_steps:
steps_todo = self.max_steps - self.step
while steps_todo > 0:
traci.simulationStep() # simulate 1 step in sumo
self.step += 1 # update the step counter
steps_todo -= 1
def collect_waiting_times(self):
"""
Retrieve the waiting time of every car in the incoming roads
"""
incoming_roads = ["E2TL", "N2TL", "W2TL", "S2TL"]
car_list = traci.vehicle.getIDList()
for car_id in car_list:
wait_time = traci.vehicle.getAccumulatedWaitingTime(car_id)
# get the road id where the car is located
road_id = traci.vehicle.getRoadID(car_id)
# consider only the waiting times of cars in incoming roads
if road_id in incoming_roads:
self.waiting_times[car_id] = wait_time
else:
# a car that was tracked has cleared the intersection
if car_id in self.waiting_times:
del self.waiting_times[car_id]
total_waiting_time = sum(self.waiting_times.values())
return total_waiting_time
def choose_action(self, state):
"""
Pick the best action known based on the current state of the env
"""
state = Variable(torch.FloatTensor(state).unsqueeze(0), requires_grad=False)
q_value = self.dqn.forward(state)
return q_value.max(1)[1].data[0]
    def return_q_values(self, state):
        """
        Return the raw Q-values predicted by the model for the given state
        """
state = Variable(torch.FloatTensor(state).unsqueeze(0), requires_grad=False)
q_value = self.dqn.forward(state)
return q_value
def set_yellow_phase(self, old_action):
"""
Activate the correct yellow light combination in sumo
"""
# obtain the yellow phase code, based on the old action (ref on environment.net.xml)
yellow_phase_code = old_action * 2 + 1
traci.trafficlight.setPhase("TL", yellow_phase_code)
def set_green_phase(self, action_number):
"""
Activate the correct green light combination in sumo
"""
if action_number == 0:
traci.trafficlight.setPhase("TL", PHASE_NS_GREEN)
elif action_number == 1:
traci.trafficlight.setPhase("TL", PHASE_NSL_GREEN)
elif action_number == 2:
traci.trafficlight.setPhase("TL", PHASE_EW_GREEN)
elif action_number == 3:
traci.trafficlight.setPhase("TL", PHASE_EWL_GREEN)
def get_queue_length(self):
"""
Retrieve the number of cars with speed = 0 in every incoming lane
"""
halt_N = traci.edge.getLastStepHaltingNumber("N2TL")
halt_S = traci.edge.getLastStepHaltingNumber("S2TL")
halt_E = traci.edge.getLastStepHaltingNumber("E2TL")
halt_W = traci.edge.getLastStepHaltingNumber("W2TL")
queue_length = halt_N + halt_S + halt_E + halt_W
return queue_length
def get_state(self):
"""
Retrieve the state of the intersection from sumo, in the form of cell occupancy
"""
state = np.zeros(self.num_states)
car_list = traci.vehicle.getIDList()
for car_id in car_list:
lane_pos = traci.vehicle.getLanePosition(car_id)
lane_id = traci.vehicle.getLaneID(car_id)
            # invert the lane position value so that lane_pos = 0 means the car is
            # right at the traffic light; 750 is the maximum length of a road
# https://sumo.dlr.de/pydoc/traci._vehicle.html#VehicleDomain-getLanePosition
lane_pos = 750 - lane_pos
# distance in meters from the traffic light -> mapping into cells
if lane_pos < 7:
lane_cell = 0
elif lane_pos < 14:
lane_cell = 1
elif lane_pos < 21:
lane_cell = 2
elif lane_pos < 28:
lane_cell = 3
elif lane_pos < 40:
lane_cell = 4
elif lane_pos < 60:
lane_cell = 5
elif lane_pos < 100:
lane_cell = 6
elif lane_pos < 160:
lane_cell = 7
elif lane_pos < 400:
lane_cell = 8
elif lane_pos <= 750:
lane_cell = 9
# finding the lane where the car is located
# x2TL_3 are the "turn left only" lanes
if lane_id == "W2TL_0" or lane_id == "W2TL_1" or lane_id == "W2TL_2":
lane_group = 0
elif lane_id == "W2TL_3":
lane_group = 1
elif lane_id == "N2TL_0" or lane_id == "N2TL_1" or lane_id == "N2TL_2":
lane_group = 2
elif lane_id == "N2TL_3":
lane_group = 3
elif lane_id == "E2TL_0" or lane_id == "E2TL_1" or lane_id == "E2TL_2":
lane_group = 4
elif lane_id == "E2TL_3":
lane_group = 5
elif lane_id == "S2TL_0" or lane_id == "S2TL_1" or lane_id == "S2TL_2":
lane_group = 6
elif lane_id == "S2TL_3":
lane_group = 7
else:
lane_group = -1
if 1 <= lane_group <= 7:
                # composition of the two position IDs to create a number in interval 0-79
car_position = int(str(lane_group) + str(lane_cell))
valid_car = True
elif lane_group == 0:
car_position = lane_cell
valid_car = True
else:
# flag for not detecting cars crossing the intersection or driving away from it
valid_car = False
if valid_car:
# write the position of the car car_id in the state array in the form of "cell occupied"
state[car_position] = 1
return state
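# Illustrative sketch (assumed helper, not part of the original script): the state
# produced above is an 80-cell occupancy grid (8 lane groups x 10 distance cells,
# indexed as int(str(lane_group) + str(lane_cell))). The distance-to-cell mapping
# in get_state can be reproduced with numpy for lane_pos in [0, 750):
def _lane_cell_from_position(lane_pos):
    """Map an inverted lane position (metres from the stop line) to a cell index."""
    cell_edges = [7, 14, 21, 28, 40, 60, 100, 160, 400, 750]
    return int(np.digitize(lane_pos, cell_edges, right=False))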
|
the-stack_106_23487 | #!/usr/bin/env python
'''A script which returns the mutual information between the predictions of a
model and a test data set.'''
from __future__ import division
#Our standard Modules
import argparse
import numpy as np
import scipy as sp
import sys
import pandas as pd
#Our miscellaneous functions
#This module will allow us to easily tally the letter counts at a particular position
import mpathic.utils as utils
import mpathic.EstimateMutualInfoforMImax as EstimateMutualInfoforMImax
import mpathic.qc as qc
import mpathic.numerics as numerics
from mpathic import SortSeqError
import mpathic.io as io
import matplotlib.pyplot as plt
import scipy
from scipy.special import comb  # scipy.misc.comb was removed in SciPy >= 1.3
def main(
data_df,model_df,
start=0,end=None,err=False,coarse_graining_level=0,
rsquared=False,return_freg=False):
#determine whether you are working with RNA, DNA, or protein.
#this also should determine modeltype (MAT, NBR, PAIR).
dicttype, modeltype = qc.get_model_type(model_df)
#get column header for the sequence column.
seq_cols = qc.get_cols_from_df(data_df,'seqs')
if not len(seq_cols)==1:
raise SortSeqError('Dataframe has multiple seq cols: %s'%str(seq_cols))
#create dictionary that goes from, for example, nucleotide to number and
#visa versa.
seq_dict,inv_dict = utils.choose_dict(dicttype,modeltype=modeltype)
#set name of sequences column based on type of sequence
type_name_dict = {'dna':'seq','rna':'seq_rna','protein':'seq_pro'}
seq_col_name = type_name_dict[dicttype]
if not end:
seqL = len(data_df[seq_col_name][0]) - start
else:
seqL = end-start
#throw out wrong length sequences.
#Cut the sequences based on start and end, and then check if it makes sense
if (start != 0 or end):
data_df.loc[:,seq_col_name] = \
data_df.loc[:,seq_col_name].str.slice(start,end)
right_length = data_df.loc[:,seq_col_name].apply(len) == (seqL)
if not right_length.all():
sys.stderr.write('''Not all sequences are the same length!
Throwing out incorrect sequences!''')
data_df = data_df.loc[right_length,:]
data_df = data_df.reset_index(drop=True)
if modeltype =='MAT':
if seqL != len(model_df.loc[:,'pos']):
raise SortSeqError('model length does not match dataset length')
elif modeltype =='NBR':
if seqL != len(model_df.loc[:,'pos'])+1:
raise SortSeqError('model length does not match dataset length')
elif modeltype == 'PAIR':
        if int(comb(seqL, 2)) != len(model_df.loc[:, 'pos']):
raise SortSeqError('model length does not match dataset length')
#get column names of the counts columns (excluding total counts 'ct')
col_headers = utils.get_column_headers(data_df)
if 'ct' not in data_df.columns:
data_df['ct'] = data_df[col_headers].sum(axis=1)
#remove empty rows.
data_df = data_df[data_df.ct != 0]
#determine sequence length.
#make a numpy array out of the model data frame
model_df_headers = ['val_' + str(inv_dict[i]) for i in range(len(seq_dict))]
value = np.array(model_df[model_df_headers])
#now we evaluate the expression of each sequence according to the model.
#first convert to matrix representation of sequences
seq_mat,wtrow = numerics.dataset2mutarray(data_df.copy(),modeltype)
temp_df = data_df.copy()
#evaluate energy of each sequence
temp_df['val'] = numerics.eval_modelmatrix_on_mutarray(value,seq_mat,wtrow)
#sort based on value
temp_sorted = temp_df.sort_values(by='val')
temp_sorted.reset_index(inplace=True,drop=True)
#freg is a regularized plot which show how sequences are distributed
#in energy space.
if return_freg:
fig,ax = plt.subplots()
MI,freg = EstimateMutualInfoforMImax.alt4(temp_sorted,coarse_graining_level=coarse_graining_level,return_freg=return_freg)
plt.imshow(freg,interpolation='nearest',aspect='auto')
plt.savefig(return_freg)
else:
MI = EstimateMutualInfoforMImax.alt4(temp_sorted,coarse_graining_level=coarse_graining_level,return_freg=return_freg)
#if we want to calculate error then use bootstrapping.
if not err:
Std = np.NaN
else:
data_df_for_sub = data_df.copy()
sub_MI = np.zeros(15)
for i in range(15):
sub_df = data_df_for_sub.sample(int(len(data_df_for_sub.index)/2))
sub_df.reset_index(inplace=True,drop=True)
sub_MI[i],sub_std = main(
sub_df,model_df,err=False)
Std = np.std(sub_MI)/np.sqrt(2)
#we can return linfoot corrolation (rsquared) or return MI.
if rsquared:
return (1-2**(-2*MI)),(1-2**(-2*Std))
else:
return MI,Std
def wrapper(args):
data_df = io.load_dataset(args.dataset)
# Take input from standard input or through the -i flag.
if args.model:
model_df = io.load_model(args.model)
else:
model_df = io.load_model(sys.stdin)
MI,Std = main(
data_df,model_df,start=args.start,
end=args.end,err=args.err,coarse_graining_level = args.coarse_graining_level,
rsquared=args.rsquared,return_freg=args.return_freg)
#format output
output_df = pd.DataFrame([MI],columns=['info'])
#if you calculated error add column to your data frame
if args.err:
output_df = pd.concat([output_df,pd.Series(Std,name='info_err')],axis=1)
if args.out:
outloc = open(args.out,'w')
else:
outloc = sys.stdout
#set output option, this will remove column length restriction
pd.set_option('max_colwidth',int(1e8))
#write to file.
output_df.to_string(
outloc, index=False,col_space=10,float_format=utils.format_string)
# Connects argparse to wrapper
def add_subparser(subparsers):
p = subparsers.add_parser('predictiveinfo')
p.add_argument('-rs','--rsquared',action='store_true',help='return effective r squared')
p.add_argument('-ds','--dataset')
p.add_argument(
'--err',action='store_true',help='''Flag to use if you want to
calculate error''')
p.add_argument(
'-s','--start',type=int,default=0,help ='''Position to start your
analyzed region''')
p.add_argument(
'-e','--end',type=int,default = None,
help='''Position to end your analyzed region''')
p.add_argument(
'-fr','--return_freg',type=str,
help='''return regularized plot and save it to this file name''')
p.add_argument(
'-cg','--coarse_graining_level',default=0,type=float,help='''coarse graining
level to use for mutual information calculation, higher values will
speed up computation''')
p.add_argument(
'-m', '--model', default=None,help='''Model file, otherwise input
through the standard input.''')
p.add_argument('-o', '--out', default=None)
p.set_defaults(func=wrapper)
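# Illustrative usage sketch (assumed file names, not part of the module): besides
# the CLI entry point wired up above, main() can be called directly on loaded
# dataframes, e.g.
#     data_df = io.load_dataset('sorted_library.txt')
#     model_df = io.load_model('matrix_model.txt')
#     MI, Std = main(data_df, model_df, start=0, err=True)
# which returns the estimated mutual information between model predictions and
# the observed counts, plus a bootstrap error estimate.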
|
the-stack_106_23489 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
from abc import ABC, abstractmethod
from typing import Optional
import torch
from torch import Tensor
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks import GradientAccumulationScheduler
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loggers.base import DummyLogger
from pytorch_lightning.utilities import AMPType, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import is_oom_error, garbage_collection_cuda
from pytorch_lightning.utilities.parsing import lightning_hasattr, lightning_getattr, lightning_setattr
try:
from apex import amp
except ImportError:
amp = None
EPSILON = 1e-6
EPSILON_FP16 = 1e-5
class TrainerTrainingTricksMixin(ABC):
# this is just a summary on variables used in this abstract class,
# the proper values/initialisation should be done in child class
gradient_clip_val: ...
precision: int
default_root_dir: str
progress_bar_callback: ...
on_gpu: bool
amp_backend: AMPType
@abstractmethod
def get_model(self) -> LightningModule:
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def save_checkpoint(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def restore(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
@abstractmethod
def fit(self, *args):
"""Warning: this is just empty shell for code implemented in other class."""
def clip_gradients(self, optimizer):
# this code is a modification of torch.nn.utils.clip_grad_norm_
# with TPU support based on https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md
if self.gradient_clip_val <= 0:
return
model = self.get_model()
if self.amp_backend == AMPType.APEX:
parameters = amp.master_params(optimizer)
else:
parameters = model.parameters()
max_norm = float(self.gradient_clip_val)
norm_type = float(2.0)
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
if norm_type == math.inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
device = parameters[0].device
out = torch.empty(len(parameters), device=device)
for i, p in enumerate(parameters):
torch.norm(p.grad.data.to(device), norm_type, out=out[i])
total_norm = torch.norm(out, norm_type)
eps = EPSILON_FP16 if self.precision == 16 else EPSILON
clip_coef = torch.tensor(max_norm, device=device) / (total_norm + eps)
clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))
for p in parameters:
p.grad.data.mul_(clip_coef.to(p.grad.data.device))
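    # Worked example for clip_gradients above (sketch): with gradient_clip_val = 1.0
    # and a total 2-norm of 4.0 across all gradients, clip_coef = 1.0 / (4.0 + eps)
    # ~= 0.25, so every gradient is scaled by ~0.25 and the global norm becomes ~1.0.
    # If the total norm is already below max_norm, clip_coef is clamped to 1.0 by the
    # torch.min(...) call and the gradients are left unchanged.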
def print_nan_gradients(self) -> None:
model = self.get_model()
for param in model.parameters():
if (param.grad is not None) and torch.isnan(param.grad.float()).any():
log.info(param, param.grad)
def detect_nan_tensors(self, loss: Tensor) -> None:
model = self.get_model()
# check if loss is nan
if not torch.isfinite(loss).all():
raise ValueError(
'The loss returned in `training_step` is nan or inf.'
)
# check if a network weight is nan
for name, param in model.named_parameters():
if not torch.isfinite(param).all():
self.print_nan_gradients()
raise ValueError(
f'Detected nan and/or inf values in `{name}`.'
' Check your forward pass for numerically unstable operations.'
)
def configure_accumulated_gradients(self, accumulate_grad_batches):
if isinstance(accumulate_grad_batches, dict):
self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
elif isinstance(accumulate_grad_batches, int):
schedule = {0: accumulate_grad_batches}
self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
else:
raise TypeError("Gradient accumulation supports only int and dict types")
def scale_batch_size(self,
model: LightningModule,
mode: str = 'power',
steps_per_trial: int = 3,
init_val: int = 2,
max_trials: int = 25,
batch_arg_name: str = 'batch_size'):
r"""
Will iteratively try to find the largest batch size for a given model
that does not give an out of memory (OOM) error.
Args:
model: Model to fit.
mode: string setting the search mode. Either `power` or `binsearch`.
If mode is `power` we keep multiplying the batch size by 2, until
we get an OOM error. If mode is 'binsearch', we will initially
also keep multiplying by 2 and after encountering an OOM error
do a binary search between the last successful batch size and the
batch size that failed.
steps_per_trial: number of steps to run with a given batch size.
                Ideally 1 should be enough to test if an OOM error occurs,
                however in practice a few are needed
init_val: initial batch size to start the search with
max_trials: max number of increase in batch size done before
algorithm is terminated
"""
if not lightning_hasattr(model, batch_arg_name):
raise MisconfigurationException(
f'Field {batch_arg_name} not found in both `model` and `model.hparams`')
if hasattr(model, batch_arg_name) and hasattr(model, "hparams") and batch_arg_name in model.hparams:
rank_zero_warn(
f'Field `model.{batch_arg_name}` and `model.hparams.{batch_arg_name}` are mutually exclusive!'
f' `model.{batch_arg_name}` will be used as the initial batch size for scaling.'
f' If this is not the intended behavior, please remove either one.'
)
if hasattr(model.train_dataloader, 'patch_loader_code'):
raise MisconfigurationException('The batch scaling feature cannot be used with dataloaders'
' passed directly to `.fit()`. Please disable the feature or'
' incorporate the dataloader into the model.')
# Arguments we adjust during the batch size finder, save for restoring
self.__scale_batch_dump_params()
# Set to values that are required by the algorithm
self.__scale_batch_reset_params(model, steps_per_trial)
# Save initial model, that is loaded after batch size is found
save_path = os.path.join(self.default_root_dir, 'temp_model.ckpt')
self.save_checkpoint(str(save_path))
if self.progress_bar_callback:
self.progress_bar_callback.disable()
# Initially we just double in size until an OOM is encountered
new_size = _adjust_batch_size(self, value=init_val) # initially set to init_val
if mode == 'power':
new_size = _run_power_scaling(self, model, new_size, batch_arg_name, max_trials)
elif mode == 'binsearch':
new_size = _run_binsearch_scaling(self, model, new_size, batch_arg_name, max_trials)
else:
raise ValueError('mode in method `scale_batch_size` can only be `power` or `binsearch')
garbage_collection_cuda()
log.info(f'Finished batch size finder, will continue with full run using batch size {new_size}')
# Restore initial state of model
self.restore(str(save_path), on_gpu=self.on_gpu)
os.remove(save_path)
# Finish by resetting variables so trainer is ready to fit model
self.__scale_batch_restore_params()
if self.progress_bar_callback:
self.progress_bar_callback.enable()
return new_size
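    # Illustrative usage sketch (assumed objects, not part of the original code):
    #     new_size = trainer.scale_batch_size(model, mode='binsearch',
    #                                         init_val=8, max_trials=10)
    # After the search, model.batch_size (or model.hparams.batch_size) holds
    # new_size and training can proceed with trainer.fit(model).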
def __scale_batch_dump_params(self):
# Prevent going into infinite loop
self.__dumped_params = {
'max_steps': self.max_steps,
'weights_summary': self.weights_summary,
'logger': self.logger,
'callbacks': self.callbacks,
'checkpoint_callback': self.checkpoint_callback,
'early_stop_callback': self.early_stop_callback,
'auto_scale_batch_size': self.auto_scale_batch_size,
'limit_train_batches': self.limit_train_batches,
'model': self.model,
}
def __scale_batch_reset_params(self, model, steps_per_trial):
self.auto_scale_batch_size = None # prevent recursion
self.max_steps = steps_per_trial # take few steps
self.weights_summary = None # not needed before full run
self.logger = DummyLogger()
self.callbacks = [] # not needed before full run
self.checkpoint_callback = False # required for saving
self.early_stop_callback = None
self.limit_train_batches = 1.0
self.optimizers, self.schedulers = [], [] # required for saving
self.model = model # required for saving
def __scale_batch_restore_params(self):
self.max_steps = self.__dumped_params['max_steps']
self.weights_summary = self.__dumped_params['weights_summary']
self.logger = self.__dumped_params['logger']
self.callbacks = self.__dumped_params['callbacks']
self.checkpoint_callback = self.__dumped_params['checkpoint_callback']
self.auto_scale_batch_size = self.__dumped_params['auto_scale_batch_size']
self.early_stop_callback = self.__dumped_params['early_stop_callback']
self.limit_train_batches = self.__dumped_params['limit_train_batches']
self.model = self.__dumped_params['model']
del self.__dumped_params
def _adjust_batch_size(trainer,
batch_arg_name: str = 'batch_size',
factor: float = 1.0,
value: Optional[int] = None,
desc: str = None):
""" Function for adjusting the batch size. It is expected that the user
has provided a model that has a hparam field called `batch_size` i.e.
`model.hparams.batch_size` should exist.
Args:
trainer: instance of pytorch_lightning.Trainer
batch_arg_name: field where batch_size is stored in `model.hparams`
factor: value which the old batch size is multiplied by to get the
new batch size
value: if a value is given, will override the batch size with this value.
Note that the value of `factor` will not have an effect in this case
desc: either `succeeded` or `failed`. Used purely for logging
"""
model = trainer.get_model()
batch_size = lightning_getattr(model, batch_arg_name)
if value:
lightning_setattr(model, batch_arg_name, value)
new_size = value
if desc:
log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')
else:
new_size = int(batch_size * factor)
if desc:
log.info(f'Batch size {batch_size} {desc}, trying batch size {new_size}')
lightning_setattr(model, batch_arg_name, new_size)
return new_size
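# Illustrative sketch (not part of the module): _adjust_batch_size either scales or
# overrides the stored batch size. With model.hparams.batch_size == 32,
#     _adjust_batch_size(trainer, factor=2.0, desc='succeeded')  # sets 64
#     _adjust_batch_size(trainer, value=48, desc='failed')       # sets 48
# (when `value` is given, `factor` has no effect, as the docstring notes).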
def _run_power_scaling(trainer, model, new_size, batch_arg_name, max_trials):
""" Batch scaling mode where the size is doubled at each iteration until an
OOM error is encountered. """
for _ in range(max_trials):
garbage_collection_cuda()
trainer.global_step = 0 # reset after each try
try:
# Try fit
trainer.fit(model)
# Double in size
new_size = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')
except RuntimeError as exception:
# Only these errors should trigger an adjustment
if is_oom_error(exception):
                # If we fail in power mode, halve the size and return
garbage_collection_cuda()
new_size = _adjust_batch_size(trainer, batch_arg_name, factor=0.5, desc='failed')
break
else:
raise # some other error not memory related
return new_size
def _run_binsearch_scaling(trainer, model, new_size, batch_arg_name, max_trials):
""" Batch scaling mode where the size is initially is doubled at each iteration
until an OOM error is encountered. Hereafter, the batch size is further
refined using a binary search """
    high = None
    low = 1  # lower bound for the binary search; raised after each successful fit
    count = 0
while True:
garbage_collection_cuda()
trainer.global_step = 0 # reset after each try
try:
# Try fit
trainer.fit(model)
count += 1
if count > max_trials:
break
# Double in size
low = new_size
if high:
if high - low <= 1:
break
midval = (high + low) // 2
new_size = _adjust_batch_size(trainer, batch_arg_name, value=midval, desc='succeeded')
else:
new_size = _adjust_batch_size(trainer, batch_arg_name, factor=2.0, desc='succeeded')
except RuntimeError as exception:
# Only these errors should trigger an adjustment
if is_oom_error(exception):
                # on OOM, treat this size as the new upper bound for the binary search
garbage_collection_cuda()
high = new_size
midval = (high + low) // 2
new_size = _adjust_batch_size(trainer, value=midval, desc='failed')
if high - low <= 1:
break
else:
raise # some other error not memory related
return new_size
|
the-stack_106_23490 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class NovaMovIE(InfoExtractor):
IE_NAME = 'novamov'
IE_DESC = 'NovaMov'
_VALID_URL_TEMPLATE = r'http://(?:(?:www\.)?%(host)s/(?:file|video)/|(?:(?:embed|www)\.)%(host)s/embed\.php\?(?:.*?&)?v=)(?P<id>[a-z\d]{13})'
_VALID_URL = _VALID_URL_TEMPLATE % {'host': 'novamov\.com'}
_HOST = 'www.novamov.com'
_FILE_DELETED_REGEX = r'This file no longer exists on our servers!</h2>'
_FILEKEY_REGEX = r'flashvars\.filekey="(?P<filekey>[^"]+)";'
_TITLE_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>([^<]+)</h3>'
_DESCRIPTION_REGEX = r'(?s)<div class="v_tab blockborder rounded5" id="v_tab1">\s*<h3>[^<]+</h3><p>([^<]+)</p>'
_TEST = {
'url': 'http://www.novamov.com/video/4rurhn9x446jj',
'md5': '7205f346a52bbeba427603ba10d4b935',
'info_dict': {
'id': '4rurhn9x446jj',
'ext': 'flv',
'title': 'search engine optimization',
'description': 'search engine optimization is used to rank the web page in the google search engine'
},
'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
page = self._download_webpage(
'http://%s/video/%s' % (self._HOST, video_id), video_id, 'Downloading video page')
if re.search(self._FILE_DELETED_REGEX, page) is not None:
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
filekey = self._search_regex(self._FILEKEY_REGEX, page, 'filekey')
title = self._html_search_regex(self._TITLE_REGEX, page, 'title', fatal=False)
description = self._html_search_regex(self._DESCRIPTION_REGEX, page, 'description', default='', fatal=False)
api_response = self._download_webpage(
'http://%s/api/player.api.php?key=%s&file=%s' % (self._HOST, filekey, video_id), video_id,
'Downloading video api response')
response = compat_urlparse.parse_qs(api_response)
if 'error_msg' in response:
raise ExtractorError('%s returned error: %s' % (self.IE_NAME, response['error_msg'][0]), expected=True)
video_url = response['url'][0]
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description
}
|
the-stack_106_23491 | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from send_email import send_mail
app = Flask(__name__)
ENV = 'prod'
if ENV == 'dev':
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:Shree2001@localhost/feedback'
else:
app.debug = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://blphthkouehynn:add3d1071a06332dc9af99be749359c107894279bf58e232d109c8ef3fa80b27@ec2-54-160-120-28.compute-1.amazonaws.com:5432/df0l9mk6d39kh0'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Feedback(db.Model):
__tablename__ = 'feedback'
id = db.Column(db.Integer, primary_key = True)
customer = db.Column(db.String(200), unique=True)
dealer = db.Column(db.String(200))
rating = db.Column(db.Integer)
comments = db.Column(db.Text())
def __init__(self, customer, dealer, rating, comments):
self.customer = customer
self.dealer = dealer
self.rating = rating
self.comments = comments
@app.route('/')
def index():
return render_template("index.html")
@app.route('/submit', methods = ['POST'])
def submit():
if request.method == 'POST':
customer = request.form['customername']
dealer = request.form['dealer']
rating = request.form['rating']
comments = request.form['comments']
if customer == '' or dealer == '':
return render_template('index.html', message='Please enter required fields')
if db.session.query(Feedback).filter(Feedback.customer == customer).count() == 0:
data = Feedback(customer, dealer, rating, comments)
db.session.add(data)
db.session.commit()
send_mail(customer, dealer, rating, comments)
return render_template("success.html")
return render_template('index.html', message='You have already submitted feedback')
if __name__ == '__main__':
app.run(debug=True) |
the-stack_106_23493 | from __future__ import unicode_literals, division, absolute_import
import re
import urllib
import logging
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils import requests
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability, normalize_unicode
log = logging.getLogger('iptorrents')
CATEGORIES = {
'Movie-all': 72,
# Movies
'Movie-3D': 87,
'Movie-480p': 77,
'Movie-BD-R': 89,
'Movie-BD-Rip': 90,
'Movie-DVD-R': 6,
'Movie-HD-Bluray': 48,
'Movie-Kids': 54,
'Movie-MP4': 62,
'Movie-Non-English': 38,
'Movie-Packs': 68,
'Movie-XviD': 17,
#TV
'TV-all': 73,
'TV-Sports': 55,
'TV-480p': 78,
'TV-MP4': 66,
'TV-Non-English': 82,
'TV-Packs': 65,
'TV-Packs-Non-English': 83,
'TV-SD-x264': 79,
'TV-x264': 5,
'TV-XVID': 4
}
import sys
class UrlRewriteIPTorrents(object):
"""
IpTorrents urlrewriter and search plugin.
iptorrents:
rss_key: xxxxxxxxx (required)
uid: xxxxxxxx (required)
password: xxxxxxxx (required)
category: HD
    Category is any combination of: Movie-all, Movie-3D, Movie-480p,
    Movie-BD-R, Movie-BD-Rip, Movie-DVD-R,
Movie-HD-Bluray, Movie-Kids, Movie-MP4,
Movie-Non-English, Movie-Packs, Movie-XviD,
TV-all, TV-Sports, TV-480p, TV-MP4, TV-Non-English, TV-Packs,
TV-Packs-Non-English, TV-SD-x264, TV-x264, TV-XVID
"""
schema = {
'type': 'object',
'properties': {
'rss_key': {'type': 'string'},
'uid': {'type': 'integer'},
'password': {'type': 'string'},
'category': one_or_more({
'oneOf': [
{'type': 'integer'},
{'type': 'string', 'enum': list(CATEGORIES)},
]}),
},
'required': ['rss_key', 'uid', 'password'],
'additionalProperties': False
}
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
if url.startswith('http://iptorrents.com/download.php/'):
return False
if url.startswith('http://iptorrents.com/'):
return True
return False
# urlrewriter API
def url_rewrite(self, task, entry):
        if 'url' not in entry:
log.error("Didn't actually get a URL...")
else:
log.debug("Got the URL: %s" % entry['url'])
if entry['url'].startswith('http://iptorrents.com/t?'):
# use search
results = self.search(entry)
if not results:
raise UrlRewritingError("No search results found")
# TODO: Search doesn't enforce close match to title, be more picky
entry['url'] = results[0]['url']
@plugin.internet(log)
def search(self, entry, config=None):
"""
Search for name from torrentleech.
"""
        if not isinstance(config, dict):
            config = {}
        rss_key = config['rss_key']
# sort = SORT.get(config.get('sort_by', 'seeds'))
# if config.get('sort_reverse'):
# sort += 1
categories = config.get('category', 'all')
# Make sure categories is a list
if not isinstance(categories, list):
categories = [categories]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c]
for c in categories]
filter_url = '&'.join(('l' + str(c) + '=') for c in categories)
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
query = normalize_unicode(search_string)
# urllib.quote will crash if the unicode string has non ascii
# characters, so encode in utf-8 beforehand
url = ('http://iptorrents.com/t?' + filter_url + '&q=' +
urllib.quote_plus(query.encode('utf-8')) + '&qf=')
page = requests.get(url, cookies={'uid': str(config['uid']),
'pass': config['password']}).content
soup = get_soup(page)
if soup.find("title").contents[0] == "IPT":
raise plugin.PluginError("Page title unexpected: Could it be the login page?...")
log.debug('searching with url: %s' % url)
tb = soup.find('table', {'class': 'torrents'})
if not tb:
continue
# list all row of torrents table except first because it is titles
for tr in tb.findAll('tr')[1:]:
h1 = tr.find('h1')
if h1 is not None:
if h1.contents[0] == 'No Torrents Found!':
break
link = tr.find("a", attrs={'href':
re.compile('/details\.php\?id=\d+')
})
log.debug('link phase: %s' % link.contents[0])
entry = Entry()
entry['title'] = link.contents[0]
torrent_url = tr.find("a", attrs={'href': re.compile('/download.php/\d+/.*')}).get('href')
torrent_url = normalize_unicode(torrent_url)
torrent_url = urllib.quote(torrent_url.encode('utf-8'))
torrent_url = 'http://iptorrents.com' + torrent_url + '?torrent_pass=' + rss_key
log.debug('RSS-ified download link: %s' % torrent_url)
entry['url'] = torrent_url
seeders = tr.find_all('td', {'class': 'ac t_seeders'})
leechers = tr.find_all('td', {'class': 'ac t_leechers'})
entry['torrent_seeds'] = int(seeders[0].contents[0])
entry['torrent_leeches'] = int(leechers[0].contents[0])
entry['search_sort'] = torrent_availability(entry['torrent_seeds'],
entry['torrent_leeches'])
size = tr.find("td", text=re.compile('([\.\d]+) ([GMK]?)B')).contents[0]
size = re.search('([\.\d]+) ([GMK]?)B', size)
if size:
if size.group(2) == 'G':
entry['content_size'] = int(float(size.group(1)) * 1000 ** 3 / 1024 ** 2)
elif size.group(2) == 'M':
entry['content_size'] = int(float(size.group(1)) * 1000 ** 2 / 1024 ** 2)
elif size.group(2) == 'K':
entry['content_size'] = int(float(size.group(1)) * 1000 / 1024 ** 2)
else:
entry['content_size'] = int(float(size.group(1)) / 1024 ** 2)
entries.add(entry)
return sorted(entries, reverse=True, key=lambda x: x.get('search_sort'))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteIPTorrents, 'iptorrents',
groups=['urlrewriter', 'search'], api_ver=2)
|
the-stack_106_23495 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
class AutoRestRequiredOptionalTestServiceConfiguration(Configuration):
"""Configuration for AutoRestRequiredOptionalTestService.
Note that all parameters used to create this instance are saved as instance
attributes.
:param required_global_path: number of items to skip.
:type required_global_path: str
:param required_global_query: number of items to skip.
:type required_global_query: str
:param optional_global_query: number of items to skip.
:type optional_global_query: int
"""
def __init__(
self,
required_global_path, # type: str
required_global_query, # type: str
optional_global_query=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> None
if required_global_path is None:
raise ValueError("Parameter 'required_global_path' must not be None.")
if required_global_query is None:
raise ValueError("Parameter 'required_global_query' must not be None.")
super(AutoRestRequiredOptionalTestServiceConfiguration, self).__init__(**kwargs)
self.required_global_path = required_global_path
self.required_global_query = required_global_query
self.optional_global_query = optional_global_query
kwargs.setdefault("sdk_moniker", "autorestrequiredoptionaltestservice/{}".format(VERSION))
self._configure(**kwargs)
def _configure(
self, **kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
|
the-stack_106_23498 | """
:copyright: Alistair Muldal
:license: Unknown, shared on StackOverflow and Pastebin
Reference:
P. Perona and J. Malik.
Scale-space and edge detection using ansotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
<http://www.cs.berkeley.edu/~malik/papers/MP-aniso.pdf>
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Department of Pharmacology
University of Oxford
<[email protected]>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
import numpy as np
import warnings
def anisodiff(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.), option=1):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence
diffusion across step edges. A large value reduces the influence of
intensity gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between
adjacent pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast
ones.
Diffusion equation 2 favours wide regions over smaller ones.
"""
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
m = "Only grayscale images allowed, converting to 2D matrix"
warnings.warn(m)
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
    for ii in range(niter):
# calculate the diffs
deltaS[:-1, :] = np.diff(imgout, axis=0)
deltaE[:, :-1] = np.diff(imgout, axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS/kappa)**2.)/step[0]
gE = np.exp(-(deltaE/kappa)**2.)/step[1]
elif option == 2:
gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
# update matrices
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'North/West' by one
        # pixel. Don't ask questions. Just do it. Trust me.
NS[:] = S
EW[:] = E
NS[1:, :] -= S[:-1, :]
EW[:, 1:] -= E[:, :-1]
# update the image
imgout += gamma*(NS+EW)
return imgout
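# Minimal usage sketch (illustrative, not part of the original module): runs the
# 2D filter on a synthetic noisy image. The array shape and parameter values are
# arbitrary choices for demonstration; see the docstring above for guidance on
# kappa and gamma.
def _demo_anisodiff():
    noisy = np.random.rand(128, 128).astype('float32')
    smoothed = anisodiff(noisy, niter=20, kappa=50, gamma=0.2, step=(1., 1.), option=1)
    return smoothed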
def anisodiff3(stack, niter=1, kappa=50, gamma=0.1, step=(1., 1., 1.), option=1):
"""
3D Anisotropic diffusion.
Usage:
stackout = anisodiff(stack, niter, kappa, gamma, option)
Arguments:
stack - input stack
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (z,y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
Returns:
stackout - diffused stack.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence
diffusion across step edges. A large value reduces the influence of
intensity gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between
adjacent pixels differs in the x,y and/or z axes
Diffusion equation 1 favours high contrast edges over low contrast
ones.
Diffusion equation 2 favours wide regions over smaller ones.
"""
# ...you could always diffuse each color channel independently if you
# really want
if stack.ndim == 4:
m = "Only grayscale stacks allowed, converting to 3D matrix"
warnings.warn(m)
stack = stack.mean(3)
# initialize output array
stack = stack.astype('float32')
stackout = stack.copy()
# initialize some internal variables
deltaS = np.zeros_like(stackout)
deltaE = deltaS.copy()
deltaD = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
UD = deltaS.copy()
gS = np.ones_like(stackout)
gE = gS.copy()
gD = gS.copy()
for ii in range(niter):
# calculate the diffs
deltaD[:-1, :, :] = np.diff(stackout, axis=0)
deltaS[:, :-1, :] = np.diff(stackout, axis=1)
deltaE[:, :, :-1] = np.diff(stackout, axis=2)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gD = np.exp(-(deltaD/kappa)**2.)/step[0]
gS = np.exp(-(deltaS/kappa)**2.)/step[1]
gE = np.exp(-(deltaE/kappa)**2.)/step[2]
elif option == 2:
gD = 1./(1.+(deltaD/kappa)**2.)/step[0]
gS = 1./(1.+(deltaS/kappa)**2.)/step[1]
gE = 1./(1.+(deltaE/kappa)**2.)/step[2]
# Update matrices.
D = gD*deltaD
E = gE*deltaE
S = gS*deltaS
# Subtract a copy that has been shifted 'Up/North/West' by one
# pixel. Don't ask questions. Just do it. Trust me.
UD[:] = D
NS[:] = S
EW[:] = E
UD[1:, :, :] -= D[:-1, :, :]
NS[:, 1:, :] -= S[:, :-1, :]
EW[:, :, 1:] -= E[:, :, :-1]
# update the image
stackout += gamma*(UD+NS+EW)
return stackout
|
the-stack_106_23500 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
import random
import requests
import execjs
from scrapy import signals
from scrapy.http import HtmlResponse
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
from wenshu.settings import USER_AGENTS
from scrapy.downloadermiddlewares.defaultheaders import DefaultHeadersMiddleware
from scrapy.downloadermiddlewares.cookies import CookiesMiddleware
class WenshuSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class WenshuDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
with open('./wenshu/js/process.js', 'r') as f:
default = execjs.get('phantomjs')
data = f.read()
ect = default.compile(data)
guid = ect.call('getGuid')
number = self.get_code(guid)
vjkl5 = self.get_vjkl5()
vl5x = ect.call('getKey', vjkl5)
headers = (
{
'user-agent': random.choice(USER_AGENTS),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
)
cookies = {
'vjkl5': vjkl5
}
data = {
'Param': request.meta['Param'],
'Index': request.meta['Index'],
'Page': request.meta['Page'],
'Order': request.meta['Order'],
'Direction': request.meta['Direction'],
'vl5x': vl5x,
'number': number,
'guid': guid
}
res = requests.post(request.url, data=data, headers=headers, cookies=cookies)
return HtmlResponse(request.url, body=res.text, encoding='utf8')
def get_code(self, guid):
url = 'http://wenshu.court.gov.cn/ValiCode/GetCode'
s = requests.Session()
s.headers.update(
{
'user-agent': random.choice(USER_AGENTS),
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
}
)
data = {
'guid': guid
}
res = s.post(url, data=data)
return res.text
def get_vjkl5(self):
url = 'http://wenshu.court.gov.cn/list/list/?sorttype=1'
headers = (
{
'user-agent': random.choice(USER_AGENTS),
}
)
res = requests.get(url, headers=headers)
return res.cookies['vjkl5']
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class WenshuUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, user_agent=random.choice(USER_AGENTS)):
super().__init__(user_agent)
class WenshuHeadersMiddleware(DefaultHeadersMiddleware):
def __init__(self, headers=({'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})):
super().__init__(headers)
class WenshuCookiesMiddleware(CookiesMiddleware):
def __init__(self):
super().__init__()
|
the-stack_106_23502 | import sys
import h5py
import numpy as np
import struct
f = h5py.File(sys.argv[1], 'r')
print(f.keys())
dataset = f['train'][:]
print(dataset.shape, dataset.dtype)
queries = f['test'][:]
print(queries.shape, queries.dtype)
answers = f['neighbors'][:]
print(answers.shape, answers.dtype)
def serialize(a, file_name):
if len(a.shape) != 2:
raise Exception('array must be two-dimensional')
if a.dtype != np.float32 and a.dtype != np.int32:
raise Exception('invalid dtype')
if a.dtype == np.float32:
spec = 'f'
else:
spec = 'i'
print(spec)
with open(file_name, 'wb') as output:
output.write(struct.pack('Q', a.shape[0]))
for i in range(a.shape[0]):
output.write(struct.pack('Q', a.shape[1]))
output.write(struct.pack('%d%s' % (a.shape[1], spec), *a[i]))
serialize(dataset, sys.argv[2])
serialize(queries, sys.argv[3])
serialize(answers, sys.argv[4])
def toNumpy(a, file_name):
np.save(file_name + '.npy', np.asarray(a, dtype=a.dtype))
toNumpy(dataset, sys.argv[2])
toNumpy(queries, sys.argv[3])
toNumpy(answers, sys.argv[4])
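# Illustrative sketch (not part of the original script): reads back the binary
# layout written by `serialize` above -- a uint64 row count, then for each row a
# uint64 length followed by that many packed 4-byte values. The dtype spec
# ('f' or 'i') must be supplied by the caller, since the format does not record it.
def deserialize(file_name, spec='f'):
    rows = []
    with open(file_name, 'rb') as data:
        num_rows, = struct.unpack('Q', data.read(8))
        for _ in range(num_rows):
            row_len, = struct.unpack('Q', data.read(8))
            row = struct.unpack('%d%s' % (row_len, spec), data.read(row_len * 4))
            rows.append(row)
    return np.array(rows, dtype=np.float32 if spec == 'f' else np.int32)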
|
the-stack_106_23505 | # -*- coding: utf-8 -*-
from odoo import api, models, fields
class ResConfigSettings(models.TransientModel):
_inherit = 'res.config.settings'
is_edi_proxy_active = fields.Boolean(compute='_compute_is_edi_proxy_active')
@api.depends('company_id.account_edi_proxy_client_ids', 'company_id.account_edi_proxy_client_ids.active')
def _compute_is_edi_proxy_active(self):
for config in self:
config.is_edi_proxy_active = config.company_id.account_edi_proxy_client_ids
def button_create_proxy_user(self):
# For now, only fattura_pa uses the proxy.
# To use it for more, we have to either make the activation of the proxy on a format basis
# or create a user per format here (but also when installing new formats)
fattura_pa = self.env.ref('l10n_it_edi.edi_fatturaPA')
edi_identification = fattura_pa._get_proxy_identification(self.company_id)
if not edi_identification:
return
self.env['account_edi_proxy_client.user']._register_proxy_user(self.company_id, fattura_pa, edi_identification)
|
the-stack_106_23508 | # Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy
import pytest
from tests.integ import DATA_DIR, PYTHON_VERSION, TRAINING_DEFAULT_TIMEOUT_MINUTES
from tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name
from sagemaker.pytorch.estimator import PyTorch
from sagemaker.pytorch.model import PyTorchModel
from sagemaker.pytorch.defaults import LATEST_PY2_VERSION
from sagemaker.utils import sagemaker_timestamp
MNIST_DIR = os.path.join(DATA_DIR, "pytorch_mnist")
MNIST_SCRIPT = os.path.join(MNIST_DIR, "mnist.py")
@pytest.fixture(scope="module", name="pytorch_training_job")
def fixture_training_job(sagemaker_session, pytorch_full_version, cpu_instance_type):
with timeout(minutes=TRAINING_DEFAULT_TIMEOUT_MINUTES):
pytorch = _get_pytorch_estimator(sagemaker_session, pytorch_full_version, cpu_instance_type)
pytorch.fit({"training": _upload_training_data(pytorch)})
return pytorch.latest_training_job.name
@pytest.mark.canary_quick
@pytest.mark.regional_testing
@pytest.mark.skipif(
PYTHON_VERSION == "py2",
reason="Python 2 is supported by PyTorch {} and lower versions.".format(LATEST_PY2_VERSION),
)
def test_sync_fit_deploy(pytorch_training_job, sagemaker_session, cpu_instance_type):
# TODO: add tests against local mode when it's ready to be used
endpoint_name = "test-pytorch-sync-fit-attach-deploy{}".format(sagemaker_timestamp())
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
estimator = PyTorch.attach(pytorch_training_job, sagemaker_session=sagemaker_session)
predictor = estimator.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
data = numpy.zeros(shape=(1, 1, 28, 28), dtype=numpy.float32)
predictor.predict(data)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
@pytest.mark.local_mode
@pytest.mark.skipif(
PYTHON_VERSION == "py2",
reason="Python 2 is supported by PyTorch {} and lower versions.".format(LATEST_PY2_VERSION),
)
def test_fit_deploy(sagemaker_local_session, pytorch_full_version):
pytorch = PyTorch(
entry_point=MNIST_SCRIPT,
role="SageMakerRole",
framework_version=pytorch_full_version,
py_version="py3",
train_instance_count=1,
train_instance_type="local",
sagemaker_session=sagemaker_local_session,
)
pytorch.fit({"training": "file://" + os.path.join(MNIST_DIR, "training")})
predictor = pytorch.deploy(1, "local")
try:
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
finally:
predictor.delete_endpoint()
@pytest.mark.skipif(
PYTHON_VERSION == "py2",
reason="Python 2 is supported by PyTorch {} and lower versions.".format(LATEST_PY2_VERSION),
)
def test_deploy_model(pytorch_training_job, sagemaker_session, cpu_instance_type):
endpoint_name = "test-pytorch-deploy-model-{}".format(sagemaker_timestamp())
with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
desc = sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=pytorch_training_job
)
model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]
model = PyTorchModel(
model_data,
"SageMakerRole",
entry_point=MNIST_SCRIPT,
sagemaker_session=sagemaker_session,
)
predictor = model.deploy(1, cpu_instance_type, endpoint_name=endpoint_name)
batch_size = 100
data = numpy.random.rand(batch_size, 1, 28, 28).astype(numpy.float32)
output = predictor.predict(data)
assert output.shape == (batch_size, 10)
def _upload_training_data(pytorch):
return pytorch.sagemaker_session.upload_data(
path=os.path.join(MNIST_DIR, "training"),
key_prefix="integ-test-data/pytorch_mnist/training",
)
def _get_pytorch_estimator(
sagemaker_session, pytorch_full_version, instance_type, entry_point=MNIST_SCRIPT
):
return PyTorch(
entry_point=entry_point,
role="SageMakerRole",
framework_version=pytorch_full_version,
py_version=PYTHON_VERSION,
train_instance_count=1,
train_instance_type=instance_type,
sagemaker_session=sagemaker_session,
)
def _is_local_mode(instance_type):
return instance_type == "local"
|
the-stack_106_23509 | import pygame
import pygame_menu
from pygame_menu import sound
from pygame_menu.themes import Theme
class Menu:
menu = None
screen = None
my_theme = None
my_image = None
sound = None
def my_theme(self):
font = pygame_menu.font.FONT_8BIT
styl = pygame_menu.widgets.MENUBAR_STYLE_NONE
my_theme = Theme(widget_font=font)
my_theme.title_font_color = (255, 255, 255)
my_theme.title_font = font
my_theme.title_bar_style = styl
my_theme.widget_border_color = (55, 25, 25)
self.my_image = pygame_menu.baseimage.BaseImage(
image_path='img/bckg.jpg',
drawing_mode=pygame_menu.baseimage.IMAGE_MODE_REPEAT_XY
)
my_theme.background_color = self.my_image
return my_theme
def __init__(self, arg_screen, sound_path):
self.menu = pygame_menu.Menu('Shamboogeon', 800, 600,
theme=self.my_theme())
        self.screen = arg_screen
        self.sound = pygame.mixer.Sound(sound_path)
self.sound.play(-1)
self.sound.set_volume(0.1)
self.response(800,600)
def add_button(self,name , action):
widget_manager = pygame_menu.menu.WidgetManager(self.menu)
widget_manager.button(name, action, self.screen)
def response(self,width,height):
self.menu = pygame_menu.Menu('Shamboogeon', width, height,
theme=self.my_theme())
engine = sound.Sound()
engine.set_sound(sound.SOUND_TYPE_KEY_ADDITION, 'sounds/UI-select.ogg')
self.menu.set_sound(engine, recursive=True)
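# Illustrative usage sketch (not part of the original module): builds the menu on
# a pygame display surface and wires a single button. The window size, sound path
# and button callback are assumptions for demonstration; the image and sound
# assets referenced by the class must exist on disk.
def _demo_menu():
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    menu = Menu(screen, 'sounds/theme.ogg')
    menu.add_button('Start', lambda surface: None)
    return menu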
|
the-stack_106_23511 | """empty message
Revision ID: 8ebeb4c2e02f
Revises: 2ef21fa29d1b
Create Date: 2020-01-30 15:43:59.227314
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "8ebeb4c2e02f"
down_revision = "2ef21fa29d1b"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"downloadable_files",
sa.Column("analysis_friendly", sa.Boolean(), nullable=True),
)
conn = op.get_bind()
conn.execute(
"""
UPDATE downloadable_files SET analysis_friendly = true
WHERE upload_type in ('participants info', 'samples info', 'cell counts assignment', 'cell counts compartment', 'cell counts profiling', 'combined maf', 'ihc marker combined')
"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("downloadable_files", "analysis_friendly")
# ### end Alembic commands ###
|
the-stack_106_23514 | """
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functools import singledispatch
from PIL import Image
import numpy as np
from ..representation import (
SegmentationPrediction, SegmentationAnnotation,
StyleTransferAnnotation, StyleTransferPrediction,
SuperResolutionPrediction, SuperResolutionAnnotation,
ImageProcessingPrediction, ImageProcessingAnnotation,
ImageInpaintingAnnotation, ImageInpaintingPrediction,
SalientRegionAnnotation, SalientRegionPrediction
)
from ..postprocessor.postprocessor import PostprocessorWithSpecificTargets, ApplyToOption
from ..postprocessor import ResizeSegmentationMask
from ..config import NumberField
from ..utils import get_size_from_config
class Resize(PostprocessorWithSpecificTargets):
__provider__ = 'resize'
prediction_types = (StyleTransferPrediction, ImageProcessingPrediction,
SegmentationPrediction, SuperResolutionPrediction,
ImageInpaintingPrediction)
annotation_types = (StyleTransferAnnotation, ImageProcessingAnnotation,
SegmentationAnnotation, SuperResolutionAnnotation,
                        ImageInpaintingAnnotation)
@classmethod
def parameters(cls):
parameters = super().parameters()
parameters.update({
'dst_width': NumberField(
value_type=int, optional=True, min_value=1, description="Destination width for resize"
),
'dst_height': NumberField(
value_type=int, optional=True, min_value=1, description="Destination height for resize."
),
'size': NumberField(
value_type=int, optional=True, min_value=1,
description="Destination size for resize for both dimensions (height and width)."
)
})
return parameters
def configure(self):
self.dst_height, self.dst_width = get_size_from_config(self.config, allow_none=True)
self._required_both = True
def process_image_with_metadata(self, annotation, prediction, image_metadata=None):
if self._deprocess_predictions:
self._calculate_scale(image_metadata)
self.process_image(annotation, prediction)
def _calculate_scale(self, image_metadata):
if image_metadata is None:
self.x_scale, self.y_scale = 1, 1
return
image_h, image_w = image_metadata['image_size'][:2]
input_shape = next(iter(image_metadata['input_shape'].values()))
input_h, input_w = input_shape[2:] if input_shape[1] in [1, 3, 4] else input_shape[1:3]
self.x_scale = image_w / input_w
self.y_scale = image_h / input_h
def process_image(self, annotations, predictions):
@singledispatch
def resize(entry, height, width):
return entry
@resize.register(StyleTransferAnnotation)
@resize.register(StyleTransferPrediction)
@resize.register(SuperResolutionAnnotation)
@resize.register(SuperResolutionPrediction)
@resize.register(ImageProcessingAnnotation)
@resize.register(ImageProcessingPrediction)
@resize.register(ImageInpaintingAnnotation)
@resize.register(ImageInpaintingPrediction)
def _(entry, height, width):
entry.value = entry.value.astype(np.uint8)
data = Image.fromarray(entry.value)
data = data.resize((width, height), Image.BICUBIC)
entry.value = np.array(data)
return entry
@resize.register(SegmentationPrediction)
@resize.register(SalientRegionPrediction)
def _(entry, height, width):
if len(entry.mask.shape) == 2:
entry.mask = ResizeSegmentationMask.segm_resize(entry.mask, width, height)
return entry
entry_mask = []
for class_mask in entry.mask:
resized_mask = ResizeSegmentationMask.segm_resize(class_mask, width, height)
entry_mask.append(resized_mask)
entry.mask = np.array(entry_mask)
return entry
@resize.register(SegmentationAnnotation)
@resize.register(SalientRegionAnnotation)
def _(entry, height, width):
entry.mask = ResizeSegmentationMask.segm_resize(entry.mask, width, height)
return entry
@singledispatch
def set_sizes(entry):
height = self.dst_height if self.dst_height else self.image_size[0]
width = self.dst_width if self.dst_width else self.image_size[1]
return height, width
@set_sizes.register(SuperResolutionAnnotation)
def _(entry):
height = self.dst_height if self.dst_height else entry.value.shape[0]
width = self.dst_width if self.dst_width else entry.value.shape[1]
return height, width
@set_sizes.register(SuperResolutionPrediction)
def _(entry):
if self._deprocess_predictions:
height = int(entry.value.shape[0] * self.y_scale)
width = int(entry.value.shape[1] * self.x_scale)
return height, width
height = self.dst_height if self.dst_height else entry.value.shape[0]
width = self.dst_width if self.dst_width else entry.value.shape[1]
return height, width
@set_sizes.register(SegmentationPrediction)
@set_sizes.register(SalientRegionPrediction)
def _(entry):
if self._deprocess_predictions:
return self.image_size[:2]
height = self.dst_height if self.dst_height else self.image_size[0]
width = self.dst_width if self.dst_width else self.image_size[1]
return height, width
if self.apply_to is None or self.apply_to in [ApplyToOption.PREDICTION, ApplyToOption.ALL]:
if annotations:
for annotation, prediction in zip(annotations, predictions):
height, width = set_sizes(annotation or prediction)
resize(prediction, height, width)
else:
for prediction in predictions:
height, width = set_sizes(prediction)
resize(prediction, height, width)
if self.apply_to is None or self.apply_to in [ApplyToOption.ANNOTATION, ApplyToOption.ALL]:
for annotation in annotations:
if annotation is None:
continue
height, width = set_sizes(annotation)
resize(annotation, height, width)
return annotations, predictions
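# Illustrative configuration sketch (not part of the original module): how this
# postprocessor might be referenced from an accuracy-checker YAML config. The
# surrounding keys and values are assumptions for demonstration only.
#
#   postprocessing:
#     - type: resize
#       dst_width: 1024
#       dst_height: 768
#       apply_to: prediction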
|
the-stack_106_23515 | from fontTools.ttLib import TTFont
from afdko.pdflib.fontpdf import (doTitle, FontPDFParams)
from afdko.pdflib.otfpdf import txPDFFont
from afdko.pdflib.pdfgen import Canvas
from test_utils import get_input_path
OTF_FONT = 'OTF.otf'
# -----
# Tests
# -----
def test_doTitle_pageIncludeTitle_1():
with TTFont(get_input_path(OTF_FONT)) as otfont:
params = FontPDFParams()
assert params.pageIncludeTitle == 1
pdfFont = txPDFFont(otfont, params)
rt_canvas = Canvas("pdf_file_path")
assert rt_canvas._code == []
doTitle(rt_canvas, pdfFont, params, 1)
assert len(rt_canvas._code)
assert 'SourceSansPro-Black' in rt_canvas._code[1]
def test_doTitle_pageIncludeTitle_0():
with TTFont(get_input_path(OTF_FONT)) as otfont:
params = FontPDFParams()
params.pageIncludeTitle = 0
pdfFont = txPDFFont(otfont, params)
rt_canvas = Canvas("pdf_file_path")
assert rt_canvas._code == []
doTitle(rt_canvas, pdfFont, params, 1)
assert rt_canvas._code == []
|
the-stack_106_23517 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import hashlib
import inspect
import json
import traceback
import ray.cloudpickle as pickle
import ray.local_scheduler
import ray.signature as signature
import ray.worker
from ray.utils import (FunctionProperties, _random_string, is_cython,
push_error_to_driver)
def compute_actor_handle_id(actor_handle_id, num_forks):
"""Deterministically comopute an actor handle ID.
A new actor handle ID is generated when it is forked from another actor
handle. The new handle ID is computed as hash(old_handle_id || num_forks).
Args:
actor_handle_id (common.ObjectID): The original actor handle ID.
num_forks: The number of times the original actor handle has been
forked so far.
Returns:
An object ID for the new actor handle.
"""
handle_id_hash = hashlib.sha1()
handle_id_hash.update(actor_handle_id.id())
handle_id_hash.update(str(num_forks).encode("ascii"))
handle_id = handle_id_hash.digest()
assert len(handle_id) == 20
return ray.local_scheduler.ObjectID(handle_id)
def compute_actor_creation_function_id(class_id):
"""Compute the function ID for an actor creation task.
Args:
class_id: The ID of the actor class.
Returns:
The function ID of the actor creation event.
"""
return ray.local_scheduler.ObjectID(class_id)
def compute_actor_method_function_id(class_name, attr):
"""Get the function ID corresponding to an actor method.
Args:
class_name (str): The class name of the actor.
attr (str): The attribute name of the method.
Returns:
Function ID corresponding to the method.
"""
function_id_hash = hashlib.sha1()
function_id_hash.update(class_name)
function_id_hash.update(attr.encode("ascii"))
function_id = function_id_hash.digest()
assert len(function_id) == 20
return ray.local_scheduler.ObjectID(function_id)
def set_actor_checkpoint(worker, actor_id, checkpoint_index, checkpoint,
frontier):
"""Set the most recent checkpoint associated with a given actor ID.
Args:
worker: The worker to use to get the checkpoint.
actor_id: The actor ID of the actor to get the checkpoint for.
checkpoint_index: The number of tasks included in the checkpoint.
checkpoint: The state object to save.
frontier: The task frontier at the time of the checkpoint.
"""
actor_key = b"Actor:" + actor_id
worker.redis_client.hmset(
actor_key, {
"checkpoint_index": checkpoint_index,
"checkpoint": checkpoint,
"frontier": frontier,
})
def get_actor_checkpoint(worker, actor_id):
"""Get the most recent checkpoint associated with a given actor ID.
Args:
worker: The worker to use to get the checkpoint.
actor_id: The actor ID of the actor to get the checkpoint for.
Returns:
If a checkpoint exists, this returns a tuple of the number of tasks
included in the checkpoint, the saved checkpoint state, and the
task frontier at the time of the checkpoint. If no checkpoint
            exists, all objects are set to None. The checkpoint index is the
            number of tasks executed on the actor before the checkpoint was made.
"""
actor_key = b"Actor:" + actor_id
checkpoint_index, checkpoint, frontier = worker.redis_client.hmget(
actor_key, ["checkpoint_index", "checkpoint", "frontier"])
if checkpoint_index is not None:
checkpoint_index = int(checkpoint_index)
return checkpoint_index, checkpoint, frontier
def save_and_log_checkpoint(worker, actor):
"""Save a checkpoint on the actor and log any errors.
Args:
worker: The worker to use to log errors.
actor: The actor to checkpoint.
"""
try:
actor.__ray_checkpoint__()
except Exception:
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
# Log the error message.
ray.utils.push_error_to_driver(
worker.redis_client,
"checkpoint",
traceback_str,
driver_id=worker.task_driver_id.id(),
data={"actor_class": actor.__class__.__name__,
"function_name": actor.__ray_checkpoint__.__name__})
def restore_and_log_checkpoint(worker, actor):
"""Restore an actor from a checkpoint and log any errors.
Args:
worker: The worker to use to log errors.
actor: The actor to restore.
"""
checkpoint_resumed = False
try:
checkpoint_resumed = actor.__ray_checkpoint_restore__()
except Exception:
traceback_str = ray.utils.format_error_message(
traceback.format_exc())
# Log the error message.
ray.utils.push_error_to_driver(
worker.redis_client,
"checkpoint",
traceback_str,
driver_id=worker.task_driver_id.id(),
data={
"actor_class": actor.__class__.__name__,
"function_name":
actor.__ray_checkpoint_restore__.__name__})
return checkpoint_resumed
def make_actor_method_executor(worker, method_name, method, actor_imported):
"""Make an executor that wraps a user-defined actor method.
The wrapped method updates the worker's internal state and performs any
necessary checkpointing operations.
Args:
worker (Worker): The worker that is executing the actor.
method_name (str): The name of the actor method.
method (instancemethod): The actor method to wrap. This should be a
method defined on the actor class and should therefore take an
instance of the actor as the first argument.
actor_imported (bool): Whether the actor has been imported.
Checkpointing operations will not be run if this is set to False.
Returns:
A function that executes the given actor method on the worker's stored
instance of the actor. The function also updates the worker's
internal state to record the executed method.
"""
def actor_method_executor(dummy_return_id, actor, *args):
# Update the actor's task counter to reflect the task we're about to
# execute.
worker.actor_task_counter += 1
# If this is the first task to execute on the actor, try to resume from
# a checkpoint.
if actor_imported and worker.actor_task_counter == 1:
checkpoint_resumed = restore_and_log_checkpoint(worker, actor)
if checkpoint_resumed:
# NOTE(swang): Since we did not actually execute the __init__
# method, this will put None as the return value. If the
# __init__ method is supposed to return multiple values, an
# exception will be logged.
return
# Determine whether we should checkpoint the actor.
checkpointing_on = (actor_imported and
worker.actor_checkpoint_interval > 0)
# We should checkpoint the actor if user checkpointing is on, we've
# executed checkpoint_interval tasks since the last checkpoint, and the
# method we're about to execute is not a checkpoint.
save_checkpoint = (checkpointing_on and
(worker.actor_task_counter %
worker.actor_checkpoint_interval == 0 and
method_name != "__ray_checkpoint__"))
# Execute the assigned method and save a checkpoint if necessary.
try:
method_returns = method(actor, *args)
except Exception:
# Save the checkpoint before allowing the method exception to be
# thrown.
if save_checkpoint:
save_and_log_checkpoint(worker, actor)
raise
else:
# Save the checkpoint before returning the method's return values.
if save_checkpoint:
save_and_log_checkpoint(worker, actor)
return method_returns
return actor_method_executor
def fetch_and_register_actor(actor_class_key, resources, worker):
"""Import an actor.
This will be called by the worker's import thread when the worker receives
the actor_class export, assuming that the worker is an actor for that
class.
Args:
actor_class_key: The key in Redis to use to fetch the actor.
resources: The resources required for this actor's lifetime.
worker: The worker to use.
"""
actor_id_str = worker.actor_id
(driver_id, class_id, class_name,
module, pickled_class, checkpoint_interval,
actor_method_names,
actor_method_num_return_vals) = worker.redis_client.hmget(
actor_class_key, ["driver_id", "class_id", "class_name", "module",
"class", "checkpoint_interval",
"actor_method_names",
"actor_method_num_return_vals"])
actor_name = class_name.decode("ascii")
module = module.decode("ascii")
checkpoint_interval = int(checkpoint_interval)
actor_method_names = json.loads(actor_method_names.decode("ascii"))
actor_method_num_return_vals = json.loads(
actor_method_num_return_vals.decode("ascii"))
# Create a temporary actor with some temporary methods so that if the actor
# fails to be unpickled, the temporary actor can be used (just to produce
# error messages and to prevent the driver from hanging).
class TemporaryActor(object):
pass
worker.actors[actor_id_str] = TemporaryActor()
worker.actor_checkpoint_interval = checkpoint_interval
def temporary_actor_method(*xs):
raise Exception("The actor with name {} failed to be imported, and so "
"cannot execute this method".format(actor_name))
# Register the actor method signatures.
register_actor_signatures(worker, driver_id, class_id, class_name,
actor_method_names, actor_method_num_return_vals)
# Register the actor method executors.
for actor_method_name in actor_method_names:
function_id = compute_actor_method_function_id(class_name,
actor_method_name).id()
temporary_executor = make_actor_method_executor(worker,
actor_method_name,
temporary_actor_method,
actor_imported=False)
worker.functions[driver_id][function_id] = (actor_method_name,
temporary_executor)
worker.num_task_executions[driver_id][function_id] = 0
try:
unpickled_class = pickle.loads(pickled_class)
worker.actor_class = unpickled_class
except Exception:
# If an exception was thrown when the actor was imported, we record the
# traceback and notify the scheduler of the failure.
traceback_str = ray.utils.format_error_message(traceback.format_exc())
# Log the error message.
push_error_to_driver(worker.redis_client, "register_actor_signatures",
traceback_str, driver_id,
data={"actor_id": actor_id_str})
# TODO(rkn): In the future, it might make sense to have the worker exit
# here. However, currently that would lead to hanging if someone calls
# ray.get on a method invoked on the actor.
else:
# TODO(pcm): Why is the below line necessary?
unpickled_class.__module__ = module
worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
actor_methods = inspect.getmembers(
unpickled_class, predicate=(lambda x: (inspect.isfunction(x) or
inspect.ismethod(x) or
is_cython(x))))
for actor_method_name, actor_method in actor_methods:
function_id = compute_actor_method_function_id(
class_name, actor_method_name).id()
executor = make_actor_method_executor(worker, actor_method_name,
actor_method,
actor_imported=True)
worker.functions[driver_id][function_id] = (actor_method_name,
executor)
# We do not set worker.function_properties[driver_id][function_id]
# because we currently do need the actor worker to submit new tasks
# for the actor.
def register_actor_signatures(worker, driver_id, class_id, class_name,
actor_method_names,
actor_method_num_return_vals,
actor_creation_resources=None,
actor_method_cpus=None):
"""Register an actor's method signatures in the worker.
Args:
worker: The worker to register the signatures on.
driver_id: The ID of the driver that this actor is associated with.
class_id: The ID of the actor class.
class_name: The name of the actor class.
actor_method_names: The names of the methods to register.
actor_method_num_return_vals: A list of the number of return values for
each of the actor's methods.
actor_creation_resources: The resources required by the actor creation
task.
actor_method_cpus: The number of CPUs required by each actor method.
"""
assert len(actor_method_names) == len(actor_method_num_return_vals)
for actor_method_name, num_return_vals in zip(
actor_method_names, actor_method_num_return_vals):
# TODO(rkn): When we create a second actor, we are probably overwriting
# the values from the first actor here. This may or may not be a
# problem.
function_id = compute_actor_method_function_id(class_name,
actor_method_name).id()
worker.function_properties[driver_id][function_id] = (
# The extra return value is an actor dummy object.
# In the cases where actor_method_cpus is None, that value should
# never be used.
FunctionProperties(num_return_vals=num_return_vals + 1,
resources={"CPU": actor_method_cpus},
max_calls=0))
if actor_creation_resources is not None:
# Also register the actor creation task.
function_id = compute_actor_creation_function_id(class_id)
worker.function_properties[driver_id][function_id.id()] = (
# The extra return value is an actor dummy object.
FunctionProperties(num_return_vals=0 + 1,
resources=actor_creation_resources,
max_calls=0))
def publish_actor_class_to_key(key, actor_class_info, worker):
"""Push an actor class definition to Redis.
The is factored out as a separate function because it is also called
on cached actor class definitions when a worker connects for the first
time.
Args:
key: The key to store the actor class info at.
actor_class_info: Information about the actor class.
worker: The worker to use to connect to Redis.
"""
# We set the driver ID here because it may not have been available when the
# actor class was defined.
actor_class_info["driver_id"] = worker.task_driver_id.id()
worker.redis_client.hmset(key, actor_class_info)
worker.redis_client.rpush("Exports", key)
def export_actor_class(class_id, Class, actor_method_names,
actor_method_num_return_vals,
checkpoint_interval, worker):
key = b"ActorClass:" + class_id
actor_class_info = {
"class_name": Class.__name__,
"module": Class.__module__,
"class": pickle.dumps(Class),
"checkpoint_interval": checkpoint_interval,
"actor_method_names": json.dumps(list(actor_method_names)),
"actor_method_num_return_vals": json.dumps(
actor_method_num_return_vals)}
if worker.mode is None:
# This means that 'ray.init()' has not been called yet and so we must
# cache the actor class definition and export it when 'ray.init()' is
# called.
assert worker.cached_remote_functions_and_actors is not None
worker.cached_remote_functions_and_actors.append(
("actor", (key, actor_class_info)))
# This caching code path is currently not used because we only export
# actor class definitions lazily when we instantiate the actor for the
# first time.
assert False, "This should be unreachable."
else:
publish_actor_class_to_key(key, actor_class_info, worker)
# TODO(rkn): Currently we allow actor classes to be defined within tasks.
# I tried to disable this, but it may be necessary because of
# https://github.com/ray-project/ray/issues/1146.
def export_actor(actor_id, class_id, class_name, actor_method_names,
actor_method_num_return_vals, actor_creation_resources,
actor_method_cpus, worker):
"""Export an actor to redis.
Args:
actor_id (common.ObjectID): The ID of the actor.
class_id (str): A random ID for the actor class.
class_name (str): The actor class name.
actor_method_names (list): A list of the names of this actor's methods.
actor_method_num_return_vals: A list of the number of return values for
each of the actor's methods.
actor_creation_resources: A dictionary mapping resource name to the
quantity of that resource required by the actor.
actor_method_cpus: The number of CPUs required by actor methods.
"""
ray.worker.check_main_thread()
if worker.mode is None:
raise Exception("Actors cannot be created before Ray has been "
"started. You can start Ray with 'ray.init()'.")
driver_id = worker.task_driver_id.id()
register_actor_signatures(
worker, driver_id, class_id, class_name, actor_method_names,
actor_method_num_return_vals,
actor_creation_resources=actor_creation_resources,
actor_method_cpus=actor_method_cpus)
args = [class_id]
function_id = compute_actor_creation_function_id(class_id)
return worker.submit_task(function_id, args, actor_creation_id=actor_id)[0]
def method(*args, **kwargs):
assert len(args) == 0
assert len(kwargs) == 1
assert "num_return_vals" in kwargs
num_return_vals = kwargs["num_return_vals"]
def annotate_method(method):
method.__ray_num_return_vals__ = num_return_vals
return method
return annotate_method
# Create objects to wrap method invocations. This is done so that we can
# invoke methods with actor.method.remote() instead of actor.method().
class ActorMethod(object):
def __init__(self, actor, method_name):
self._actor = actor
self._method_name = method_name
def __call__(self, *args, **kwargs):
raise Exception("Actor methods cannot be called directly. Instead "
"of running 'object.{}()', try "
"'object.{}.remote()'."
.format(self._method_name, self._method_name))
def remote(self, *args, **kwargs):
return self._actor._actor_method_call(
self._method_name, args=args, kwargs=kwargs,
dependency=self._actor._ray_actor_cursor)
class ActorHandleWrapper(object):
"""A wrapper for the contents of an ActorHandle.
This is essentially just a dictionary, but it is used so that the recipient
can tell that an argument is an ActorHandle.
"""
def __init__(self, actor_id, class_id, actor_handle_id, actor_cursor,
actor_counter, actor_method_names,
actor_method_num_return_vals, method_signatures,
checkpoint_interval, class_name,
actor_creation_dummy_object_id,
actor_creation_resources, actor_method_cpus):
# TODO(rkn): Some of these fields are probably not necessary. We should
# strip out the unnecessary fields to keep actor handles lightweight.
self.actor_id = actor_id
self.class_id = class_id
self.actor_handle_id = actor_handle_id
self.actor_cursor = actor_cursor
self.actor_counter = actor_counter
self.actor_method_names = actor_method_names
self.actor_method_num_return_vals = actor_method_num_return_vals
# TODO(swang): Fetch this information from Redis so that we don't have
# to fall back to pickle.
self.method_signatures = method_signatures
self.checkpoint_interval = checkpoint_interval
self.class_name = class_name
self.actor_creation_dummy_object_id = actor_creation_dummy_object_id
self.actor_creation_resources = actor_creation_resources
self.actor_method_cpus = actor_method_cpus
def wrap_actor_handle(actor_handle):
"""Wrap the ActorHandle to store the fields.
Args:
actor_handle: The ActorHandle instance to wrap.
Returns:
An ActorHandleWrapper instance that stores the ActorHandle's fields.
"""
wrapper = ActorHandleWrapper(
actor_handle._ray_actor_id,
actor_handle._ray_class_id,
compute_actor_handle_id(actor_handle._ray_actor_handle_id,
actor_handle._ray_actor_forks),
actor_handle._ray_actor_cursor,
0, # Reset the actor counter.
actor_handle._ray_actor_method_names,
actor_handle._ray_actor_method_num_return_vals,
actor_handle._ray_method_signatures,
actor_handle._ray_checkpoint_interval,
actor_handle._ray_class_name,
actor_handle._ray_actor_creation_dummy_object_id,
actor_handle._ray_actor_creation_resources,
actor_handle._ray_actor_method_cpus)
actor_handle._ray_actor_forks += 1
return wrapper
def unwrap_actor_handle(worker, wrapper):
"""Make an ActorHandle from the stored fields.
Args:
worker: The worker that is unwrapping the actor handle.
wrapper: An ActorHandleWrapper instance to unwrap.
Returns:
The unwrapped ActorHandle instance.
"""
driver_id = worker.task_driver_id.id()
register_actor_signatures(worker, driver_id, wrapper.class_id,
wrapper.class_name, wrapper.actor_method_names,
wrapper.actor_method_num_return_vals,
wrapper.actor_creation_resources,
wrapper.actor_method_cpus)
actor_handle_class = make_actor_handle_class(wrapper.class_name)
actor_object = actor_handle_class.__new__(actor_handle_class)
actor_object._manual_init(
wrapper.actor_id,
wrapper.class_id,
wrapper.actor_handle_id,
wrapper.actor_cursor,
wrapper.actor_counter,
wrapper.actor_method_names,
wrapper.actor_method_num_return_vals,
wrapper.method_signatures,
wrapper.checkpoint_interval,
wrapper.actor_creation_dummy_object_id,
wrapper.actor_creation_resources,
wrapper.actor_method_cpus)
return actor_object
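# Illustrative sketch (not part of the original module): wrap_actor_handle and
# unwrap_actor_handle form the round trip used when an actor handle is passed
# into another task. ``sender_handle`` and ``receiving_worker`` below are
# hypothetical names.
#
#     wrapper = wrap_actor_handle(sender_handle)               # on the sending side
#     handle = unwrap_actor_handle(receiving_worker, wrapper)  # on the receiving side
#     handle.some_method.remote()   # tasks are routed to the same actor instance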
class ActorHandleParent(object):
"""This is the parent class of all ActorHandle classes.
This enables us to identify actor handles by checking if an object obj
satisfies isinstance(obj, ActorHandleParent).
"""
pass
def make_actor_handle_class(class_name):
class ActorHandle(ActorHandleParent):
def __init__(self, *args, **kwargs):
raise Exception("Actor classes cannot be instantiated directly. "
"Instead of running '{}()', try '{}.remote()'."
.format(class_name, class_name))
@classmethod
def remote(cls, *args, **kwargs):
raise NotImplementedError("The classmethod remote() can only be "
"called on the original Class.")
def _manual_init(self, actor_id, class_id, actor_handle_id,
actor_cursor, actor_counter, actor_method_names,
actor_method_num_return_vals, method_signatures,
checkpoint_interval, actor_creation_dummy_object_id,
actor_creation_resources, actor_method_cpus):
self._ray_actor_id = actor_id
self._ray_class_id = class_id
self._ray_actor_handle_id = actor_handle_id
self._ray_actor_cursor = actor_cursor
self._ray_actor_counter = actor_counter
self._ray_actor_method_names = actor_method_names
self._ray_actor_method_num_return_vals = (
actor_method_num_return_vals)
self._ray_method_signatures = method_signatures
self._ray_checkpoint_interval = checkpoint_interval
self._ray_class_name = class_name
self._ray_actor_forks = 0
self._ray_actor_creation_dummy_object_id = (
actor_creation_dummy_object_id)
self._ray_actor_creation_resources = actor_creation_resources
self._ray_actor_method_cpus = actor_method_cpus
def _actor_method_call(self, method_name, args=None, kwargs=None,
dependency=None):
"""Method execution stub for an actor handle.
This is the function that executes when
`actor.method_name.remote(*args, **kwargs)` is called. Instead of
executing locally, the method is packaged as a task and scheduled
to the remote actor instance.
Args:
self: The local actor handle.
method_name: The name of the actor method to execute.
args: A list of arguments for the actor method.
kwargs: A dictionary of keyword arguments for the actor method.
dependency: The object ID that this method is dependent on.
Defaults to None, for no dependencies. Most tasks should
pass in the dummy object returned by the preceding task.
Some tasks, such as checkpoint and terminate methods, have
no dependencies.
Returns:
object_ids: A list of object IDs returned by the remote actor
method.
"""
ray.worker.check_connected()
ray.worker.check_main_thread()
function_signature = self._ray_method_signatures[method_name]
if args is None:
args = []
if kwargs is None:
kwargs = {}
args = signature.extend_args(function_signature, args, kwargs)
# Execute functions locally if Ray is run in PYTHON_MODE
# Copy args to prevent the function from mutating them.
if ray.worker.global_worker.mode == ray.PYTHON_MODE:
return getattr(
ray.worker.global_worker.actors[self._ray_actor_id],
method_name)(*copy.deepcopy(args))
# Add the execution dependency.
if dependency is None:
execution_dependencies = []
else:
execution_dependencies = [dependency]
is_actor_checkpoint_method = (method_name == "__ray_checkpoint__")
function_id = compute_actor_method_function_id(
self._ray_class_name, method_name)
object_ids = ray.worker.global_worker.submit_task(
function_id, args, actor_id=self._ray_actor_id,
actor_handle_id=self._ray_actor_handle_id,
actor_counter=self._ray_actor_counter,
is_actor_checkpoint_method=is_actor_checkpoint_method,
actor_creation_dummy_object_id=(
self._ray_actor_creation_dummy_object_id),
execution_dependencies=execution_dependencies)
# Update the actor counter and cursor to reflect the most recent
# invocation.
self._ray_actor_counter += 1
self._ray_actor_cursor = object_ids.pop()
# The last object returned is the dummy object that should be
# passed in to the next actor method. Do not return it to the user.
if len(object_ids) == 1:
return object_ids[0]
elif len(object_ids) > 1:
return object_ids
# Make tab completion work.
def __dir__(self):
return self._ray_actor_method_names
def __getattribute__(self, attr):
try:
# Check whether this is an actor method.
actor_method_names = object.__getattribute__(
self, "_ray_actor_method_names")
if attr in actor_method_names:
# We create the ActorMethod on the fly here so that the
# ActorHandle doesn't need a reference to the ActorMethod.
# The ActorMethod has a reference to the ActorHandle and
                    # this was causing cyclic references which were preventing
# object deallocation from behaving in a predictable
# manner.
actor_method_cls = ActorMethod
return actor_method_cls(self, attr)
except AttributeError:
pass
# If the requested attribute is not a registered method, fall back
# to default __getattribute__.
return object.__getattribute__(self, attr)
def __repr__(self):
return "Actor(" + self._ray_actor_id.hex() + ")"
def __reduce__(self):
raise Exception("Actor objects cannot be pickled.")
def __del__(self):
"""Kill the worker that is running this actor."""
# TODO(swang): Also clean up forked actor handles.
# Kill the worker if this is the original actor handle, created
# with Class.remote().
if (ray.worker.global_worker.connected and
self._ray_actor_handle_id.id() == ray.worker.NIL_ACTOR_ID):
# TODO(rkn): Should we be passing in the actor cursor as a
# dependency here?
self._actor_method_call("__ray_terminate__",
args=[self._ray_actor_id.id()])
return ActorHandle
def actor_handle_from_class(Class, class_id, actor_creation_resources,
checkpoint_interval, actor_method_cpus):
class_name = Class.__name__.encode("ascii")
actor_handle_class = make_actor_handle_class(class_name)
exported = []
class ActorHandle(actor_handle_class):
@classmethod
def remote(cls, *args, **kwargs):
if ray.worker.global_worker.mode is None:
raise Exception("Actors cannot be created before ray.init() "
"has been called.")
actor_id = ray.local_scheduler.ObjectID(_random_string())
# The ID for this instance of ActorHandle. These should be unique
# across instances with the same _ray_actor_id.
actor_handle_id = ray.local_scheduler.ObjectID(
ray.worker.NIL_ACTOR_ID)
# The actor cursor is a dummy object representing the most recent
# actor method invocation. For each subsequent method invocation,
# the current cursor should be added as a dependency, and then
# updated to reflect the new invocation.
actor_cursor = None
# The number of actor method invocations that we've called so far.
actor_counter = 0
# Get the actor methods of the given class.
actor_methods = inspect.getmembers(
Class, predicate=(lambda x: (inspect.isfunction(x) or
inspect.ismethod(x) or
is_cython(x))))
# Extract the signatures of each of the methods. This will be used
# to catch some errors if the methods are called with inappropriate
# arguments.
method_signatures = dict()
for k, v in actor_methods:
# Print a warning message if the method signature is not
# supported. We don't raise an exception because if the actor
# inherits from a class that has a method whose signature we
                # don't support, there may not be much the user can do about
# it.
signature.check_signature_supported(v, warn=True)
method_signatures[k] = signature.extract_signature(
v, ignore_first=True)
actor_method_names = [method_name for method_name, _ in
actor_methods]
actor_method_num_return_vals = []
for _, method in actor_methods:
if hasattr(method, "__ray_num_return_vals__"):
actor_method_num_return_vals.append(
method.__ray_num_return_vals__)
else:
actor_method_num_return_vals.append(1)
# Do not export the actor class or the actor if run in PYTHON_MODE
# Instead, instantiate the actor locally and add it to
# global_worker's dictionary
if ray.worker.global_worker.mode == ray.PYTHON_MODE:
ray.worker.global_worker.actors[actor_id] = (
Class.__new__(Class))
else:
# Export the actor.
if not exported:
export_actor_class(class_id, Class, actor_method_names,
actor_method_num_return_vals,
checkpoint_interval,
ray.worker.global_worker)
exported.append(0)
actor_cursor = export_actor(actor_id, class_id, class_name,
actor_method_names,
actor_method_num_return_vals,
actor_creation_resources,
actor_method_cpus,
ray.worker.global_worker)
# Instantiate the actor handle.
actor_object = cls.__new__(cls)
actor_object._manual_init(actor_id, class_id, actor_handle_id,
actor_cursor, actor_counter,
actor_method_names,
actor_method_num_return_vals,
method_signatures, checkpoint_interval,
actor_cursor, actor_creation_resources,
actor_method_cpus)
# Call __init__ as a remote function.
if "__init__" in actor_object._ray_actor_method_names:
actor_object._actor_method_call("__init__", args=args,
kwargs=kwargs,
dependency=actor_cursor)
else:
print("WARNING: this object has no __init__ method.")
return actor_object
return ActorHandle
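# Illustrative sketch (not part of the original module): the ActorHandle class
# assembled above is what users normally obtain through the ``@ray.remote``
# decorator. A minimal, hypothetical usage pattern:
#
#     @ray.remote
#     class Counter(object):
#         def __init__(self):
#             self.value = 0
#
#         def increment(self):
#             self.value += 1
#             return self.value
#
#     counter = Counter.remote()              # invokes ActorHandle.remote() above
#     object_id = counter.increment.remote()  # schedules a task on the actor
#     print(ray.get(object_id))               # 1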
def make_actor(cls, resources, checkpoint_interval, actor_method_cpus):
if checkpoint_interval == 0:
raise Exception("checkpoint_interval must be greater than 0.")
# Modify the class to have an additional method that will be used for
# terminating the worker.
class Class(cls):
def __ray_terminate__(self, actor_id):
# Record that this actor has been removed so that if this node
# dies later, the actor won't be recreated. Alternatively, we could
# remove the actor key from Redis here.
ray.worker.global_worker.redis_client.hset(b"Actor:" + actor_id,
"removed", True)
# Disconnect the worker from the local scheduler. The point of this
# is so that when the worker kills itself below, the local
# scheduler won't push an error message to the driver.
ray.worker.global_worker.local_scheduler_client.disconnect()
import os
os._exit(0)
def __ray_save_checkpoint__(self):
if hasattr(self, "__ray_save__"):
object_to_serialize = self.__ray_save__()
else:
object_to_serialize = self
return pickle.dumps(object_to_serialize)
@classmethod
def __ray_restore_from_checkpoint__(cls, pickled_checkpoint):
checkpoint = pickle.loads(pickled_checkpoint)
if hasattr(cls, "__ray_restore__"):
actor_object = cls.__new__(cls)
actor_object.__ray_restore__(checkpoint)
else:
# TODO(rkn): It's possible that this will cause problems. When
# you unpickle the same object twice, the two objects will not
# have the same class.
actor_object = checkpoint
return actor_object
def __ray_checkpoint__(self):
"""Save a checkpoint.
This task saves the current state of the actor, the current task
frontier according to the local scheduler, and the checkpoint index
(number of tasks executed so far).
"""
worker = ray.worker.global_worker
checkpoint_index = worker.actor_task_counter
# Get the state to save.
checkpoint = self.__ray_save_checkpoint__()
# Get the current task frontier, per actor handle.
# NOTE(swang): This only includes actor handles that the local
# scheduler has seen. Handle IDs for which no task has yet reached
# the local scheduler will not be included, and may not be runnable
# on checkpoint resumption.
actor_id = ray.local_scheduler.ObjectID(worker.actor_id)
frontier = worker.local_scheduler_client.get_actor_frontier(
actor_id)
# Save the checkpoint in Redis. TODO(rkn): Checkpoints
# should not be stored in Redis. Fix this.
set_actor_checkpoint(worker, worker.actor_id, checkpoint_index,
checkpoint, frontier)
def __ray_checkpoint_restore__(self):
"""Restore a checkpoint.
This task looks for a saved checkpoint and if found, restores the
state of the actor, the task frontier in the local scheduler, and
the checkpoint index (number of tasks executed so far).
Returns:
A bool indicating whether a checkpoint was resumed.
"""
worker = ray.worker.global_worker
# Get the most recent checkpoint stored, if any.
checkpoint_index, checkpoint, frontier = get_actor_checkpoint(
worker, worker.actor_id)
# Try to resume from the checkpoint.
checkpoint_resumed = False
if checkpoint_index is not None:
# Load the actor state from the checkpoint.
worker.actors[worker.actor_id] = (
worker.actor_class.__ray_restore_from_checkpoint__(
checkpoint))
# Set the number of tasks executed so far.
worker.actor_task_counter = checkpoint_index
# Set the actor frontier in the local scheduler.
worker.local_scheduler_client.set_actor_frontier(frontier)
checkpoint_resumed = True
return checkpoint_resumed
Class.__module__ = cls.__module__
Class.__name__ = cls.__name__
class_id = _random_string()
return actor_handle_from_class(Class, class_id, resources,
checkpoint_interval, actor_method_cpus)
ray.worker.global_worker.fetch_and_register_actor = fetch_and_register_actor
ray.worker.global_worker.make_actor = make_actor
|
the-stack_106_23520 | #!/usr/bin/env python3
# Copyright 2014 Gaurav Kumar. Apache 2.0
# Gets the unique speakers from the file created by fsp_make_trans.pl
# Note that if a speaker appears multiple times, it is categorized as female
tmpFileLocation = "data/local/tmp/spk2gendertmp"
tmpFile = None
try:
tmpFile = open(tmpFileLocation)
except IOError:
print("The file spk2gendertmp does not exist. Run fsp_make_trans.pl first?")
speakers = {}
for line in tmpFile:
comp = line.split(" ")
if comp[0] in speakers:
speakers[comp[0]] = "f"
else:
speakers[comp[0]] = comp[1]
for speaker, gender in speakers.items():
print(speaker + " " + gender)
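# Illustrative example (not part of the original script), assuming the
# spk2gendertmp lines look like "<speaker-id> <gender>": if the same
# (hypothetical) id appears twice,
#     sp001-A m
#     sp001-A m
# it is printed once with gender "f", as described in the header comment.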
|
the-stack_106_23521 | from git import Repo
# Usage: python3 misc/make_changelog.py 0.5.9
import sys
ver = sys.argv[1]
g = Repo('.')
commits = list(g.iter_commits('master', max_count=200))
begin, end = -1, 0
def format(c):
return f'{c.summary} (by **{c.author}**)'
print('Notable changes:')
notable_changes = {}
all_changes = []
details = {
'cpu': 'CPU backends',
'cuda': 'CUDA backend',
'doc': 'Documentation',
'infra': 'Infrastructure',
'ir': 'Intermediate representation',
'lang': 'Language and syntax',
'metal': 'Metal backend',
'opengl': 'OpenGL backend',
'misc': 'Miscellaneous',
'opt': 'Optimization',
}
print(f'- (, 2020) v{ver} released')
for i, c in enumerate(commits):
s = format(c)
if s.startswith('[release]'):
break
tags = []
while s[0] == '[':
r = s.find(']')
tag = s[1:r]
tags.append(tag)
s = s[r + 1:]
for tag in tags:
if tag[0].isupper():
tag = tag.lower()
if tag not in notable_changes:
notable_changes[tag] = []
notable_changes[tag].append(s)
if s.startswith('[release]'):
break
all_changes.append(format(c))
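# Illustrative example (not part of the original script): a commit summary such
# as "[cuda][opt] Speed up reduction kernels" is split by the loop above into
# tags ["cuda", "opt"] and the remaining text " Speed up reduction kernels"
# (leading space kept), so the change appears under both "CUDA backend" and
# "Optimization". Note that the while-loop stops at the first non-"[" character,
# so a space between bracketed tags ends tag parsing after the first tag.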
for tag in sorted(notable_changes.keys()):
print(f' - **{details[tag]}**')
for item in notable_changes[tag]:
print(f' -{item}')
print(
f' - [Full log](https://github.com/taichi-dev/taichi/releases/tag/{ver})'
)
print()
print('Full changelog:')
for c in all_changes:
print(f' - {c}')
|
the-stack_106_23522 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
@pytest.fixture
def app_context():
"""
A fixture for running the test inside an app context.
"""
from superset.app import create_app
app = create_app()
with app.app_context():
yield
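# Illustrative usage (not part of the original module): a test that needs the
# Flask application context simply lists the fixture by name; the test body
# below is hypothetical.
#
#     def test_current_app_is_available(app_context):
#         from flask import current_app
#         assert current_app is not None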
|
the-stack_106_23523 | from flask import jsonify
def avg(arr):
n = len(arr)
sum = 0
# Traverse through all array elements
for i in range(n):
sum = sum + arr[i]
return sum/n
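# Illustrative example (not part of the original handler): avg divides the
# running total by the element count, e.g. avg([3, 4, 8]) returns 5.0 under
# Python 3 division.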
def handle(event, context):
if event.method == 'POST':
x = [int(i) for i in str(event.body,'utf-8').split(",")]
result=avg(x)
return {
"statusCode": 200,
"body": result
} |
the-stack_106_23525 | """
elasticapm.base
~~~~~~~~~~
:copyright: (c) 2011-2017 Elasticsearch
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import os
import platform
import socket
import sys
import threading
import time
import zlib
from copy import deepcopy
import elasticapm
from elasticapm.conf import Config, constants
from elasticapm.traces import TransactionsStore, get_transaction
from elasticapm.transport.base import TransportException
from elasticapm.utils import compat, is_master_process
from elasticapm.utils import json_encoder as json
from elasticapm.utils import stacks, varmap
from elasticapm.utils.encoding import keyword_field, shorten, transform
from elasticapm.utils.module_import import import_string
__all__ = ("Client",)
class ClientState(object):
ONLINE = 1
ERROR = 0
def __init__(self):
self.status = self.ONLINE
self.last_check = None
self.retry_number = 0
def should_try(self):
if self.status == self.ONLINE:
return True
interval = min(self.retry_number, 6) ** 2
if time.time() - self.last_check > interval:
return True
return False
def set_fail(self):
self.status = self.ERROR
self.retry_number += 1
self.last_check = time.time()
def set_success(self):
self.status = self.ONLINE
self.last_check = None
self.retry_number = 0
def did_fail(self):
return self.status == self.ERROR
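# Illustrative sketch (not part of the original module): ClientState implements
# a simple quadratic back-off. After n consecutive failures should_try() only
# returns True once min(n, 6) ** 2 seconds have passed since the last attempt
# (roughly 1, 4, 9, 16, 25 and at most 36 seconds); a success resets the state.
#
#     state = ClientState()
#     state.set_fail()       # first failure: retry allowed after ~1 second
#     state.should_try()     # False until that interval has elapsed
#     state.set_success()    # back to ONLINE, sending allowed immediately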
class Client(object):
"""
The base ElasticAPM client, which handles communication over the
HTTP API to the APM Server.
    Will read default configuration from the environment variables
``ELASTIC_APM_APP_NAME`` and ``ELASTIC_APM_SECRET_TOKEN``
if available. ::
>>> from elasticapm import Client
>>> # Read configuration from environment
>>> client = Client()
>>> # Configure the client manually
>>> client = Client(
>>> include_paths=['my.package'],
>>> service_name='myapp',
>>> secret_token='secret_token',
>>> )
>>> # Record an exception
>>> try:
>>> 1/0
>>> except ZeroDivisionError:
>>> ident = client.capture_exception()
>>> print ("Exception caught; reference is %%s" %% ident)
"""
logger = logging.getLogger("elasticapm")
def __init__(self, config=None, **inline):
# configure loggers first
cls = self.__class__
self.logger = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
self.error_logger = logging.getLogger("elasticapm.errors")
self.state = ClientState()
self.transaction_store = None
self.processors = []
self.filter_exception_types_dict = {}
self._send_timer = None
self._transports = {}
self._service_info = None
self.config = Config(config, inline_dict=inline)
if self.config.errors:
for msg in self.config.errors.values():
self.error_logger.error(msg)
self.config.disable_send = True
self._transport_class = import_string(self.config.transport_class)
for exc_to_filter in self.config.filter_exception_types or []:
exc_to_filter_type = exc_to_filter.split(".")[-1]
exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module
self.processors = [import_string(p) for p in self.config.processors] if self.config.processors else []
if platform.python_implementation() == "PyPy":
# PyPy introduces a `_functools.partial.__call__` frame due to our use
# of `partial` in AbstractInstrumentedModule
skip_modules = ("elasticapm.", "_functools")
else:
skip_modules = ("elasticapm.",)
def frames_collector_func():
return self._get_stack_info_for_trace(
stacks.iter_stack_frames(skip_top_modules=skip_modules),
library_frame_context_lines=self.config.source_lines_span_library_frames,
in_app_frame_context_lines=self.config.source_lines_span_app_frames,
with_locals=self.config.collect_local_variables in ("all", "transactions"),
locals_processor_func=lambda local_var: varmap(
lambda k, v: shorten(
v,
list_length=self.config.local_var_list_max_length,
string_length=self.config.local_var_max_length,
),
local_var,
),
)
self.transaction_store = TransactionsStore(
frames_collector_func=frames_collector_func,
collect_frequency=self.config.flush_interval,
sample_rate=self.config.transaction_sample_rate,
max_spans=self.config.transaction_max_spans,
span_frames_min_duration=self.config.span_frames_min_duration_ms,
max_queue_size=self.config.max_queue_size,
ignore_patterns=self.config.transactions_ignore_patterns,
)
self.include_paths_re = stacks.get_path_regex(self.config.include_paths) if self.config.include_paths else None
self.exclude_paths_re = stacks.get_path_regex(self.config.exclude_paths) if self.config.exclude_paths else None
compat.atexit_register(self.close)
def get_handler(self, name):
return import_string(name)
def capture(self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs):
"""
Captures and processes an event and pipes it off to Client.send.
"""
if event_type == "Exception":
# never gather log stack for exceptions
stack = False
data = self._build_msg_for_logging(
event_type, date=date, context=context, custom=custom, stack=stack, handled=handled, **kwargs
)
if data:
url = self.config.server_url + constants.ERROR_API_PATH
self.send(url, **data)
return data["errors"][0]["id"]
def capture_message(self, message=None, param_message=None, **kwargs):
"""
Creates an event from ``message``.
>>> client.capture_message('My event just happened!')
"""
return self.capture("Message", message=message, param_message=param_message, **kwargs)
def capture_exception(self, exc_info=None, handled=True, **kwargs):
"""
Creates an event from an exception.
>>> try:
>>> exc_info = sys.exc_info()
>>> client.capture_exception(exc_info)
>>> finally:
>>> del exc_info
If exc_info is not provided, or is set to True, then this method will
perform the ``exc_info = sys.exc_info()`` and the requisite clean-up
for you.
"""
return self.capture("Exception", exc_info=exc_info, handled=handled, **kwargs)
def send(self, url, **data):
"""
Encodes and sends data to remote URL using configured transport
:param url: URL of endpoint
:param data: dictionary of data to send
"""
if self.config.disable_send or self._filter_exception_type(data):
return
payload = self.encode(data)
headers = {
"Content-Type": "application/json",
"Content-Encoding": "deflate",
"User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
}
if self.config.secret_token:
headers["Authorization"] = "Bearer %s" % self.config.secret_token
if not self.state.should_try():
message = self._get_log_message(payload)
self.error_logger.error(message)
return
try:
self._send_remote(url=url, data=payload, headers=headers)
except Exception as e:
self.handle_transport_fail(exception=e)
def encode(self, data):
"""
Serializes ``data`` into a raw string.
"""
return zlib.compress(json.dumps(data).encode("utf8"))
def decode(self, data):
"""
Unserializes a string, ``data``.
"""
return json.loads(zlib.decompress(data).decode("utf8"))
def begin_transaction(self, transaction_type):
"""Register the start of a transaction on the client
"""
return self.transaction_store.begin_transaction(transaction_type)
def end_transaction(self, name=None, result=""):
transaction = self.transaction_store.end_transaction(result, name)
if self.transaction_store.should_collect():
self._collect_transactions()
if not self._send_timer:
# send first batch of data after config._wait_to_first_send
self._start_send_timer(timeout=min(self.config._wait_to_first_send, self.config.flush_interval))
return transaction
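    # Illustrative usage (not part of the original module): instrumentation code
    # typically brackets a unit of work with these two calls; the names below
    # are hypothetical.
    #
    #     client.begin_transaction("request")
    #     handle_request()
    #     client.end_transaction("GET /users", "HTTP 2xx")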
def close(self):
self._collect_transactions()
if self._send_timer:
self._stop_send_timer()
for url, transport in list(self._transports.items()):
transport.close()
self._transports.pop(url)
def handle_transport_success(self, **kwargs):
"""
Success handler called by the transport
"""
if kwargs.get("url"):
self.logger.info("Logged error at " + kwargs["url"])
self.state.set_success()
def handle_transport_fail(self, exception=None, **kwargs):
"""
Failure handler called by the transport
"""
if isinstance(exception, TransportException):
message = self._get_log_message(exception.data)
self.error_logger.error(exception.args[0])
else:
# stdlib exception
message = str(exception)
self.error_logger.error(
"Failed to submit message: %r", message, exc_info=getattr(exception, "print_trace", True)
)
self.state.set_fail()
def _collect_transactions(self):
self._stop_send_timer()
transactions = []
if self.transaction_store:
for transaction in self.transaction_store.get_all():
for processor in self.processors:
transaction = processor(self, transaction)
transactions.append(transaction)
if not transactions:
return
data = self._build_msg({"transactions": transactions})
api_path = constants.TRANSACTIONS_API_PATH
self.send(self.config.server_url + api_path, **data)
self._start_send_timer()
def _start_send_timer(self, timeout=None):
timeout = timeout or self.config.flush_interval
self._send_timer = threading.Timer(timeout, self._collect_transactions)
self._send_timer.start()
def _stop_send_timer(self):
if self._send_timer and self._send_timer.is_alive() and not self._send_timer == threading.current_thread():
self._send_timer.cancel()
self._send_timer.join()
def _send_remote(self, url, data, headers=None):
if headers is None:
headers = {}
parsed = compat.urlparse.urlparse(url)
transport = self._get_transport(parsed)
if transport.async_mode:
transport.send_async(
data, headers, success_callback=self.handle_transport_success, fail_callback=self.handle_transport_fail
)
else:
url = transport.send(data, headers, timeout=self.config.server_timeout)
self.handle_transport_success(url=url)
def get_service_info(self):
if self._service_info:
return self._service_info
language_version = platform.python_version()
if hasattr(sys, "pypy_version_info"):
runtime_version = ".".join(map(str, sys.pypy_version_info[:3]))
else:
runtime_version = language_version
result = {
"name": keyword_field(self.config.service_name),
"environment": keyword_field(self.config.environment),
"version": keyword_field(self.config.service_version),
"agent": {"name": "python", "version": elasticapm.VERSION},
"language": {"name": "python", "version": keyword_field(platform.python_version())},
"runtime": {
"name": keyword_field(platform.python_implementation()),
"version": keyword_field(runtime_version),
},
}
if self.config.framework_name:
result["framework"] = {
"name": keyword_field(self.config.framework_name),
"version": keyword_field(self.config.framework_version),
}
self._service_info = result
return result
def get_process_info(self):
return {
"pid": os.getpid(),
"ppid": os.getppid() if hasattr(os, "getppid") else None,
"argv": sys.argv,
"title": None, # Note: if we implement this, the value needs to be wrapped with keyword_field
}
def get_system_info(self):
return {
"hostname": keyword_field(socket.gethostname()),
"architecture": platform.machine(),
"platform": platform.system().lower(),
}
def _build_msg(self, data=None, **kwargs):
data = data or {}
data["service"] = self.get_service_info()
data["process"] = self.get_process_info()
data["system"] = self.get_system_info()
data.update(**kwargs)
return data
def _build_msg_for_logging(
self, event_type, date=None, context=None, custom=None, stack=None, handled=True, **kwargs
):
"""
Captures, processes and serializes an event into a dict object
"""
transaction = get_transaction()
if transaction:
transaction_context = deepcopy(transaction.context)
else:
transaction_context = {}
event_data = {}
if custom is None:
custom = {}
if not date:
date = datetime.datetime.utcnow()
if stack is None:
stack = self.config.auto_log_stacks
if context:
transaction_context.update(context)
context = transaction_context
else:
context = transaction_context
event_data["context"] = context
if transaction and transaction.tags:
context["tags"] = deepcopy(transaction.tags)
# if '.' not in event_type:
# Assume it's a builtin
event_type = "elasticapm.events.%s" % event_type
handler = self.get_handler(event_type)
result = handler.capture(self, **kwargs)
if self._filter_exception_type(result):
return
# data (explicit) culprit takes over auto event detection
culprit = result.pop("culprit", None)
if custom.get("culprit"):
culprit = custom.pop("culprit")
for k, v in compat.iteritems(result):
if k not in event_data:
event_data[k] = v
log = event_data.get("log", {})
if stack and "stacktrace" not in log:
if stack is True:
frames = stacks.iter_stack_frames(skip=3)
else:
frames = stack
frames = stacks.get_stack_info(
frames,
with_locals=self.config.collect_local_variables in ("errors", "all"),
library_frame_context_lines=self.config.source_lines_error_library_frames,
in_app_frame_context_lines=self.config.source_lines_error_app_frames,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=lambda local_var: varmap(
lambda k, v: shorten(
v,
list_length=self.config.local_var_list_max_length,
string_length=self.config.local_var_max_length,
),
local_var,
),
)
log["stacktrace"] = frames
if "stacktrace" in log and not culprit:
culprit = stacks.get_culprit(log["stacktrace"], self.config.include_paths, self.config.exclude_paths)
if "level" in log and isinstance(log["level"], compat.integer_types):
log["level"] = logging.getLevelName(log["level"]).lower()
if log:
event_data["log"] = log
if culprit:
event_data["culprit"] = culprit
if "custom" in context:
context["custom"].update(custom)
else:
context["custom"] = custom
# Run the data through processors
for processor in self.processors:
event_data = processor(self, event_data)
# Make sure all data is coerced
event_data = transform(event_data)
if "exception" in event_data:
event_data["exception"]["handled"] = bool(handled)
event_data.update({"timestamp": date.strftime(constants.TIMESTAMP_FORMAT)})
transaction = get_transaction()
if transaction:
event_data["transaction"] = {"id": transaction.id}
return self._build_msg({"errors": [event_data]})
def _filter_exception_type(self, data):
exception = data.get("exception")
if not exception:
return False
exc_type = exception.get("type")
exc_module = exception.get("module")
if exc_module == "None":
exc_module = None
if exc_type in self.filter_exception_types_dict:
exc_to_filter_module = self.filter_exception_types_dict[exc_type]
if not exc_to_filter_module or exc_to_filter_module == exc_module:
if exc_module:
exc_name = "%s.%s" % (exc_module, exc_type)
else:
exc_name = exc_type
self.logger.info("Ignored %s exception due to exception type filter", exc_name)
return True
return False
def _get_log_message(self, data):
# decode message so we can show the actual event
try:
data = self.decode(data)
except Exception:
message = "<failed decoding data>"
else:
message = data.pop("message", "<no message value>")
return message
def _get_transport(self, parsed_url):
if hasattr(self._transport_class, "sync_transport") and is_master_process():
# when in the master process, always use SYNC mode. This avoids
# the danger of being forked into an inconsistent threading state
self.logger.info("Sending message synchronously while in master " "process. PID: %s", os.getpid())
return self._transport_class.sync_transport(parsed_url)
if parsed_url not in self._transports:
self._transports[parsed_url] = self._transport_class(
parsed_url, verify_server_cert=self.config.verify_server_cert
)
return self._transports[parsed_url]
def _get_stack_info_for_trace(
self,
frames,
library_frame_context_lines=None,
in_app_frame_context_lines=None,
with_locals=True,
locals_processor_func=None,
):
"""Overrideable in derived clients to add frames/info, e.g. templates"""
return stacks.get_stack_info(
frames,
library_frame_context_lines=library_frame_context_lines,
in_app_frame_context_lines=in_app_frame_context_lines,
with_locals=with_locals,
include_paths_re=self.include_paths_re,
exclude_paths_re=self.exclude_paths_re,
locals_processor_func=locals_processor_func,
)
class DummyClient(Client):
"""Sends messages into an empty void"""
def send(self, url, **kwargs):
return None
|
the-stack_106_23526 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pytest
import matplotlib as mpl
import cirq
import cirq.google as cg
_CALIBRATION_DATA = {
'@type':
'type.googleapis.com/cirq.api.google.v2.MetricsSnapshot',
'timestampMs':
'1562544000021',
'metrics': [{
'name': 'xeb',
'targets': ['0_0', '0_1'],
'values': [{
'doubleVal': .9999
}]
}, {
'name': 'xeb',
'targets': ['0_0', '1_0'],
'values': [{
'doubleVal': .9998
}]
}, {
'name': 't1',
'targets': ['q0_0'],
'values': [{
'doubleVal': 321
}]
}, {
'name': 't1',
'targets': ['q0_1'],
'values': [{
'doubleVal': 911
}]
}, {
'name': 't1',
'targets': ['q1_0'],
'values': [{
'doubleVal': 505
}]
}, {
'name': 'globalMetric',
'values': [{
'floatVal': 12300
}]
}]
}
def test_calibration_metrics_dictionary():
calibration = cg.Calibration(_CALIBRATION_DATA)
t1s = calibration['t1']
assert t1s == {
(cirq.GridQubit(0, 0),): [321],
(cirq.GridQubit(0, 1),): [911],
(cirq.GridQubit(1, 0),): [505]
}
assert len(calibration) == 3
assert 't1' in calibration
assert 't2' not in calibration
for qubits, values in t1s.items():
assert len(qubits) == 1
assert len(values) == 1
with pytest.raises(TypeError, match="was 1"):
_ = calibration[1]
with pytest.raises(KeyError, match='not-it'):
_ = calibration['not-it']
def test_calibration_str():
calibration = cg.Calibration(_CALIBRATION_DATA)
assert str(calibration) == ("Calibration(keys=['globalMetric', 't1', "
"'xeb'])")
def test_calibration_timestamp_str():
calibration = cg.Calibration(_CALIBRATION_DATA)
assert (calibration.timestamp_str(
tz=datetime.timezone.utc) == '2019-07-08 00:00:00.021021+00:00')
assert (calibration.timestamp_str(
tz=datetime.timezone(datetime.timedelta(
hours=1))) == '2019-07-08 01:00:00.021021+01:00')
def test_calibration_heatmap():
calibration = cg.Calibration(_CALIBRATION_DATA)
heatmap = calibration.heatmap('t1')
figure = mpl.figure.Figure()
axes = figure.add_subplot(111)
heatmap.plot(axes)
|
the-stack_106_23528 | __author__ = 'mp911de'
import paho.mqtt.client as mqtt
MQTT_HOST = 'localhost'
MQTT_PORT = 1883
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("sensors/distancemeter")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
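# Illustrative counterpart (not part of the original script): any MQTT client
# publishing to the subscribed topic triggers on_message above, e.g. with
# paho's publish helper (assumed to be installed alongside paho.mqtt.client):
#
#     import paho.mqtt.publish as publish
#     publish.single("sensors/distancemeter", payload="123.4",
#                    hostname=MQTT_HOST, port=MQTT_PORT)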
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect(MQTT_HOST, MQTT_PORT, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever() |
the-stack_106_23529 |
# Write results to this file
OUTFILE = 'runs/snort/100KB/src1-tgt1/ftp-par-ftp-iter00200.result.csv'
# Source computers for the request
SOURCE = ['10.0.0.1']
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# IDS Mode. (ATM: noids, min, max, http, ssl, ftp, icmp, mysql)
IDSMODE = 'ftp'
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repititions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repitition
ITER = 200
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
# Protocol to be used e.g. HTTP, SSL, FTP, MYSQL
PROTOCOL = 'ftp' |
the-stack_106_23530 |
def start(kind, opts, area, grid, scale, iter_raster):
kwargs = {}
if kind == 'text':
if 'flat' in opts:
kwargs['spec'] = {'*': 'XX'}
from ._text import render as start
elif kind == 'tk':
kwargs['static'] = True
from ._tk import ui as start
else:
raise ValueError('unsupported UI {!r}'.format(kind))
return start(iter_raster, area, grid, scale, **kwargs)
|
the-stack_106_23535 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import platform
import socket
import sys
import mock
from oslo_config import cfg
from patron.compute import flavors
import patron.context
import patron.db
from patron import exception
from patron.image import glance
from patron.network import minidns
from patron.network import model as network_model
from patron import objects
import patron.utils
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'patron.netconf')
def get_test_admin_context():
return patron.context.get_admin_context()
def get_test_image_info(context, instance_ref):
if not context:
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
return image_service.show(context, image_id)
def get_test_flavor(context=None, options=None):
options = options or {}
if not context:
context = get_test_admin_context()
test_flavor = {'name': 'kinda.big',
'flavorid': 'someid',
'memory_mb': 2048,
'vcpus': 4,
'root_gb': 40,
'ephemeral_gb': 80,
'swap': 1024}
test_flavor.update(options)
try:
flavor_ref = patron.db.flavor_create(context, test_flavor)
except (exception.FlavorExists, exception.FlavorIdExists):
flavor_ref = patron.db.flavor_get_by_name(context, 'kinda.big')
return flavor_ref
def get_test_instance(context=None, flavor=None, obj=False):
if not context:
context = get_test_admin_context()
if not flavor:
flavor = get_test_flavor(context)
test_instance = {'memory_kb': '2048000',
'basepath': '/some/path',
'bridge_name': 'br100',
'vcpus': 4,
'root_gb': 40,
'bridge': 'br101',
'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
'instance_type_id': flavor['id'],
'system_metadata': {},
'extra_specs': {},
'user_id': context.user_id,
'project_id': context.project_id,
}
if obj:
instance = objects.Instance(context, **test_instance)
with mock.patch.object(instance, 'save'):
instance.set_flavor(objects.Flavor.get_by_id(context,
flavor['id']))
instance.create()
else:
flavors.save_flavor_info(test_instance['system_metadata'], flavor, '')
instance = patron.db.instance_create(context, test_instance)
return instance
def get_test_network_info(count=1):
ipv6 = CONF.use_ipv6
fake = 'fake'
fake_ip = '0.0.0.0'
fake_vlan = 100
fake_bridge_interface = 'eth0'
def current():
subnet_4 = network_model.Subnet(cidr=fake_ip,
dns=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
dhcp_server=fake_ip)
subnet_6 = network_model.Subnet(cidr=fake_ip,
gateway=network_model.IP(fake_ip),
ips=[network_model.IP(fake_ip),
network_model.IP(fake_ip),
network_model.IP(fake_ip)],
routes=None,
version=6)
subnets = [subnet_4]
if ipv6:
subnets.append(subnet_6)
network = network_model.Network(id=None,
bridge=fake,
label=None,
subnets=subnets,
vlan=fake_vlan,
bridge_interface=fake_bridge_interface,
injected=False)
vif = network_model.VIF(id='vif-xxx-yyy-zzz',
address=fake,
network=network,
type=network_model.VIF_TYPE_BRIDGE,
devname=None,
ovs_interfaceid=None)
return vif
return network_model.NetworkInfo([current() for x in xrange(0, count)])
def is_osx():
return platform.mac_ver()[0] != ''
def coreutils_readlink_available():
_out, err = patron.utils.trycmd('readlink', '-nm', '/')
return err == ''
test_dns_managers = []
def dns_manager():
global test_dns_managers
manager = minidns.MiniDNS()
test_dns_managers.append(manager)
return manager
def cleanup_dns_managers():
global test_dns_managers
for manager in test_dns_managers:
manager.delete_dns_file()
test_dns_managers = []
def killer_xml_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
def is_ipv6_supported():
has_ipv6_support = socket.has_ipv6
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.close()
except socket.error as e:
if e.errno == errno.EAFNOSUPPORT:
has_ipv6_support = False
else:
raise
# check if there is at least one interface with ipv6
if has_ipv6_support and sys.platform.startswith('linux'):
try:
with open('/proc/net/if_inet6') as f:
if not f.read():
has_ipv6_support = False
except IOError:
has_ipv6_support = False
return has_ipv6_support
def get_api_version(request):
if request.path[2:3].isdigit():
return int(request.path[2:3])
|
the-stack_106_23536 | import scipy.spatial.distance as spdist
import scipy.signal as spsig
import numpy as np
import matplotlib
from applications.eeg.bci_dataset import SAMPLING_FREQUENCY, MONTAGE, load_subject, load_run_from_subject, get_trial, \
get_subject_dataset
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import mne
from graph_utils.visualization import plot_joint_spectrum, plot_temporal_matrix
from pygsp import graphs
def plot_montage(montage):
montage = mne.channels.read_montage(kind="standard_1005", ch_names=montage, unit="m")
fig = montage.plot(scale_factor=10)
fig.savefig("montage.jpg")
def create_spatial_eeg_graph(montage, q=0.05, k=0.1):
montage = mne.channels.read_montage(kind="standard_1005", ch_names=montage, unit="m")
d = spdist.pdist(montage.pos)
W = np.exp(- (d ** 2) / (2 * q ** 2))
W[d > k] = 0
W = spdist.squareform(W)
G = graphs.Graph(W, lap_type="normalized", coords=montage.get_pos2d())
print("Created EEG-graph with q=%.2f and k=%.2f" % (q, k))
print("- Nodes:", G.N)
print("- Edges:", G.Ne)
return G
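# Illustrative example (not part of the original module): the weight between two
# electrodes at distance d (in metres) is exp(-d**2 / (2 * q**2)) and is cut to
# zero beyond k. With the defaults q=0.05 and k=0.1, electrodes 5 cm apart get a
# weight of exp(-0.5) ~= 0.61, while electrodes more than 10 cm apart stay
# disconnected.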
def create_data_eeg_graph(montage, X, threshold=0.7):
montage = mne.channels.read_montage(kind="standard_1005", ch_names=montage, unit="m")
C = np.matmul(X, X.T) / X.shape[1]
C = C - np.eye(C.shape[0])
C[C < threshold] = 0
G = graphs.Graph(C, lap_type="normalized", coords=montage.get_pos2d())
print("- Nodes:", G.N)
print("- Edges:", G.Ne)
return G
if __name__ == '__main__':
from pygsp import plotting
from applications.eeg.eye_dataset import MONTAGE
s = load_subject(8, True)
r = load_run_from_subject(s, 5)
X, y = get_trial(r, 47)
data, labels = get_subject_dataset(1, False)
print(data.shape)
q = 0.1
k = 0.15
plot_montage(MONTAGE)
G = create_spatial_eeg_graph(MONTAGE, q, k)
plotting.plot_graph(G, "matplotlib", save_as="graph")
fig = plt.figure()
plt.imshow(G.W.todense())
plt.colorbar()
fig.savefig("W.jpg")
# plot_spectrum(3, 2, q, k)
|
the-stack_106_23537 | import os
import glob
import math
import argparse
from PIL import Image
########
# Defs #
########
## Brightness boost to ensure no content becomes transparent in D2
d2darkest = 4 # RGB: 4 / 256 is the darkest non-transparent black in d2 color palette
def boost_brightness(img: Image.Image):
# Get a mask from alpha channel (anything with any transparency get's cut off)
img_A = img.getchannel("A")
mask = img_A.point(lambda i: i > 254 and 255)
# Boost brightness fractionally to bump 0 to 4 (and 256 to 256)
brighter = img.point(lambda i: round(((i + d2darkest) / (255 + d2darkest)) * 255))
# Use the mask to only apply to the content
img.paste(brighter, None, mask)
return img
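# Illustrative example (not part of the original script): the point() mapping
# above rescales each channel value i to round(((i + 4) / 259) * 255), so pure
# black 0 becomes 4 (the darkest non-transparent black in the Diablo 2 palette)
# while 255 stays 255; the alpha mask restricts this to fully opaque pixels.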
# Load images into Pillow Image
def load_images(paths):
images = []
for path in paths:
img = Image.open(path)
images.append(img)
return images
# Creates a palette img for use in Pillow
def load_palette(path: str):
# Load palette data
if (not os.path.isfile(path)):
print(f"Could not locate pallete file from '{path}'")
quit()
pal_data = []
with open(path, 'r') as file:
pal_data = list(map(int, file.read().split('\n')))
# Create a palette image to quantize to
pal_img = Image.new('P', (1, 1), 0)
pal_img.putpalette(pal_data + [0] * (768 - len(pal_data))) # Make sure pal_data is 768 entries long
if (args.verbose):
print(f"Palette file: '{path}'")
return pal_img
# Places 'img_overlay' over 'img_src' with 'alpha' transparency
def fade_images(src_img, overlay_img, alpha):
img = src_img.copy()
# Create the mask image for this frame
mask_img = Image.new('RGBA', src_img.size, (0, 0, 0, alpha))
# Paste looped image on top of starting, using alpha
img.paste(overlay_img, (0, 0), mask_img)
return img
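# Illustrative example (not part of the original script): in the loop-fade code
# below, fade_images is called with an alpha that ramps linearly. With --fade 3
# the looped frames are pasted at alpha 64, 128 and 191 out of 255, i.e.
# round((i + 1) * 255 / (loop_amount + 1)) for i = 0, 1, 2, blending the first
# frames into the last ones for a smoother loop.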
########
# Main #
########
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest = "input", help = "Glob string to search the files for. Defaults to './renders/*.png'")
parser.add_argument("-o", "--output", dest = "output", help = "Name of resulting animated .gif. Defaults to first image like so: @1TRLITNUHTH_0_0001.png becomes @1TRLITNUHTH.gif.")
parser.add_argument("--fade", dest = "fade", type = int, help="Amount of frames to fade in the loop. Reduces total frame count, but fades together this amount of frames to create a more seamless loop.")
parser.add_argument("-d", "--directions", dest = "directions", type = int, help="Amount of directions. Used for splitting the images into groups when looping.")
parser.add_argument("--verbose", dest = "verbose", action='store_true', help = "Verbose logging")
parser.add_argument("--boost", dest = "boost_brightness", action='store_true', help = "Boosts the brightness the tiniest amount to make full black not transparent in Diablo 2. Transparent base images are never boosted.")
parser.add_argument("--noboost", dest = "boost_brightness", action='store_false', help = "(Default)")
parser.set_defaults(input = "./renders/*.png")
parser.set_defaults(fade = 0)
parser.set_defaults(directions = 1)
parser.set_defaults(boost_brightness = False)
args = parser.parse_args()
can_boost_brightness = args.boost_brightness
image_paths = sorted(glob.glob(args.input))
rootname = args.output if args.output else os.path.basename(image_paths[0]).split('_')[0]
if (len(image_paths) <= 0):
print("Error: Did not find any images. Please check your -input arg.\nNote that you may need to escape special characters (e.g. ./$1TRLITNUHTH_*.png should become ./\$1TRLITNUHTH_*.png).")
quit()
# Separate by direction
if (args.directions <= 0):
args.directions = 1
if (args.directions > len(image_paths)):
print("Error: You are asking for more directions than there are images.")
quit()
amount_per_dir = len(image_paths) // args.directions
imagepaths_by_dir = []
processed_images = [] # List of final images
# Separate by direction
loop_amount = args.fade
loop_max = math.floor(amount_per_dir / 2)
if (loop_amount > loop_max):
loop_amount = loop_max
print(f"Warning: Maximum amount of looping frames is half the total amount of frames. Clamped to {loop_max}")
if ((len(image_paths) / args.directions) % 1 != 0):
print("Error: Directions do not have an equal amount of frames. All directions must have the same amount of frames.")
quit()
if (args.verbose):
print("| Settings |")
print(f"Boost image brightness: {args.boost_brightness}")
print(f"Fade-in loop frames: {loop_amount}")
print(f"Directions: {args.directions}")
print(f"Frames / direction: {amount_per_dir}")
print(f"Total images found: {len(image_paths)}")
d2pal = load_palette('./units_pylist.txt')
## Per direction, loop images if necessary, then process them and add them to the global processed_images list
for d in range(args.directions):
if (args.verbose):
print(f"\n| Direction: {d} |")
dir_imagepaths = image_paths[(d*amount_per_dir):((d+1)*amount_per_dir)]
imagepaths_by_dir.append(dir_imagepaths)
images_in_dir = load_images(dir_imagepaths)
# Loop additional images, keeping in mind the directions
if (loop_amount > 0 and len(images_in_dir) > loop_amount):
loop_start = len(images_in_dir) - loop_amount
for i in range(loop_amount):
# Fade in frames 0, 1, 2 .. on top of last frames
target_frame = loop_start + i
loop_frame = i
src_img = images_in_dir[target_frame]
loop_img = images_in_dir[loop_frame]
            # We skip 0 and 255 alphas
alpha = round((i + 1) * 255 / (loop_amount + 1))
res_img = fade_images(src_img, loop_img, alpha)
res_img.filename = src_img.filename
if (args.verbose):
print(f"Fading {alpha}/255: ({src_img.filename}) into ({loop_img.filename})")
# Overwrite first frames that have loop overlayed
images_in_dir[loop_frame].close()
images_in_dir[loop_frame] = res_img
# Close and delete looped images
for i in range(loop_amount):
im = images_in_dir.pop()
im.close()
# Process images
for img in images_in_dir:
if (args.verbose):
print(f"{img.filename}: [Processing] ...", end = " ")
# Check to ensure we can and should boost blacks
if (img.mode == "RGB" and can_boost_brightness):
print(f"\nWarning: {img.filename} does not have an alpha channel. Cannot boost blacks.")
can_boost_brightness = False
# If we can, boost blacks to the darkest non-transparent black in d2 color palette
if (can_boost_brightness):
if (args.verbose):
print(f"[Boosting brightness] ...", end = " ")
img = boost_brightness(img)
# Convert to RGB and apply D2 palette
if (args.verbose):
print(f"[Converting to D2 palette] ...", end = " ")
img = img.convert("RGB")
img = img.quantize(palette=d2pal)
processed_images.append(img)
if (args.verbose):
print(f"[Done]")
# Done processing
if (args.verbose):
print(f"\nAll images processed, saving as '{rootname}.gif'")
# Compile into a single animated gif
processed_images[0].save(rootname+'.gif',
append_images = processed_images[1:],
background = 0,
transparency = 255,
disposal = 2, # No need to dispose because every image has black background
save_all = True,
optimize = False,
palette = d2pal,
loop = 0,
duration = 0)
for image in processed_images:
image.close()
if (args.verbose):
print(f"DONE")
else:
print(f"Saved as '{rootname}.gif'")
|
the-stack_106_23538 | # Time: O(n)
# Space: O(n)
class Solution(object):
def repeatedSubstringPattern(self, str):
"""
:type str: str
:rtype: bool
"""
def getPrefix(pattern):
prefix = [-1] * len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j > -1 and pattern[j + 1] != pattern[i]:
j = prefix[j]
if pattern[j + 1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
prefix = getPrefix(str)
return prefix[-1] != -1 and \
(prefix[-1] + 1) % (len(str) - prefix[-1] - 1) == 0
def repeatedSubstringPattern2(self, str):
"""
:type str: str
:rtype: bool
"""
if not str:
return False
ss = (str + str)[1:-1]
return ss.find(str) != -1
|
the-stack_106_23540 | # Copyright (c) 2014 ITOCHU Techno-Solutions Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import json
import logging
from mock import *
from rackclient import exceptions
from rackclient.tests import utils
from rackclient.lib import initializing
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.WARN)
class InitializingTest(utils.TestCase):
def setUp(self):
super(InitializingTest, self).setUp()
p = patch("requests.get")
self.addCleanup(p.stop)
mock_request = p.start()
mock_resp = Mock()
mock_resp.text= json.dumps(dict(meta=dict(
proxy_ip="10.0.0.2",gid="gid", pid="pid", ppid="ppid")))
mock_request.return_value = mock_resp
def test_get_rack_context(self):
p = patch("rackclient.lib.initializing.Client")
self.addCleanup(p.stop)
mock_client = p.start()
mock_client = mock_client.return_value
def proxy_info(args):
info = type('', (object,), {})
info.ipc_endpoint = None
info.fs_endpoint = None
info.shm_endpoint = None
return info
mock_client.proxy = Mock()
mock_client_processes = Mock()
mock_client.proxy.get.side_effect = proxy_info
p2 = patch("rackclient.lib.initializing._Messaging")
self.addCleanup(p2.stop)
mock_messaging = p2.start()
mock_messaging = mock_messaging.return_value
mock_messaging.receive_msg.return_value=dict(pid="ppid")
actual_context = initializing.get_rack_context()
expect_context = type('', (object,), dict(
proxy_ip="10.0.0.2",
gid="gid", pid="pid",
ppid="ppid",
ipc_endpoint=None,
fs_endpoint=None,
shm_endpoint=None,
client=mock_client))
self.assertEquals(expect_context.pid, actual_context.pid)
self.assertEquals(expect_context.ppid, actual_context.ppid)
self.assertEquals(expect_context.proxy_ip, actual_context.proxy_ip)
self.assertEquals(expect_context.ipc_endpoint, actual_context.ipc_endpoint)
self.assertEquals(expect_context.fs_endpoint, actual_context.fs_endpoint)
self.assertEquals(expect_context.shm_endpoint, actual_context.shm_endpoint)
def test_get_rack_cotext_ProcessInitError_due_to_proxy(self):
self.p = patch("rackclient.lib.initializing.Client")
self.addCleanup(self.p.stop)
mock_client = self.p.start()
mock_client = mock_client.return_value
mock_client.proxy = Mock()
mock_client_processes = Mock()
mock_client.proxy.get.side_effect = Exception()
self.assertRaises(Exception, initializing.get_rack_context)
def test_get_rack_cotext_ProcessInitError_doe_to_processes(self):
self.p = patch("rackclient.lib.initializing.Client")
self.addCleanup(self.p.stop)
mock_client = self.p.start()
mock_client = mock_client.return_value
mock_client.proxy = Mock()
mock_client_processes = Mock()
mock_client.processes.get.side_effect = exceptions.NotFound("")
self.assertRaises(Exception, initializing.get_rack_context)
@patch("rackclient.lib.initializing._Messaging.Receive")
def test_messaging_receive_msg(self, mock_receive):
self.mock_connection = Mock()
self.mock_channel = Mock()
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
self.addCleanup(self.patch_pika_blocking.stop)
self.mock_pika_blocking = self.patch_pika_blocking.start()
self.mock_pika_blocking.return_value = self.mock_connection
self.mock_connection.channel.return_value = self.mock_channel
context = type('', (object,), dict(
proxy_ip="10.0.0.2",
gid="gid", pid="pid",
ppid="ppid",
ipc_endpoint=None,
fs_endpoint=None,
shm_endpoint=None))
timeout_limit = 123
msg = initializing._Messaging(context)
message = msg.receive_msg(timeout_limit=timeout_limit)
self.mock_connection.add_timeout.\
assert_called_with(deadline=int(timeout_limit),
callback_method=mock_receive().time_out)
self.mock_channel.\
basic_consume.assert_called_with(mock_receive().get_msg,
queue="pid",
no_ack=False)
self.mock_channel.start_consuming.assert_called_with()
self.assertEqual(message, mock_receive().message)
def test_messaging_send_msg(self):
self.mock_connection = Mock()
self.mock_channel = Mock()
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
self.addCleanup(self.patch_pika_blocking.stop)
self.mock_pika_blocking = self.patch_pika_blocking.start()
self.mock_pika_blocking.return_value = self.mock_connection
self.mock_connection.channel.return_value = self.mock_channel
context = type('', (object,), dict(
proxy_ip="10.0.0.2",
gid="gid", pid="pid",
ppid="ppid",
ipc_endpoint=None,
fs_endpoint=None,
shm_endpoint=None))
send_msg = 'test_msg'
target = 'test_pid'
msg = initializing._Messaging(context)
msg.send_msg(target)
routing_key = context.gid + '.' + target
send_dict = {'pid': context.pid}
send_msg = cPickle.dumps(send_dict)
self.mock_channel.\
basic_publish.assert_called_with(exchange=context.gid,
routing_key=routing_key,
body=send_msg)
def test_receive_get_msg(self):
self.mock_connection = Mock()
self.mock_channel = Mock()
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
self.addCleanup(self.patch_pika_blocking.stop)
self.mock_pika_blocking = self.patch_pika_blocking.start()
self.mock_pika_blocking.return_value = self.mock_connection
self.mock_connection.channel.return_value = self.mock_channel
ch = Mock()
method = Mock()
properties = Mock()
receive_msg = 'receive_msg'
body = cPickle.dumps(receive_msg)
ch_object = {'delivery_tag': 'delivery_tag'}
method.configure_mock(**ch_object)
context = type('', (object,), dict(
proxy_ip="10.0.0.2",
gid="gid", pid="pid",
ppid="ppid",
ipc_endpoint=None,
fs_endpoint=None,
shm_endpoint=None))
msg = initializing._Messaging(context)
receive = msg.Receive()
receive.get_msg(ch, method, properties, body)
ch.basic_ack.assert_called_with(delivery_tag=ch_object['delivery_tag'])
        ch.stop_consuming.assert_called_with()
self.assertEqual(receive.message, receive_msg)
def test_receive_timeout(self):
self.mock_connection = Mock()
self.mock_channel = Mock()
self.patch_pika_blocking = patch('pika.BlockingConnection', autospec=True)
self.addCleanup(self.patch_pika_blocking.stop)
self.mock_pika_blocking = self.patch_pika_blocking.start()
self.mock_pika_blocking.return_value = self.mock_connection
self.mock_connection.channel.return_value = self.mock_channel
context = type('', (object,), dict(
proxy_ip="10.0.0.2",
gid="gid", pid="pid",
ppid="ppid",
ipc_endpoint="data",
fs_endpoint=None,
shm_endpoint=None))
msg = initializing._Messaging(context)
receive = msg.Receive()
receive.channel = self.mock_channel
receive.time_out()
self.mock_channel.stop_consuming.assert_called_with()
|
the-stack_106_23541 | #gui/configProfiles.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2013 NV Access Limited
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import wx
import config
import api
import gui
from logHandler import log
import appModuleHandler
import globalVars
class ProfilesDialog(wx.Dialog):
shouldSuspendConfigProfileTriggers = True
_instance = None
def __new__(cls, *args, **kwargs):
# Make this a singleton.
if ProfilesDialog._instance is None:
return super(ProfilesDialog, cls).__new__(cls, *args, **kwargs)
return ProfilesDialog._instance
def __init__(self, parent):
if ProfilesDialog._instance is not None:
return
ProfilesDialog._instance = self
# Translators: The title of the Configuration Profiles dialog.
super(ProfilesDialog, self).__init__(parent, title=_("Configuration Profiles"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.currentAppName = (gui.mainFrame.prevFocus or api.getFocusObject()).appModule.appName
self.profileNames = [None]
self.profileNames.extend(config.conf.listProfiles())
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of the profile list in the Configuration Profiles dialog.
sizer.Add(wx.StaticText(self, label=_("&Profile")))
item = self.profileList = wx.ListBox(self,
choices=[self.getProfileDisplay(name, includeStates=True) for name in self.profileNames])
item.Bind(wx.EVT_LISTBOX, self.onProfileListChoice)
item.Selection = self.profileNames.index(config.conf.profiles[-1].name)
sizer.Add(item)
mainSizer.Add(sizer)
sizer = wx.BoxSizer(wx.HORIZONTAL)
item = self.changeStateButton = wx.Button(self)
item.Bind(wx.EVT_BUTTON, self.onChangeState)
sizer.Add(item)
self.AffirmativeId = item.Id
item.SetDefault()
# Translators: The label of a button to create a new configuration profile.
item = newButton = wx.Button(self, label=_("&New"))
item.Bind(wx.EVT_BUTTON, self.onNew)
sizer.Add(item)
# Translators: The label of a button to rename a configuration profile.
item = self.renameButton = wx.Button(self, label=_("&Rename"))
item.Bind(wx.EVT_BUTTON, self.onRename)
sizer.Add(item)
# Translators: The label of a button to delete a configuration profile.
item = self.deleteButton = wx.Button(self, label=_("&Delete"))
item.Bind(wx.EVT_BUTTON, self.onDelete)
sizer.Add(item)
mainSizer.Add(sizer)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a button to manage triggers
# in the Configuration Profiles dialog.
# See the Configuration Profiles section of the User Guide for details.
triggersButton = wx.Button(self, label=_("&Triggers..."))
triggersButton.Bind(wx.EVT_BUTTON, self.onTriggers)
sizer.Add(triggersButton)
# Translators: The label of a checkbox in the Configuration Profiles dialog.
item = self.disableTriggersToggle = wx.CheckBox(self, label=_("Temporarily d&isable all triggers"))
item.Value = not config.conf.profileTriggersEnabled
sizer.Add(item)
mainSizer.Add(sizer)
# Translators: The label of a button to close a dialog.
item = wx.Button(self, wx.ID_CLOSE, label=_("&Close"))
item.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
mainSizer.Add(item)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.EscapeId = wx.ID_CLOSE
if globalVars.appArgs.secure:
for item in newButton, triggersButton, self.renameButton, self.deleteButton:
item.Disable()
self.onProfileListChoice(None)
mainSizer.Fit(self)
self.Sizer = mainSizer
self.profileList.SetFocus()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def __del__(self):
ProfilesDialog._instance = None
def getProfileDisplay(self, name, includeStates=False):
# Translators: The item to select the user's normal configuration
# in the profile list in the Configuration Profiles dialog.
disp = name if name else _("(normal configuration)")
if includeStates:
disp += self.getProfileStates(name)
return disp
def getProfileStates(self, name):
try:
profile = config.conf.getProfile(name)
except KeyError:
return ""
states = []
editProfile = config.conf.profiles[-1]
if profile is editProfile:
# Translators: Reported for a profile which is being edited
# in the Configuration Profiles dialog.
states.append(_("editing"))
if name:
# This is a profile (not the normal configuration).
if profile.manual:
# Translators: Reported for a profile which has been manually activated
# in the Configuration Profiles dialog.
states.append(_("manual"))
if profile.triggered:
# Translators: Reported for a profile which is currently triggered
# in the Configuration Profiles dialog.
states.append(_("triggered"))
if states:
return " (%s)" % ", ".join(states)
return ""
def isProfileManual(self, name):
if not name:
return False
try:
profile = config.conf.getProfile(name)
except KeyError:
return False
return profile.manual
def onChangeState(self, evt):
sel = self.profileList.Selection
profile = self.profileNames[sel]
if self.isProfileManual(profile):
profile = None
try:
config.conf.manualActivateProfile(profile)
except:
# Translators: An error displayed when activating a configuration profile fails.
gui.messageBox(_("Error activating profile."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
return
self.Close()
def onNew(self, evt):
self.Disable()
NewProfileDialog(self).Show()
def onDelete(self, evt):
index = self.profileList.Selection
if gui.messageBox(
# Translators: The confirmation prompt displayed when the user requests to delete a configuration profile.
_("Are you sure you want to delete this profile? This cannot be undone."),
# Translators: The title of the confirmation dialog for deletion of a configuration profile.
_("Confirm Deletion"),
wx.YES | wx.NO | wx.ICON_QUESTION, self
) == wx.NO:
return
name = self.profileNames[index]
try:
config.conf.deleteProfile(name)
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when deleting a configuration profile fails.
gui.messageBox(_("Error deleting profile."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
return
del self.profileNames[index]
self.profileList.Delete(index)
self.profileList.SetString(0, self.getProfileDisplay(None, includeStates=True))
self.profileList.Selection = 0
self.onProfileListChoice(None)
self.profileList.SetFocus()
def onProfileListChoice(self, evt):
sel = self.profileList.Selection
enable = sel > 0
name = self.profileNames[sel]
if self.isProfileManual(name):
# Translators: The label of the button to manually deactivate the selected profile
# in the Configuration Profiles dialog.
label = _("Manual deactivate")
else:
# Translators: The label of the button to manually activate the selected profile
# in the Configuration Profiles dialog.
label = _("Manual activate")
self.changeStateButton.Label = label
self.changeStateButton.Enabled = enable
if globalVars.appArgs.secure:
return
self.deleteButton.Enabled = enable
self.renameButton.Enabled = enable
def onRename(self, evt):
index = self.profileList.Selection
oldName = self.profileNames[index]
# Translators: The label of a field to enter a new name for a configuration profile.
with wx.TextEntryDialog(self, _("New name:"),
# Translators: The title of the dialog to rename a configuration profile.
_("Rename Profile"), defaultValue=oldName) as d:
if d.ShowModal() == wx.ID_CANCEL:
return
newName = api.filterFileName(d.Value)
try:
config.conf.renameProfile(oldName, newName)
except ValueError:
# Translators: An error displayed when renaming a configuration profile
# and a profile with the new name already exists.
gui.messageBox(_("That profile already exists. Please choose a different name."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
return
except:
log.debugWarning("", exc_info=True)
gui.messageBox(_("Error renaming profile."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
return
self.profileNames[index] = newName
self.profileList.SetString(index, self.getProfileDisplay(newName, includeStates=True))
self.profileList.Selection = index
self.profileList.SetFocus()
def onTriggers(self, evt):
self.Disable()
TriggersDialog(self).Show()
def getSimpleTriggers(self):
# Yields (spec, display, manualEdit)
yield ("app:%s" % self.currentAppName,
# Translators: Displayed for the configuration profile trigger for the current application.
# %s is replaced by the application executable name.
_("Current application (%s)") % self.currentAppName,
False)
# Translators: Displayed for the configuration profile trigger for say all.
yield "sayAll", _("Say all"), True
def onClose(self, evt):
if self.disableTriggersToggle.Value:
config.conf.disableProfileTriggers()
else:
config.conf.enableProfileTriggers()
self.Destroy()
def saveTriggers(self, parentWindow=None):
try:
config.conf.saveProfileTriggers()
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when saving configuration profile triggers fails.
gui.messageBox(_("Error saving configuration profile triggers - probably read only file system."),
_("Error"), wx.OK | wx.ICON_ERROR, parent=parentWindow)
class TriggerInfo(object):
__slots__ = ("spec", "display", "profile")
def __init__(self, spec, display, profile):
self.spec = spec
self.display = display
self.profile = profile
class TriggersDialog(wx.Dialog):
def __init__(self, parent):
# Translators: The title of the configuration profile triggers dialog.
super(TriggersDialog, self).__init__(parent, title=_("Profile Triggers"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
processed = set()
triggers = self.triggers = []
confTrigs = config.conf.triggersToProfiles
# Handle simple triggers.
for spec, disp, manualEdit in parent.getSimpleTriggers():
try:
profile = confTrigs[spec]
except KeyError:
profile = None
triggers.append(TriggerInfo(spec, disp, profile))
processed.add(spec)
# Handle all other triggers.
for spec, profile in confTrigs.iteritems():
if spec in processed:
continue
if spec.startswith("app:"):
# Translators: Displayed for a configuration profile trigger for an application.
# %s is replaced by the application executable name.
disp = _("%s application") % spec[4:]
else:
continue
triggers.append(TriggerInfo(spec, disp, profile))
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of the triggers list in the Configuration Profile Triggers dialog.
sizer.Add(wx.StaticText(self, label=_("Triggers")))
item = self.triggerList = wx.ListBox(self, choices=[trig.display for trig in triggers])
item.Bind(wx.EVT_LISTBOX, self.onTriggerListChoice)
item.Selection = 0
sizer.Add(item)
mainSizer.Add(sizer)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of the profile list in the Configuration Profile Triggers dialog.
sizer.Add(wx.StaticText(self, label=_("Profile")))
item = self.profileList = wx.Choice(self,
choices=[parent.getProfileDisplay(name) for name in parent.profileNames])
item.Bind(wx.EVT_CHOICE, self.onProfileListChoice)
sizer.Add(item)
mainSizer.Add(sizer)
item = wx.Button(self, wx.ID_CLOSE, label=_("&Close"))
item.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
mainSizer.Add(item)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.AffirmativeId = wx.ID_CLOSE
item.SetDefault()
self.EscapeId = wx.ID_CLOSE
self.onTriggerListChoice(None)
mainSizer.Fit(self)
self.Sizer = mainSizer
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onTriggerListChoice(self, evt):
trig = self.triggers[self.triggerList.Selection]
try:
self.profileList.Selection = self.Parent.profileNames.index(trig.profile)
except ValueError:
log.error("Trigger %s: invalid profile %s"
% (trig.spec, trig.profile))
self.profileList.Selection = 0
trig.profile = None
def onProfileListChoice(self, evt):
trig = self.triggers[self.triggerList.Selection]
trig.profile = self.Parent.profileNames[evt.Selection]
def onClose(self, evt):
confTrigs = config.conf.triggersToProfiles
for trig in self.triggers:
if trig.profile:
confTrigs[trig.spec] = trig.profile
else:
try:
del confTrigs[trig.spec]
except KeyError:
pass
self.Parent.saveTriggers(parentWindow=self)
self.Parent.Enable()
self.Destroy()
class NewProfileDialog(wx.Dialog):
def __init__(self, parent):
# Translators: The title of the dialog to create a new configuration profile.
super(NewProfileDialog, self).__init__(parent, title=_("New Profile"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a field to enter the name of a new configuration profile.
sizer.Add(wx.StaticText(self, label=_("Profile name:")))
item = self.profileName = wx.TextCtrl(self)
sizer.Add(item)
mainSizer.Add(sizer)
# Translators: The label of a radio button to specify that a profile will be used for manual activation
# in the new configuration profile dialog.
self.triggers = triggers = [(None, _("Manual activation"), True)]
triggers.extend(parent.getSimpleTriggers())
item = self.triggerChoice = wx.RadioBox(self, label=_("Use this profile for:"),
choices=[trig[1] for trig in triggers])
item.Bind(wx.EVT_RADIOBOX, self.onTriggerChoice)
self.autoProfileName = ""
self.onTriggerChoice(None)
mainSizer.Add(item)
mainSizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL))
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
mainSizer.Fit(self)
self.Sizer = mainSizer
self.profileName.SetFocus()
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onOk(self, evt):
confTrigs = config.conf.triggersToProfiles
spec, disp, manualEdit = self.triggers[self.triggerChoice.Selection]
if spec in confTrigs and gui.messageBox(
# Translators: The confirmation prompt presented when creating a new configuration profile
# and the selected trigger is already associated.
_("This trigger is already associated with another profile. "
"If you continue, it will be removed from that profile and associated with this one.\n"
"Are you sure you want to continue?"),
_("Warning"), wx.ICON_WARNING | wx.YES | wx.NO, self
) == wx.NO:
return
name = api.filterFileName(self.profileName.Value)
if not name:
return
try:
config.conf.createProfile(name)
except ValueError:
# Translators: An error displayed when the user attempts to create a configuration profile which already exists.
gui.messageBox(_("That profile already exists. Please choose a different name."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
return
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when creating a configuration profile fails.
gui.messageBox(_("Error creating profile - probably read only file system."),
_("Error"), wx.OK | wx.ICON_ERROR, self)
self.onCancel(evt)
return
if spec:
confTrigs[spec] = name
self.Parent.saveTriggers(parentWindow=self)
parent = self.Parent
if manualEdit:
if gui.messageBox(
# Translators: The prompt asking the user whether they wish to
# manually activate a configuration profile that has just been created.
_("To edit this profile, you will need to manually activate it. "
"Once you have finished editing, you will need to manually deactivate it to resume normal usage.\n"
"Do you wish to manually activate it now?"),
# Translators: The title of the confirmation dialog for manual activation of a created profile.
_("Manual Activation"), wx.YES | wx.NO | wx.ICON_QUESTION, self
) == wx.YES:
config.conf.manualActivateProfile(name)
else:
# Return to the Profiles dialog.
parent.profileNames.append(name)
parent.profileList.Append(name)
parent.profileList.Selection = parent.profileList.Count - 1
parent.onProfileListChoice(None)
parent.profileList.SetFocus()
parent.Enable()
self.Destroy()
return
else:
# Ensure triggers are enabled so the user can edit the profile.
config.conf.enableProfileTriggers()
# The user is done with the Profiles dialog;
# let them get on with editing the profile.
parent.Destroy()
def onCancel(self, evt):
self.Parent.Enable()
self.Destroy()
def onTriggerChoice(self, evt):
spec, disp, manualEdit = self.triggers[self.triggerChoice.Selection]
if not spec:
# Manual activation shouldn't guess a name.
name = ""
elif spec.startswith("app:"):
name = spec[4:]
else:
name = disp
if self.profileName.Value == self.autoProfileName:
# The user hasn't changed the automatically filled value.
self.profileName.Value = name
self.profileName.SelectAll()
self.autoProfileName = name
|
the-stack_106_23542 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import os
def compute_stats_ar(results, ar_params, verbose=False):
weights = results["weights"]
error = results["predicted"] - results["actual"]
stats = {}
abs_error = np.abs(weights - ar_params)
symmetric_abs_coeff = np.abs(weights) + np.abs(ar_params)
stats["sMAPE (AR-coefficients)"] = 100 * np.mean(abs_error / (10e-9 + symmetric_abs_coeff))
sTPE = 100 * np.sum(abs_error) / (10e-9 + np.sum(symmetric_abs_coeff))
stats["sTPE (AR-coefficients)"] = sTPE
# predictions error
stats["MSE"] = np.mean(error ** 2)
if verbose:
print("MSE: {}".format(stats["MSE"]))
print("sMAPE (AR-coefficients): {:6.3f}".format(stats["sMAPE (AR-coefficients)"]))
print("sTPE (AR-coefficients): {:6.3f}".format(stats["sTPE (AR-coefficients)"]))
# print("Relative error: {:6.3f}".format(stats["TP (AR-coefficients)"]))
# print("Mean relative error: {:6.3f}".format(mean_rel_error))
print("AR params: ")
print(ar_params)
print("Weights: ")
print(weights)
return stats
def plot_loss_curve(losses, test_loss=None, epoch_losses=None, show=False, save=False):
fig = plt.figure()
fig.set_size_inches(12, 6)
ax = plt.axes()
ax.set_xlabel("Iteration")
ax.set_ylabel("Loss")
x_loss = list(range(len(losses)))
plt.plot(x_loss, losses, 'b', alpha=0.3)
if epoch_losses is not None:
iter_per_epoch = int(len(losses) / len(epoch_losses))
epoch_ends = int(iter_per_epoch/2) + iter_per_epoch*np.arange(len(epoch_losses))
plt.plot(epoch_ends, epoch_losses, 'b')
if test_loss is not None:
plt.hlines(test_loss, xmin=x_loss[0], xmax=x_loss[-1])
if save:
if not os.path.exists('results'):
os.makedirs('results')
figname = 'results/loss_DAR.png'
plt.savefig(figname, dpi=600, bbox_inches='tight')
plt.show()
# plt.close()
def plot_prediction_sample(predicted, actual, num_obs=100, model_name="AR-Net", save=False):
fig2 = plt.figure()
fig2.set_size_inches(10, 6)
plt.plot(actual[0:num_obs])
plt.plot(predicted[0:num_obs])
plt.legend(["Actual Time-Series", "{}-Prediction".format(model_name)])
if save:
if not os.path.exists('results'):
os.makedirs('results')
figname = 'results/prediction_{}.png'.format(model_name)
plt.savefig(figname, dpi=600, bbox_inches='tight')
plt.show()
def plot_error_scatter(predicted, actual, model_name="AR-Net", save=False):
# error = predicted - actual
fig3 = plt.figure()
fig3.set_size_inches(6, 6)
plt.scatter(actual, predicted - actual, marker='o', s=10, alpha=0.3)
plt.legend(["{}-Error".format(model_name)])
if save:
if not os.path.exists('results'):
os.makedirs('results')
figname = 'results/scatter_{}.png'.format(model_name)
plt.savefig(figname, dpi=600, bbox_inches='tight')
plt.show()
def plot_weights(ar_val, weights, ar, model_name="AR-Net", save=False):
df = pd.DataFrame(
zip(
list(range(1, ar_val + 1)) * 2,
["AR-Process (True)"] * ar_val + [model_name] * ar_val,
list(ar) + list(weights)
),
columns=["AR-coefficient (lag number)", "model", "value (weight)"]
)
plt.figure(figsize=(10, 6))
palette = {"Classic-AR": "C0", "AR-Net": "C1", "AR-Process (True)": "k"}
    sns.barplot(x="AR-coefficient (lag number)", hue="model", y="value (weight)", data=df, palette=palette)
if save:
if not os.path.exists('results'):
os.makedirs('results')
        figname = 'results/weights_{}_{}.png'.format(ar_val, model_name)
plt.savefig(figname, dpi=600, bbox_inches='tight')
plt.show()
def plot_results(results, model_name="MODEL", save=False):
plot_prediction_sample(results["predicted"], results["actual"], num_obs=100, model_name=model_name, save=save)
plot_error_scatter(results["predicted"], results["actual"], model_name=model_name, save=save)
def jsonize(results):
for key, value in results.items():
if type(value) is list:
if type(value[0]) is list:
results[key] = [["{:8.5f}".format(xy) for xy in x] for x in value]
else:
results[key] = ["{:8.5f}".format(x) for x in value]
else:
results[key] = "{:8.5f}".format(value)
return results
def list_of_dicts_2_dict_of_lists(sources):
keys = sources[0].keys()
res = {}
for key in keys:
res[key] = [d[key] for d in sources]
return res
def list_of_dicts_2_dict_of_means(sources):
keys = sources[0].keys()
res = {}
for key in keys:
res[key] = np.mean([d[key] for d in sources])
return res
def list_of_dicts_2_dict_of_means_minmax(sources):
keys = sources[0].keys()
res = {}
for key in keys:
values = [d[key] for d in sources]
res[key] = (np.mean(values), min(values), max(values))
return res
def get_json_filenames(values, subdir=None):
ar_filename = get_json_filenames_type("AR", values, subdir)
dar_filename = get_json_filenames_type("DAR", values, subdir)
return ar_filename, dar_filename
def get_json_filenames_type(model_type, values, subdir=None):
filename = 'results/{}{}_{}.json'.format(
subdir + "/" if subdir is not None else "",
model_type,
"-".join([str(x) for x in values])
)
return filename
def intelligent_regularization(sparsity):
if sparsity is not None:
# best:
# lam = 0.01 * (1.0 / sparsity - 1.0)
lam = 0.02 * (1.0 / sparsity - 1.0)
# lam = 0.05 * (1.0 / sparsity - 1.0)
# alternatives
# l1 = 0.02 * (np.log(2) / np.log(1 + sparsity) - 1.0)
# l1 = 0.1 * (1.0 / np.sqrt(sparsity) - 1.0)
else:
lam = 0.0
return lam
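# --- Illustrative sketch, not part of the original module ---
# Minimal demo of how the regularization strength scales with the sparsity
# target; the printed values are simply what lam = 0.02 * (1/sparsity - 1)
# evaluates to for a few sample inputs.
if __name__ == "__main__":
    for sparsity in (1.0, 0.5, 0.1):
        lam = intelligent_regularization(sparsity)
        print("sparsity={:.2f} -> lambda={:.4f}".format(sparsity, lam))
    # expected output (approximately): 0.0000, 0.0200, 0.1800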
|
the-stack_106_23544 | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import random
import string
random.seed(917) # Don't want random for regression tests
MOUSE, KEYPRESS, TIMER, TERMINATE = ("MOUSE", "KEYPRESS", "TIMER",
"TERMINATE")
def next():
kinds = ([MOUSE] * 7) + ([KEYPRESS] * 11) + ([TIMER] * 5) + [TERMINATE]
kind = random.choice(kinds)
if kind == MOUSE:
return Event(kind, button=random.randint(1, 3),
x=random.randint(0, 640), y=random.randint(0, 480))
elif kind == KEYPRESS:
return Event(kind, ctrl=random.randint(1, 7) == 1,
shift=random.randint(1, 5) == 1,
key=random.choice(string.ascii_lowercase))
return Event(kind) # TIMER or TERMINATE
class Event:
TimerId = 0
def __init__(self, kind, **kwargs):
assert kind in {MOUSE, KEYPRESS, TIMER, TERMINATE}
self.kind = kind
self.kwargs = kwargs
if self.kind == TIMER:
self.kwargs["id"] = Event.TimerId
Event.TimerId += 1
def __str__(self):
if self.kind == MOUSE:
return "Button {} ({}, {})".format(
self.kwargs.get("button", 1), self.kwargs.get("x", -1),
self.kwargs.get("y", -1))
elif self.kind == KEYPRESS:
return "Key {}{}{}".format(
"Ctrl+" if self.kwargs.get("ctrl", False) else "",
"Shift+" if self.kwargs.get("shift", False) else "",
self.kwargs.get("key", ""))
elif self.kind == TIMER:
return "Timer {}".format(self.kwargs.get("id", -1))
elif self.kind == TERMINATE:
return "Terminate"
|
the-stack_106_23545 | import tensorflow as tf
"""Adapted from https://github.com/tkipf/gcn"""
def masked_softmax_cross_entropy(preds, labels, mask):
"""Softmax cross-entropy loss with masking."""
loss = tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2(logits=preds, labels=labels)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss)
def masked_accuracy(preds, labels, mask):
"""Accuracy with masking."""
correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
accuracy_all = tf.cast(correct_prediction, tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
accuracy_all *= mask
return tf.reduce_mean(accuracy_all)
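# --- Illustrative sketch, not part of the original module ---
# Tiny eager-mode example (assumes TensorFlow 2.x): two samples, only the
# first is kept by the mask, and its prediction is correct, so the masked
# accuracy comes out as 1.0. The constants below are made up for the demo.
if __name__ == "__main__":
    preds = tf.constant([[2.0, 0.1], [0.1, 2.0]])
    labels = tf.constant([[1.0, 0.0], [1.0, 0.0]])
    mask = tf.constant([1.0, 0.0])
    print(masked_accuracy(preds, labels, mask).numpy())              # -> 1.0
    print(masked_softmax_cross_entropy(preds, labels, mask).numpy())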
|
the-stack_106_23550 | import math
hour = int(input())
min = int(input())
min = min + 15
if min >= 60:
plus_hour = math.floor(min / 60)
min = min%60
hour += plus_hour
if hour > 24:
    hour = hour % 24  # wrap around past midnight instead of truncating
elif hour==24:
hour=0
if min < 10:
print(str(hour) + ':0' + str(min))
else:
print(str(hour) + ':' + str(min))
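# Illustrative examples (assumed inputs, not part of the original script):
#   input 10 then 10 -> prints 10:25
#   input 23 then 50 -> prints 0:05   (rolls over past midnight)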
|
the-stack_106_23551 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutControlStatements(Koan):
def test_if_then_else_statements(self):
if True:
result = 'true value'
else:
result = 'false value'
self.assertEqual(__, result)
def test_if_then_statements(self):
result = 'default value'
if True:
result = 'true value'
self.assertEqual(__, result)
def test_if_then_elif_else_statements(self):
if False:
result = 'first value'
elif True:
result = 'true value'
else:
result = 'default value'
self.assertEqual(__, result)
def test_while_statement(self):
i = 1
result = 1
while i <= 10:
result = result * i
i += 1
self.assertEqual(__, result)
def test_break_statement(self):
i = 1
result = 1
while True:
if i > 10: break
result = result * i
i += 1
self.assertEqual(__, result)
def test_continue_statement(self):
i = 0
result = []
while i < 10:
i += 1
if (i % 2) == 0: continue
result.append(i)
self.assertEqual(__, result)
def test_for_statement(self):
phrase = ["fish", "and", "chips"]
result = []
for item in phrase:
result.append(item.upper())
self.assertEqual([__, __, __], result)
def test_for_statement_with_tuples(self):
round_table = [
("Lancelot", "Blue"),
("Galahad", "I don't know!"),
("Robin", "Blue! I mean Green!"),
("Arthur", "Is that an African Swallow or European Swallow?")
]
result = []
for knight, answer in round_table:
result.append("Contestant: '" + knight + "' Answer: '" + answer + "'")
text = __
self.assertRegex(result[2], text)
self.assertNotRegex(result[0], text)
self.assertNotRegex(result[1], text)
self.assertNotRegex(result[3], text)
|
the-stack_106_23552 | from fabric.api import sudo, settings
from . import system
from .containers import conf
from .task import Task
from .utils import upload_config_template
__all__ = [
'install',
'restart',
'reload',
]
class AddPpa(Task):
def do(self):
sudo('add-apt-repository ppa:nginx/stable')
system.package_update.run(force=True)
add_ppa = AddPpa()
class Install(Task):
def do(self):
system.package_install.run(packages='nginx')
sudo('rm --force /etc/nginx/sites-enabled/default')
install = Install()
class PushConfigTask(Task):
@conf
def config(self):
return '/etc/nginx/sites-available/%(instance_name)s'
@conf
def enabled_config(self):
return '/etc/nginx/sites-enabled/%(instance_name)s'
def do(self):
upload_config_template(
self.conf.config_template,
self.conf.config,
context=self.conf,
use_sudo=True)
with settings(warn_only=True):
sudo('ln --symbolic %(config)s %(enabled_config)s' % self.conf)
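# Illustrative note on PushConfigTask (assuming instance_name == "myapp", which
# is not taken from the original source): the template is rendered to
# /etc/nginx/sites-available/myapp and then symlinked into
# /etc/nginx/sites-enabled/myapp, the directory nginx actually loads from.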
class PushUwsgiConfig(PushConfigTask):
@conf
def config_template(Task):
return 'nginx_uwsgi.config'
push_uwsgi_config = PushUwsgiConfig()
class Restart(Task):
def do(self):
sudo('invoke-rc.d nginx restart')
restart = Restart()
class Reload(Task):
def do(self):
sudo('invoke-rc.d nginx reload')
reload = Reload()
|
the-stack_106_23553 | try:
import requests
except Exception:
print(chr(69))
import os
import sys
os.system(f"{sys.executable} -m pip install requests")
import requests
import subprocess
import shutil
import json
import sys
import os
def download(url:str) -> None:
get_responce = requests.get(url, stream=True)
filename = url.split('/')[-1]
with open(filename, "wb") as f:
for chunk in get_responce.iter_content(chunk_size=1024):
f.write(chunk)
return filename
def greater_verison_check(current_version:str, pulled_version:str) -> bool:
new_l = pulled_version.split('.')
ver_l = current_version.split('.')
if (newLength := len(new_l)) > (ver_len := len(ver_l)):
ver_l.extend(["0"] * (newLength - ver_len))
elif (newLength := len(new_l)) < (ver_len := len(ver_l)):
new_l.extend(["0"] * (ver_len - newLength))
for i in range(len(new_l)):
if int(new_l[i]) > int(ver_l[i]): return True
if int(new_l[i]) < int(ver_l[i]): return False
return False
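# Illustrative sketch, not part of the original module: the comparison pads
# the shorter version with zeros and then compares field by field numerically.
#   greater_version_check("1.2", "1.10")  -> True   (1.10 is newer than 1.2)
#   greater_version_check("2.0", "1.9")   -> False
#   greater_version_check("1.0", "1.0.0") -> False  (equal after padding)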
def check_update(project_name:str, version:str) -> tuple:
r = requests.get("https://c69projectrepo.crossroadsactua.repl.co/stuff.json")
projects = json.loads(r.content.decode('utf-8'))
    if projects[project_name]["version"] != version and greater_version_check(version, projects[project_name]["version"]):
return (True, projects[project_name])
return (False, "All up to date")
def update_program(project_name:str) -> None:
filename = download(f"https://c69projectrepo.crossroadsactua.repl.co/{project_name}.zip")
shutil.unpack_archive(filename, f"./")
os.remove(filename)
subprocess.Popen(f"{sys.executable} ./Gui.py")
sys.exit(0)
|
the-stack_106_23554 | import logging
import itertools
import os
from typing import List, Tuple, Optional, Iterable
from copy import deepcopy
from tqdm import tqdm
import pandas as pd
import numpy as np
import torch
import gin
from ariadne.tracknet_v2.model import TrackNETv2
from ariadne.tracknet_v2.metrics import point_in_ellipse
from ariadne.preprocessing import (
BaseTransformer,
DataProcessor,
DataChunk,
ProcessedDataChunk,
ProcessedData
)
from ariadne.utils.model import weights_update
from ariadne.utils.base import *
from ariadne.tracknet_v2_1.processor import TrackNetV21Processor, ProcessedTracknetDataChunk, ProcessedTracknetData, TracknetDataChunk
LOGGER = logging.getLogger('ariadne.prepare')
@gin.configurable(denylist=['data_df'])
class TrackNetV21ProcessorWithModel(TrackNetV21Processor):
def __init__(self,
output_dir: str,
data_df: pd.DataFrame,
name_suffix: str,
valid_size: float,
device: str,
tracknet_v2_model: TrackNETv2,
n_times_oversampling: int = 4,
num_grus=1,
tracknet_v2_checkpoint: str = '',
transforms: List[BaseTransformer] = None):
super().__init__(
data_df=data_df,
output_dir=output_dir,
transforms=transforms,
name_suffix=name_suffix,
valid_size=valid_size,
n_times_oversampling=n_times_oversampling
)
self.output_name = f'{self.output_dir}/{name_suffix}'
self.n_times_oversampling = n_times_oversampling
self.valid_size = valid_size
self.chunks = []
self.device = torch.device(device)
self.model = tracknet_v2_model()
#self.model = TrackNETv2(input_features=3)
if tracknet_v2_checkpoint and os.path.isfile(tracknet_v2_checkpoint):
if not torch.cuda.is_available():
self.model = weights_update(model=self.model, checkpoint=torch.load(tracknet_v2_checkpoint, map_location=torch.device('cpu')))
self.device = torch.device('cpu')
else:
self.model = weights_update(model=self.model, checkpoint=torch.load(tracknet_v2_checkpoint))
self.model.to(self.device)
self.model.eval()
self.num_grus = num_grus
def preprocess_chunk(self,
chunk: TracknetDataChunk,
idx: str) -> ProcessedTracknetDataChunk:
df = chunk.df_chunk_data
if df.empty:
return ProcessedTracknetDataChunk(None, '', -1)
chunk_id = int(df.event.values[0])
output_name = f'{self.output_dir}/tracknet_with_model_{idx.replace(".txt", "")}'
grouped_df = df[df['track'] != -1].groupby('track')
last_station = df[df['station'] > 1][['z', 'phi', 'r']].values
multiplicity = grouped_df.ngroups
if multiplicity == 0:
return ProcessedTracknetDataChunk(None, '', -1)
chunk_data_x, chunk_data_y, chunk_data_real = brute_force_hits_two_first_stations(df)
chunk_data_len = np.full(len(chunk_data_x), 2)
chunk_prediction, chunk_gru = self.model(torch.tensor(chunk_data_x).to(self.device),
torch.tensor(chunk_data_len, dtype=torch.int64).to(self.device),
return_gru_states=True)
chunk_prediction = chunk_prediction[:, -1, :]
chunk_gru = chunk_gru[:, -self.num_grus:, :]
if self.num_grus > 1:
chunk_gru = chunk_gru.reshape(-1, self.num_grus*chunk_gru.shape[-1])
prediction_numpy = chunk_prediction.detach().cpu().numpy()
new_prediction = np.zeros((len(chunk_prediction), 5))
new_prediction[:, :2] = prediction_numpy[:, :2]
new_prediction[:, 2] = last_station[0, -1]
new_prediction[:, 3:] = prediction_numpy[:, 2:]
last_station_index = store_in_index(np.ascontiguousarray(last_station), n_dim=3)
nearest_hits_index = search_in_index(new_prediction[:, :3],
last_station_index,
1,
n_dim=3)
nearest_hits = last_station[nearest_hits_index]
nearest_hits, in_ellipse = filter_hits_in_ellipses(new_prediction,
nearest_hits,
nearest_hits_index,
filter_station=True,
z_last=True,
find_n=nearest_hits_index.shape[1],
n_dim=3)
is_close = np.all(
np.isclose(nearest_hits, np.expand_dims(chunk_data_y, 1).repeat(nearest_hits.shape[1], axis=1)), axis=-1)
found_real_track_ending = is_close & np.expand_dims(chunk_data_real.astype(bool), 1).repeat(is_close.shape[1],
axis=1)
is_close = np.all(np.isclose(nearest_hits, np.expand_dims(chunk_data_y, 1).repeat(nearest_hits.shape[1], 1)),
-1)
to_use = found_real_track_ending | in_ellipse
chunk_gru = np.expand_dims(chunk_gru.detach().cpu().numpy(), 1).repeat(nearest_hits.shape[1], 1)
nearest_hits = nearest_hits.reshape(-1, nearest_hits.shape[-1])
nothing_for_real_track = ~np.any(to_use, axis=-1) & (chunk_data_real > 0.5)
# chunk_gru_to_add = chunk_gru[nothing_for_real_track]
to_use = to_use.reshape(-1)
found_real_track_ending = is_close.reshape(-1)
chunk_gru = chunk_gru.reshape(-1, chunk_gru.shape[-1])
chunk_data_len = np.full(len(nearest_hits), 2)
chunk_data_event = np.full(len(nearest_hits), chunk_id)
chunk_data = {'x': {'grus': chunk_gru[to_use], 'preds': nearest_hits[to_use]},
'label': found_real_track_ending[to_use],
'event': chunk_data_event[to_use],
'multiplicity': multiplicity}
self.chunks.append(chunk_id)
return ProcessedTracknetDataChunk(chunk_data, output_name, chunk_id)
def postprocess_chunks(self,
chunks: List[ProcessedTracknetDataChunk]) -> ProcessedTracknetData:
return ProcessedTracknetData(chunks, chunks[0].output_name)
def save_on_disk(self,
processed_data: ProcessedTracknetData):
train_data_preds = []
train_data_inputs = []
train_data_labels = []
train_data_events = []
valid_data_preds = []
valid_data_inputs = []
valid_data_labels = []
valid_data_events = []
train_chunks = np.random.choice(self.chunks, int(len(self.chunks) * (1-self.valid_size)), replace=False)
for data_chunk in processed_data.processed_data:
if data_chunk.processed_object is None:
continue
if data_chunk.id in train_chunks:
initial_len = len(data_chunk.processed_object['label'])
multiplicity = data_chunk.processed_object['multiplicity']
max_len = int(multiplicity + (initial_len / self.n_times_oversampling))
to_use = data_chunk.processed_object['label'] | (np.arange(initial_len) < max_len)
train_data_inputs.append(data_chunk.processed_object['x']['grus'][to_use])
train_data_preds.append(data_chunk.processed_object['x']['preds'][to_use])
train_data_labels.append(data_chunk.processed_object['label'][to_use])
train_data_events.append(data_chunk.processed_object['event'][to_use])
else:
valid_data_inputs.append(data_chunk.processed_object['x']['grus'])
valid_data_preds.append(data_chunk.processed_object['x']['preds'])
valid_data_labels.append(data_chunk.processed_object['label'])
valid_data_events.append(data_chunk.processed_object['event'])
train_data_inputs = np.concatenate(train_data_inputs)
train_data_preds = np.concatenate(train_data_preds)
train_data_labels = np.concatenate(train_data_labels)
train_data_events = np.concatenate(train_data_events)
valid_data_inputs = np.concatenate(valid_data_inputs)
valid_data_preds = np.concatenate(valid_data_preds)
valid_data_labels = np.concatenate(valid_data_labels)
valid_data_events = np.concatenate(valid_data_events)
np.savez(
f'{processed_data.output_name}_train',
grus=train_data_inputs,
preds=train_data_preds,
labels=train_data_labels, # predicted right point and point was real
events=train_data_events
)
np.savez(
f'{processed_data.output_name}_valid',
grus=valid_data_inputs,
preds=valid_data_preds,
labels=valid_data_labels,
events=valid_data_events
)
LOGGER.info(f'Saved train hits to: {processed_data.output_name}_train.npz')
LOGGER.info(f'Saved valid hits to: {processed_data.output_name}_valid.npz')
|
the-stack_106_23555 | from typing import Any, Dict, List, Optional, Set, Callable, Tuple
import torch
import copy
import warnings
from torch.fx import (
GraphModule,
)
from torch.fx.graph import (
Graph,
Node,
Argument,
)
from ..utils import (
activation_is_statically_quantized,
weight_is_quantized,
get_qparam_dict,
_parent_name,
get_swapped_custom_module_class,
)
from ..qconfig import (
QConfigAny,
qconfig_equals
)
from ..qconfig_dict_utils import (
convert_dict_to_ordered_dict,
update_qconfig_for_qat,
)
from .qconfig_utils import (
generate_qconfig_map,
compare_prepare_convert_qconfig_dict,
update_qconfig_for_fusion,
is_qconfig_supported_by_dtype_configs,
)
from torch.ao.quantization.backend_config.utils import (
get_root_module_to_quantized_reference_module,
get_pattern_to_dtype_configs,
get_fused_module_classes,
get_qat_module_classes,
)
from torch.ao.quantization.backend_config import get_native_backend_config_dict
from .graph_module import (
QuantizedGraphModule,
is_observed_module,
is_observed_standalone_module,
)
from ._equalize import update_obs_for_equalization, convert_eq_obs
from .utils import (
get_custom_module_class_keys,
get_quantize_node_info,
create_getattr_from_value,
collect_producer_nodes,
graph_module_from_producer_nodes,
WEIGHT_INDEX_DICT,
)
from torch.ao.quantization.quantize import (
_remove_qconfig,
is_activation_post_process,
)
from .lower_to_fbgemm import lower_to_fbgemm
def restore_state(
observed: torch.nn.Module
) -> Tuple[Dict[str, Tuple[str, type]],
Dict[str, Any],
Set[str]]:
assert is_observed_module(observed), \
'incoming model must be produced by prepare_fx'
prepare_custom_config_dict: Dict[str, Any] = \
observed._prepare_custom_config_dict # type: ignore[assignment]
node_name_to_scope: Dict[str, Tuple[str, type]] = observed._node_name_to_scope # type: ignore[assignment]
observed_node_names: Set[str] = observed._observed_node_names # type: ignore[assignment]
return node_name_to_scope, prepare_custom_config_dict, observed_node_names
def has_none_qconfig(node: Argument, qconfig_map: Dict[str, QConfigAny]) -> bool:
""" Check if a node has a qconfig of None, i.e. user requested to not quantize
the node
"""
return isinstance(node, Node) and node.name in qconfig_map and qconfig_map[node.name] is None
def run_weight_observers(observed: GraphModule) -> None:
""" Extract the subgraph that produces the weight for dynamic quant
or weight only quant node and run the subgraph to observe the weight.
Note that the observers of dynamic quant or weight only quant ops are
run during the convert step.
"""
for node in observed.graph.nodes:
if node.op != 'call_function' or node.target not in WEIGHT_INDEX_DICT:
continue
for i, node_arg in enumerate(node.args):
if i not in WEIGHT_INDEX_DICT[node.target]:
continue
# node_arg is weight
weight_observer_nodes = collect_producer_nodes(node_arg)
if weight_observer_nodes is None:
continue
weight_observer_module = \
graph_module_from_producer_nodes(
observed, weight_observer_nodes)
# run the weight observer
weight_observer_module()
# this method is temporary will be removed soon
def duplicate_quantize_dynamic_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
quantized_root = quantized
for node in quantized.graph.nodes:
if (node.op == "call_function" and node.target == torch.quantize_per_tensor_dynamic):
users = list(node.users)
if len(users) > 1:
for user in users:
with quantized.graph.inserting_before(node):
new_node = quantized.graph.create_node(
"call_function",
torch.quantize_per_tensor_dynamic,
node.args,
node.kwargs)
user.replace_input_with(node, new_node)
quantized.graph.erase_node(node)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def duplicate_dequantize_node(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
"""
If a dequantize node has multiple uses, duplicate it and create one dequantize node for each use.
This is to enable the pattern matching to map from individual quant - dequant - ref_module to
final quantized module.
"""
quantized_root = quantized
for node in quantized.graph.nodes:
if (node.op == "call_method" and node.target == "dequantize" or
(node.op == "call_function" and node.target == torch.dequantize)):
users = list(node.users)
if len(users) > 1:
for user in users:
with quantized.graph.inserting_before(node):
new_node = quantized.graph.create_node("call_method", "dequantize", node.args, {})
user.replace_input_with(node, new_node)
quantized.graph.erase_node(node)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
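# Illustrative sketch (not from the original source) of the rewrite performed
# by duplicate_dequantize_node, shown as graph edges:
#
#   before:  quantize_per_tensor -> dequantize -> {linear_1, linear_2}
#   after:   quantize_per_tensor -> dequantize_a -> linear_1
#                                \-> dequantize_b -> linear_2
#
# Each user gets its own dequantize node, so the quant - dequant - ref_module
# pattern can later be matched and lowered independently for every use.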
def remove_extra_dequantize(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
"""
Removes duplicate dequant nodes in the graph, for an operator that has multiple dequant nodes as a user,
replace them with a single dequant node that can be shared across all the uses.
"""
quantized_root = quantized
for node in quantized.graph.nodes:
users = list(node.users)
dequant_users = [user for user in node.users if user.op == "call_method" and user.target == "dequantize" or
(user.op == "call_function" and user.target == torch.dequantize)]
if len(dequant_users) > 1:
with quantized.graph.inserting_after(node):
unique_dq = quantized.graph.create_node("call_method", "dequantize", users[0].args, {})
for dequant in dequant_users:
dequant.replace_all_uses_with(unique_dq)
quantized.graph.erase_node(dequant)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def remove_quant_dequant_pairs(quantized: QuantizedGraphModule) -> QuantizedGraphModule:
quantized_root = quantized
for node in quantized.graph.nodes:
if node.op == "call_function" and node.target in [torch.quantize_per_tensor, torch.quantize_per_channel]:
users = list(node.users)
user = users[0] if users else None
if len(users) == 1 and user.op == "call_method" and user.target == "dequantize":
user.replace_all_uses_with(node.args[0])
quantized.graph.erase_node(user)
orig_args = list(node.args)
quantized.graph.erase_node(node)
for arg in orig_args:
if isinstance(arg, Node) and len(list(arg.users)) == 0:
quantized.graph.erase_node(arg)
quantized = QuantizedGraphModule(quantized_root, quantized.graph, quantized_root.preserved_attr_names)
return quantized
def maybe_recursive_remove_dequantize(arg: Any, node: Node, graph: Graph):
""" If the arg is a dequantize Node, or a list/tuple/dict of dequantize Node,
we'll recursively remove the dequantize Node
"""
if isinstance(arg, Node) and \
arg.op == "call_method" and \
arg.target == "dequantize":
quantize_node = arg.args[0]
# we only replace the specific use since dequantize could be used by other nodes
# as well
node.replace_input_with(arg, quantize_node)
elif isinstance(arg, (list, tuple)):
for arg_element in arg:
maybe_recursive_remove_dequantize(arg_element, node, graph)
elif isinstance(arg, dict):
for arg_element in arg.values():
maybe_recursive_remove_dequantize(arg_element, node, graph)
else:
warnings.warn(f"Unsupported node type in recursive remove dequantize: {type(arg)}")
def get_module_path_and_prefix(
obs_node: Node,
node_name_to_scope: Dict[str, Tuple[str, type]],
qconfig_map: Dict[str, QConfigAny]):
""" Given and observer node, get the `Scope` or the fully qualified name for
the submodule containing the observed node, also return a prefix of "_input"
when the observed node is an input of a F.linear op, and not the output of another
quantized op.
TODO: this logic is hacky, we should think about how to remove it or make it more
general
"""
observed_node = obs_node.args[0]
# an observer can be inserted for both input of the next operator or output of the previous
# operator (they can be the same)
# this flag identifies if the observer is inserted only because the observed node is
# the input of the next operator
assert isinstance(observed_node, Node), \
f"Expecting observed node to be a Node, but got {observed_node}"
is_input_observer_only = qconfig_map[observed_node.name] is None if observed_node.name in qconfig_map else None
if is_input_observer_only:
# if the quantize function is at the input of op, then we find the first user of the observer_node
# to get the path. If a linear call_function is in the user list, we return the first instance
# of linear node to get the FQN.
users = list(obs_node.users)
first_linear_use_or_first_use = users[0] if users else None
linear_node = None
for n in users:
if n.op == "call_function" and n.target == torch.nn.functional.linear:
linear_node = n
break
if linear_node:
first_linear_use_or_first_use = linear_node
prefix = "_input"
else:
# if the quantize function is at the output of the op, we use the observer input node to get the path
first_linear_use_or_first_use = observed_node
prefix = ""
if first_linear_use_or_first_use and first_linear_use_or_first_use.name in node_name_to_scope:
module_path, _ = node_name_to_scope[first_linear_use_or_first_use.name]
else:
# TODO: it's not used, so actually we can skip quantization
# but this requires changing return type of quantize_node
# we can fix it later if needed
module_path = ""
return module_path, prefix
def insert_dequantize_node(
node: Node,
graph: Graph):
""" Inserts dequantize node for `node` in `graph`
"""
with graph.inserting_after(node):
dequantize_node = graph.call_method("dequantize", (node,))
for user_node in dict(node.users):
if user_node is not dequantize_node:
user_node.replace_input_with(node, dequantize_node)
def maybe_get_observer_for_node(
node: Node,
modules: Dict[str, torch.nn.Module]
) -> Optional[torch.nn.Module]:
"""
If the node is observed, return the observer
instance. Otherwise, return None.
"""
for maybe_obs_node, _ in node.users.items():
if maybe_obs_node.op == 'call_module':
maybe_obs = modules[str(maybe_obs_node.target)]
if is_activation_post_process(maybe_obs):
return maybe_obs
return None
def convert_standalone_module(
node: Node,
modules: Dict[str, torch.nn.Module],
model: torch.fx.GraphModule,
is_reference: bool,
backend_config_dict: Optional[Dict[str, Any]]):
""" Converts a observed standalone module to a quantized standalone module by calling
the fx convert api, currently using the same `is_reference` flag as parent, but we may
changing this behavior in the future (e.g. separating quantization and lowering for
standalone module as well)
Args:
- node: The call_module node of the observed standalone module
- modules: named_module of original model
- model: original model
- is_reference: a flag from parent provided by user to decide if we want to
produce a reference model or a fbgemm/qnnpack model
- backend_config_dict: backend configuration of the target backend of quantization
"""
convert = torch.ao.quantization.quantize_fx.convert_fx # type: ignore[attr-defined]
# We know that observed standalone module is a GraphModule since
# it's produced by us
observed_standalone_module : GraphModule = modules[str(node.target)] # type: ignore[assignment]
sm_input_quantized_idxs = \
observed_standalone_module \
._standalone_module_input_quantized_idxs\
.tolist() # type: ignore[operator]
# remove the dequantize nodes for inputs
args = list(node.args)
for idx in range(len(args)):
if idx in sm_input_quantized_idxs:
arg = args[idx]
if arg.op == "call_method" and arg.target == "dequantize": # type: ignore[union-attr]
quantize_node = arg.args[0] # type: ignore[union-attr]
node.replace_input_with(arg, quantize_node)
if len(arg.users) == 0: # type: ignore[union-attr]
model.graph.erase_node(arg)
# add dequantize node for output
sm_output_quantized_idxs = \
observed_standalone_module \
._standalone_module_output_quantized_idxs \
.tolist() # type: ignore[operator]
if len(sm_output_quantized_idxs) > 0:
        assert sm_output_quantized_idxs[0] == 0, \
            "Currently only quantized output idxs = [0] is supported"
# if it's non-empty, then it means the output is kept in quantized form
# we'll just add a dequantize node after this node
insert_dequantize_node(node, model.graph)
# TODO: allow convert_custom_config_dict to override backend_config_dict
# for standalone module
# TODO: think about how to handle `is_reference` here
quantized_standalone_module = convert(
observed_standalone_module,
is_reference=is_reference,
backend_config_dict=backend_config_dict)
parent_name, name = _parent_name(node.target)
# update the modules dict
setattr(modules[parent_name], name, quantized_standalone_module)
modules[str(node.target)] = quantized_standalone_module
def convert_weighted_module(
node: Node,
modules: Dict[str, torch.nn.Module],
observed_node_names: Set[str],
qconfig_map: Dict[str, QConfigAny],
backend_config_dict: Dict[str, Any]):
""" Convert a weighted module to reference quantized module in the model
If the QConfig of a QAT module is not set, the module will still be converted to
a float module.
Args:
- node: The call_module node of the observed standalone module
- modules: named_module of original model
- observed_node_names: names for the set of observed fx node, we can skip
this conversion if the node is not observed
"""
original_module = modules[str(node.target)]
qconfig: QConfigAny = original_module.qconfig # type: ignore[assignment]
weight_post_process = None
qat_module_classes = get_qat_module_classes(backend_config_dict)
if isinstance(
original_module,
qat_module_classes):
        # Converting qat module to a float module, we need to attach
# weight fake_quant to the module, weight fake_quant is assumed to be run during
# QAT so we don't need to run it again here
weight_post_process = original_module.weight_fake_quant
original_module = original_module.to_float() # type: ignore[operator]
# change qat module to float module
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, original_module)
is_observed = node.name in observed_node_names
# If a qconfig is not defined for this node, then skip converting to a reference module
if qconfig is None or has_none_qconfig(node, qconfig_map) or not is_observed:
return
# skip converting to reference quantized module if the qconfig is not supported
pattern_to_dtype_configs = get_pattern_to_dtype_configs(backend_config_dict)
dtype_configs = pattern_to_dtype_configs.get(type(original_module), [])
if not is_qconfig_supported_by_dtype_configs(qconfig, dtype_configs):
return
# TODO: rename weight_is_statically_quantized to weight_is_int8_quantized
is_weight_quantized = weight_is_quantized(qconfig)
# the condition for swapping the module to reference quantized module is:
# weights need to be quantized
if not is_weight_quantized:
return
fused_module = None
float_module = original_module
    # extract the individual float_module and fused module
if isinstance(original_module, torch.nn.intrinsic._FusedModule):
fused_module = float_module
float_module = fused_module[0] # type: ignore[index]
# TODO: move this to the reference quantized module
# weight_qparams or weight_qparams dict
wq_or_wq_dict = {}
if isinstance(float_module, torch.nn.RNNCellBase):
weight_post_process_ih = qconfig.weight() # type: ignore[union-attr, operator]
weight_post_process_hh = qconfig.weight() # type: ignore[union-attr, operator]
weight_post_process_ih(float_module.weight_ih)
weight_post_process_hh(float_module.weight_hh)
weight_qparams_ih = get_qparam_dict(weight_post_process_ih)
weight_qparams_hh = get_qparam_dict(weight_post_process_hh)
wq_or_wq_dict = {
"weight_ih": weight_qparams_ih,
"weight_hh": weight_qparams_hh,
}
elif isinstance(float_module, torch.nn.LSTM):
# format for wq_or_wq_dict (flattened attributes):
# {"weight_ih_l0_scale": ..., "weight_ih_l0_qscheme": ..., ...}
for wn in float_module._flat_weights_names:
if hasattr(float_module, wn) and wn.startswith("weight"):
weight = getattr(float_module, wn)
weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
if weight_post_process.dtype == torch.qint8: # type: ignore[union-attr]
weight_post_process(weight) # type: ignore[operator, misc]
wq_or_wq_dict[wn] = get_qparam_dict(weight_post_process)
else:
# weight_post_process is None means the original module is not a QAT module
# we need to get weight_post_process from qconfig in this case
if weight_post_process is None:
weight_post_process = qconfig.weight() # type: ignore[union-attr, operator]
# run weight observer
# TODO: This is currently a hack for QAT to get the right shapes for scale and zero point.
# In the future, we should require the user to calibrate the model after calling prepare
# Issue: https://github.com/pytorch/pytorch/issues/73941
weight_post_process(float_module.weight) # type: ignore[operator]
wq_or_wq_dict = get_qparam_dict(weight_post_process)
# We use the same reference module for all modes of quantization: static, dynamic, weight_only
# root_module_to_quantized_reference_module: module mapping from root (floating point) module class
# to quantized reference module class, e.g. nn.Conv2d to nn.quantized._reference.Conv2d
root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config_dict)
ref_qmodule_cls = root_module_to_quantized_reference_module.get(type(float_module), None)
assert ref_qmodule_cls is not None, f"No reference quantized module class configured for {type(float_module)}"
ref_qmodule = ref_qmodule_cls.from_float(float_module, wq_or_wq_dict) # type: ignore[attr-defined]
if fused_module is not None:
fused_module[0] = ref_qmodule # type: ignore[operator]
else:
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, ref_qmodule)
def convert_custom_module(
node: Node,
graph: Graph,
modules: Dict[str, torch.nn.Module],
custom_module_class_mapping: Dict[Callable, Callable],
statically_quantized_custom_module_nodes: Set[Node]):
""" Converts an observed custom module to a quantized custom module based on
`custom_module_class_mapping`
For static quantization, we'll also remove the previous `dequantize` node and
attach the observer node for output to the module, the observer for the node
will be converted to a dequantize node instead of quantize-dequantize pairs
later in the graph. In the end we would have a quantized custom module that
has the same interface as a default quantized module in nn.quantized namespace,
i.e. quantized input and quantized output.
Args:
- node: The call_module node of the observed standalone module
- graph: The graph containing the node
- modules: named_module of original model
- custom_module_class_mapping: mapping from observed custom module class to
quantized custom module class, used to swap custom modules
- statically_quantized_custom_module_nodes: we'll add the custom module node
if we find it is statically quantized, this will be used later when converting
observers to quant/dequant node pairs, if the observed node is a statically
quantized custom module nodes, we'll convert the observer to a dequantize node,
this is to keep the interface the same as the default quantized module.
TODO: maybe we want to redesign this part to align with reference model design
as well, but there has been some discussions around the interface, so we can do
it later.
"""
observed_custom_module = modules[str(node.target)]
maybe_obs = maybe_get_observer_for_node(node, modules)
qconfig = observed_custom_module.qconfig
if activation_is_statically_quantized(qconfig):
statically_quantized_custom_module_nodes.add(node)
# remove the previous dequant node
prev_node = node.args[0]
# expecting the input node for a custom module node to be a Node
assert isinstance(prev_node, Node), \
f"Expecting the argument for custom module node to be a Node, but got {prev_node}"
if prev_node.op == "call_method" and prev_node.target == "dequantize":
# change the connection for custom module, we'll change the input
# of custom module node to quantize node:
# Before: quantize - dequantize - custom - module
# After: quantize - custom - module
# \ - dequantize
node.replace_input_with(prev_node, prev_node.args[0])
# Remove the dequantize node if it doesn't have other users
if len(prev_node.users) == 0:
graph.erase_node(prev_node)
# absorb the following observer into the module conversion
activation_post_process = maybe_get_observer_for_node(node, modules)
assert activation_post_process is not None
observed_custom_module.activation_post_process = activation_post_process
# swap the observed custom module to quantized custom module
quantized_custom_module_class = get_swapped_custom_module_class(
observed_custom_module, custom_module_class_mapping, qconfig)
quantized_custom_module = \
quantized_custom_module_class.from_observed(observed_custom_module)
parent_name, name = _parent_name(node.target)
setattr(modules[parent_name], name, quantized_custom_module)
def convert(
model: GraphModule, is_reference: bool = False,
convert_custom_config_dict: Dict[str, Any] = None,
is_standalone_module: bool = False,
_remove_qconfig_flag: bool = True,
convert_qconfig_dict: Dict[str, Any] = None,
backend_config_dict: Optional[Dict[str, Any]] = None) -> torch.nn.Module:
"""
We will convert an observed model (a module with observer calls) to a reference
quantized model, the rule is simple:
1. for each observer module call in the graph, we'll convert it to calls to
quantize and dequantize functions based on the observer instance
2. for weighted operations like linear/conv, we need to convert them to reference
quantized module, this requires us to know whether the dtype configured for the
weight is supported in the backend, this is done in prepare step and the result
is stored in observed_node_names, we can decide whether we need to swap the
module based on this set
    standalone_module means it is a submodule that is not inlined in
parent module, and will be quantized separately as one unit.
Returns a quantized standalone module, whether input/output is quantized is
specified by prepare_custom_config_dict, with
input_quantized_idxs, output_quantized_idxs, please
see docs for prepare_fx for details
"""
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
node_name_to_scope, prepare_custom_config_dict, observed_node_names = restore_state(model)
qconfig_map: Dict[str, QConfigAny] = model._qconfig_map # type: ignore[assignment]
# TODO this should be removed now that gpu support for quantization is being supported.
# however in practice, as of 7/22/2021, certain functions that get called by convert expect
# only cpu arguments.
# As an example, in TestQuantizeFxModels.test_qat_functional_linear when device='cuda',
# fold_weight will call quantized::linear_prepack which doesn't support QuantizedCuda backend.
if not is_reference:
model.cpu()
# mapping from fully qualified module name to module instance
# for example,
# {
# '': Model(...),
# 'linear': Linear(...),
# 'linear.weight_fake_quant': PerChannelMinMaxObserver(...),
# }
# We use remove_duplicate=False here because torch.cat uses
# the same activation_post_process module instance but different names
modules = dict(model.named_modules(remove_duplicate=False))
# TODO refactor this code once we update the prepare logic to have additional information on
# which graph nodes have been observed and share that with convert to decide which observers to ignore.
if convert_qconfig_dict:
prepare_qconfig_dict: Dict[str, Dict[Any, Any]] = model._qconfig_dict # type: ignore[assignment]
modules_copy = copy.deepcopy(modules)
convert_dict_to_ordered_dict(convert_qconfig_dict)
if model._is_qat:
convert_qconfig_dict = update_qconfig_for_qat(convert_qconfig_dict, {})
convert_qconfig_dict = update_qconfig_for_fusion(model, convert_qconfig_dict)
compare_prepare_convert_qconfig_dict(prepare_qconfig_dict, convert_qconfig_dict) # type: ignore[arg-type]
convert_qconfig_map = generate_qconfig_map(model, modules_copy, model.graph, convert_qconfig_dict, node_name_to_scope)
# check the convert_qconfig_map generated and ensure that all the values either match what was set in prepare qconfig_map
# or are set to None in the convert_qconfig_map.
for k, v in qconfig_map.items():
assert k in convert_qconfig_map, 'Expected key {} in convert qconfig_map'.format(k)
if convert_qconfig_map[k] is not None:
assert qconfig_equals(v, convert_qconfig_map[k]), 'Expected k {} to have the same value in prepare qconfig_dict \
and convert qconfig_dict, found {} updated to {}.'.format(k, v, convert_qconfig_map[k])
qconfig_map = convert_qconfig_map
custom_module_classes = get_custom_module_class_keys(
convert_custom_config_dict,
"observed_to_quantized_custom_module_class")
custom_module_class_mapping = convert_custom_config_dict.get("observed_to_quantized_custom_module_class", {})
if model._equalization_qconfig_map is not None:
# If we want to do equalization then do the following:
# Calculate the equalization scale, update the observers with the scaled
# inputs, and scale the weight
weight_eq_obs_dict = update_obs_for_equalization(model, modules)
convert_eq_obs(model, modules, weight_eq_obs_dict)
# always run weight observers in the top level forward method
# for dynamic quant ops or weight only quant ops
run_weight_observers(model)
graph_inputs: List[str] = []
for node in model.graph.nodes:
if node.op == 'placeholder':
graph_inputs.append(node.name)
# TODO: move this outside of this function
def replace_observer_with_quantize_dequantize_node(
model: torch.nn.Module,
graph: Graph,
node: Node,
modules: Dict[str, torch.nn.Module],
node_name_to_scope: Dict[str, Tuple[str, type]],
qconfig_map: Dict[str, QConfigAny]) -> None:
""" Replace activation_post_process module call node with quantize and
dequantize node
Before:
... -> observer_0(x) -> ...
After:
... -> torch.quantize_per_tensor(x, ...) -> x.dequantize() -> ...
"""
assert modules is not None
assert isinstance(node.target, str)
module_path, prefix = get_module_path_and_prefix(node, node_name_to_scope, qconfig_map)
observer_module = modules[node.target]
maybe_quantize_node_info = get_quantize_node_info(observer_module)
# Skip replacing observers to quant/dequant nodes if the qconfigs of all
# consumers and producers of this observer are None
skip_replacement = all([
has_none_qconfig(n, qconfig_map) for n in
list(node.args) + list(node.users.keys())])
if skip_replacement or maybe_quantize_node_info is None:
            # didn't find corresponding quantize op and info for the observer_module
# so we just remove the observer
with graph.inserting_before(node):
node.replace_all_uses_with(node.args[0])
graph.erase_node(node)
else:
            # otherwise, we can convert the observer module call to quantize/dequantize node
node_type, quantize_op, qparams = maybe_quantize_node_info
# replace observer node with quant - dequant node
with graph.inserting_before(node):
input_node = node.args[0]
inputs = [input_node]
for key, value in qparams.items():
# TODO: we can add the information of whether a value needs to
# be registered as an attribute in qparams dict itself
if key in ['_scale_', '_zero_point_']:
# For scale and zero_point values we register them as buffers in the root module.
# TODO: maybe need more complex attr name here
qparam_node = create_getattr_from_value(model, graph, module_path + prefix + key, value)
inputs.append(qparam_node)
else:
# for qparams that are not scale/zero_point (like axis, dtype) we store them as literals in the graph.
inputs.append(value)
quantized_node = graph.create_node(node_type, quantize_op, tuple(inputs), {})
dequantized_node = graph.call_method("dequantize", args=(quantized_node,))
node.replace_all_uses_with(dequantized_node)
graph.erase_node(node)
# this is a temporary hack for custom module, we may want to implement
# this properly after the custom module class design is finalized
def replace_observer_with_dequantize_node(node: Node, graph: Graph):
call_custom_module_node = node.args[0]
assert isinstance(call_custom_module_node, Node), \
f"Expecting the for call custom module node to be a Node, but got {call_custom_module_node}"
node.replace_all_uses_with(call_custom_module_node)
graph.erase_node(node)
insert_dequantize_node(call_custom_module_node, graph)
# additional state to override inputs to be quantized, if specified
# by the user
placeholder_node_seen_cnt = 0
input_quantized_idxs: List[int] = prepare_custom_config_dict.get(
"input_quantized_idxs", [])
output_quantized_idxs: List[int] = prepare_custom_config_dict.get(
"output_quantized_idxs", [])
if backend_config_dict is None:
backend_config_dict = get_native_backend_config_dict()
root_module_to_quantized_reference_module = get_root_module_to_quantized_reference_module(backend_config_dict)
# convert tuples so that it can work with isinstance(module, tuple_of_classes)
root_module_classes = tuple(root_module_to_quantized_reference_module.keys())
qat_module_classes = get_qat_module_classes(backend_config_dict)
fused_module_classes = get_fused_module_classes(backend_config_dict)
statically_quantized_custom_module_nodes: Set[Node] = set()
for node in list(model.graph.nodes):
if node.op == 'placeholder':
cur_placeholder_node_idx = placeholder_node_seen_cnt
placeholder_node_seen_cnt += 1
if cur_placeholder_node_idx in input_quantized_idxs:
                # Inputs are assumed to be quantized if the user specified the
# input_quantized_idxs override.
# we need to dequantize the inputs since all operators took
# floating point inputs in reference quantized models
insert_dequantize_node(node, model.graph)
elif node.op == "output":
# If the argument is empty we don't need to do anything
if len(output_quantized_idxs) == 0:
continue
            # Results are kept quantized if the user specified the
# output_quantized_idxs override.
# Remove the dequantize operator for the node in the end if any
return_node = node
output = node.args[0]
# outputs can be Node, list, tuple, dict, other cases are not supported yet
if isinstance(output, (list, tuple)):
for idx in output_quantized_idxs:
maybe_recursive_remove_dequantize(output[idx], return_node, model.graph)
elif isinstance(output, (Node, dict)):
# we treat dict as a single argument currently, but it can be extended
# to support {"key": dtype} after we change output_quantized_idxs to
# dict
if 0 in output_quantized_idxs:
maybe_recursive_remove_dequantize(output, return_node, model.graph)
else:
warnings.warn(f"Unsupported node type for output_quantized_idxs: {type(output)}")
elif node.op == "call_module":
if is_activation_post_process(modules[node.target]):
observed_node = node.args[0]
if observed_node in statically_quantized_custom_module_nodes:
replace_observer_with_dequantize_node(node, model.graph)
else:
replace_observer_with_quantize_dequantize_node(
model, model.graph, node, modules, node_name_to_scope,
qconfig_map)
elif is_observed_standalone_module(modules[node.target]):
convert_standalone_module(
node, modules, model, is_reference, backend_config_dict)
elif type(modules[node.target]) in set(
root_module_classes).union(qat_module_classes).union(fused_module_classes):
# extra check for fused module classes to make sure they are fused module classes
# of target modules
if type(modules[node.target]) in fused_module_classes and \
type(modules[node.target][0]) not in root_module_classes:
continue
convert_weighted_module(
node, modules, observed_node_names, qconfig_map, backend_config_dict)
elif type(modules[node.target]) in custom_module_classes:
convert_custom_module(
node, model.graph, modules, custom_module_class_mapping,
statically_quantized_custom_module_nodes)
preserved_attributes = set(convert_custom_config_dict.get("preserved_attributes", []))
model = QuantizedGraphModule(model, copy.deepcopy(model.graph), preserved_attributes)
# remove deadcode after converting observers to quant/dequant ops
model.graph.eliminate_dead_code()
model.recompile()
# TODO: maybe move this to quantize_fx.py
if not is_reference:
model = duplicate_dequantize_node(model)
model = duplicate_quantize_dynamic_node(model)
model = lower_to_fbgemm(model, qconfig_map, node_name_to_scope)
model = remove_quant_dequant_pairs(model)
model = remove_extra_dequantize(model)
# TODO: this looks hacky, we want to check why we need this and see if we can
# remove this
# removes qconfig and activation_post_process modules
if _remove_qconfig_flag:
_remove_qconfig(model)
return model
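# Added note (hedged, not part of the upstream file): in user code this convert
# step is normally reached through the top-level FX helpers rather than by
# calling convert() directly, along the lines of
#
#     from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx
#     qconfig_dict = {"": torch.ao.quantization.get_default_qconfig("fbgemm")}
#     prepared = prepare_fx(float_model.eval(), qconfig_dict)
#     # ... run calibration data through `prepared` ...
#     quantized = convert_fx(prepared)
#
# Exact import paths and signatures vary between PyTorch releases (for example,
# newer versions require example_inputs in prepare_fx), so treat this as a sketch.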
|
the-stack_106_23556 | # qubit number=2
# total number=3
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
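# Added note (hedged): make_circuit below prepares the extra target qubit in the
# |-> state (X followed by H), so this bit-flip oracle |x>|y> -> |x>|y XOR f(x)>
# acts as a phase oracle on the inputs via the usual phase-kickback trick.
# For the specific f used in __main__ (f(rep) = last bit of rep), the oracle is
# equivalent to a single CNOT from the last input qubit onto the target.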
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[1]) # number=2
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = FakeVigo()
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_noisy1.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
the-stack_106_23557 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import re
import random
import itertools
import functools
from decimal import Decimal
from pathlib import Path
from cached_property import cached_property
from ._common import PokerEnum, _ReprMixin
from .card import Rank, Card, BROADWAY_RANKS
__all__ = ['Shape', 'Hand', 'Combo', 'Range', 'PAIR_HANDS', 'OFFSUIT_HANDS', 'SUITED_HANDS']
# pregenerated all the possible suit combinations, so we don't have to count them all the time
_PAIR_SUIT_COMBINATIONS = ('cd', 'ch', 'cs', 'dh', 'ds', 'hs')
_OFFSUIT_SUIT_COMBINATIONS = ('cd', 'ch', 'cs', 'dc', 'dh', 'ds',
'hc', 'hd', 'hs', 'sc', 'sd', 'sh')
_SUITED_SUIT_COMBINATIONS = ('cc', 'dd', 'hh', 'ss')
class Shape(PokerEnum):
OFFSUIT = 'o', 'offsuit', 'off'
SUITED = 's', 'suited'
PAIR = '',
class _HandMeta(type):
"""Makes Hand class iterable. __iter__ goes through all hands in ascending order."""
def __new__(metacls, clsname, bases, classdict):
"""Cache all possible Hand instances on the class itself."""
cls = super(_HandMeta, metacls).__new__(metacls, clsname, bases, classdict)
cls._all_hands = tuple(cls._get_non_pairs()) + tuple(cls._get_pairs())
return cls
def _get_non_pairs(cls):
for rank1 in Rank:
for rank2 in (r for r in Rank if r < rank1):
yield cls('{}{}o'.format(rank1, rank2))
yield cls('{}{}s'.format(rank1, rank2))
def _get_pairs(cls):
for rank in Rank:
yield cls(rank.val * 2)
def __iter__(cls):
return iter(cls._all_hands)
def make_random(cls):
obj = object.__new__(cls)
first = Rank.make_random()
second = Rank.make_random()
obj._set_ranks_in_order(first, second)
if first == second:
obj._shape = ''
else:
obj._shape = random.choice(['s', 'o'])
return obj
@functools.total_ordering
class Hand(_ReprMixin):
"""General hand without a precise suit. Only knows about two ranks and shape."""
__metaclass__ = _HandMeta
__slots__ = ('first', 'second', '_shape')
def __new__(cls, hand):
if isinstance(hand, cls):
return hand
if len(hand) not in (2, 3):
raise ValueError('Length should be 2 (pair) or 3 (hand)')
first, second = hand[:2]
self = object.__new__(cls)
if len(hand) == 2:
if first != second:
raise ValueError('%r, Not a pair! Maybe you need to specify a suit?' % hand)
self._shape = ''
elif len(hand) == 3:
shape = hand[2].lower()
if first == second:
raise ValueError("{!r}; pairs can't have a suit: {!r}".format(hand, shape))
if shape not in ('s', 'o'):
raise ValueError('{!r}; Invalid shape: {!r}'.format(hand, shape))
self._shape = shape
self._set_ranks_in_order(first, second)
return self
def __unicode__(self):
return '{}{}{}'.format(self.first, self.second, self.shape)
def __hash__(self):
return hash(self.first) + hash(self.second) + hash(self.shape)
def __getstate__(self):
return {'first': self.first, 'second': self.second, '_shape': self._shape}
def __setstate__(self, state):
self.first, self.second, self._shape = state['first'], state['second'], state['_shape']
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# AKs != AKo, because AKs is better
return (self.first == other.first and
self.second == other.second and
self.shape.val == other.shape.val)
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# pairs are better than non-pairs
if not self.is_pair and other.is_pair:
return True
elif self.is_pair and not other.is_pair:
return False
elif (not self.is_pair and not other.is_pair and
self.first == other.first and self.second == other.second and
self._shape != other._shape):
# when Rank match, only suit is the deciding factor
# so, offsuit hand is 'less' than suited
return self._shape == 'o'
elif self.first == other.first:
return self.second < other.second
else:
return self.first < other.first
def _set_ranks_in_order(self, first, second):
# set as Rank objects.
self.first, self.second = Rank(first), Rank(second)
if self.first < self.second:
self.first, self.second = self.second, self.first
def to_combos(self):
first, second = self.first.val, self.second.val
if self.is_pair:
return tuple(Combo(first + s1 + first + s2) for s1, s2 in _PAIR_SUIT_COMBINATIONS)
elif self.is_offsuit:
return tuple(Combo(first + s1 + second + s2) for s1, s2 in _OFFSUIT_SUIT_COMBINATIONS)
else:
return tuple(Combo(first + s1 + second + s2) for s1, s2 in _SUITED_SUIT_COMBINATIONS)
@property
def is_suited_connector(self):
return self.is_suited and self.is_connector
@property
def is_suited(self):
return self._shape == 's'
@property
def is_offsuit(self):
return self._shape == 'o'
@property
def is_connector(self):
return self.rank_difference == 1
@property
def is_one_gapper(self):
return self.rank_difference == 2
@property
def is_two_gapper(self):
return self.rank_difference == 3
@property
def rank_difference(self):
"""The difference between the first and second rank of the Hand."""
# self.first >= self.second
return Rank.difference(self.first, self.second)
@property
def is_broadway(self):
return (self.first in BROADWAY_RANKS and self.second in BROADWAY_RANKS)
@property
def is_pair(self):
return self.first == self.second
@property
def shape(self):
return Shape(self._shape)
@shape.setter
def shape(self, value):
self._shape = Shape(value).val
PAIR_HANDS = tuple(hand for hand in Hand if hand.is_pair)
"""Tuple of all pair hands in ascending order."""
OFFSUIT_HANDS = tuple(hand for hand in Hand if hand.is_offsuit)
"""Tuple of offsuit hands in ascending order."""
SUITED_HANDS = tuple(hand for hand in Hand if hand.is_suited)
"""Tuple of suited hands in ascending order."""
@functools.total_ordering
class Combo(_ReprMixin):
"""Hand combination."""
__slots__ = ('first', 'second')
def __new__(cls, combo):
if isinstance(combo, Combo):
return combo
if len(combo) != 4:
raise ValueError('%r, should have a length of 4' % combo)
elif (combo[0] == combo[2] and combo[1] == combo[3]):
raise ValueError("{!r}, Pair can't have the same suit: {!r}".format(combo, combo[1]))
self = super(Combo, cls).__new__(cls)
self._set_cards_in_order(combo[:2], combo[2:])
return self
@classmethod
def from_cards(cls, first, second):
self = super(Combo, cls).__new__(cls)
first = first.rank.val + first.suit.val
second = second.rank.val + second.suit.val
self._set_cards_in_order(first, second)
return self
def __unicode__(self):
return '{}{}'.format(self.first, self.second)
def __hash__(self):
return hash(self.first) + hash(self.second)
def __getstate__(self):
return {'first': self.first, 'second': self.second}
def __setstate__(self, state):
self.first, self.second = state['first'], state['second']
def __eq__(self, other):
if self.__class__ is other.__class__:
return self.first == other.first and self.second == other.second
return NotImplemented
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
# lookup optimization
self_is_pair, other_is_pair = self.is_pair, other.is_pair
self_first, other_first = self.first, other.first
if self_is_pair and other_is_pair:
if self_first == other_first:
return self.second < other.second
return self_first < other_first
elif self_is_pair or other_is_pair:
# Pairs are better than non-pairs
return self_is_pair < other_is_pair
else:
if self_first.rank == other_first.rank:
if self.second.rank == other.second.rank:
# same ranks, suited go first in order by Suit rank
if self.is_suited or other.is_suited:
return self.is_suited < other.is_suited
# both are suited
return self_first.suit < other_first.suit
return self.second < other.second
return self_first < other_first
def _set_cards_in_order(self, first, second):
self.first, self.second = Card(first), Card(second)
if self.first < self.second:
self.first, self.second = self.second, self.first
def to_hand(self):
"""Convert combo to :class:`Hand` object, losing suit information."""
return Hand('{}{}{}'.format(self.first.rank, self.second.rank, self.shape))
@property
def is_suited_connector(self):
return self.is_suited and self.is_connector
@property
def is_suited(self):
return self.first.suit == self.second.suit
@property
def is_offsuit(self):
return not self.is_suited and not self.is_pair
@property
def is_connector(self):
return self.rank_difference == 1
@property
def is_one_gapper(self):
return self.rank_difference == 2
@property
def is_two_gapper(self):
return self.rank_difference == 3
@property
def rank_difference(self):
"""The difference between the first and second rank of the Combo."""
# self.first >= self.second
return Rank.difference(self.first.rank, self.second.rank)
@property
def is_pair(self):
return self.first.rank == self.second.rank
@property
def is_broadway(self):
return self.first.is_broadway and self.second.is_broadway
@property
def shape(self):
if self.is_pair:
return Shape.PAIR
elif self.is_suited:
return Shape.SUITED
else:
return Shape.OFFSUIT
@shape.setter
def shape(self, value):
self._shape = Shape(value).val
class _RegexRangeLexer(object):
_separator_re = re.compile(r"[,;\s]*")
_rank = r"([2-9TJQKA])"
_suit = r"[cdhs♣♦♥♠]"
# the second card is not the same as the first
# (negative lookahead for the first matching group)
# this will not match pairs, but will match e.g. 86 or AK
_nonpair1 = r"{0}(?!\1){0}".format(_rank)
_nonpair2 = r"{0}(?!\2){0}".format(_rank)
rules = (
# NAME, REGEX, value extractor METHOD NAME
('ALL', r"XX", '_get_value'),
('PAIR', r"{}\1$".format(_rank), '_get_first'),
('PAIR_PLUS', r"{}\1\+$".format(_rank), '_get_first'),
('PAIR_MINUS', r"{}\1-$".format(_rank), '_get_first'),
('PAIR_DASH', r"{0}\1-{0}\2$".format(_rank), '_get_for_pair_dash'),
('BOTH', _nonpair1 + r"$", '_get_first_two'),
('BOTH_PLUS', r"{}\+$".format(_nonpair1), '_get_first_two'),
('BOTH_MINUS', r"{}-$".format(_nonpair1), '_get_first_two'),
('BOTH_DASH', r"{}-{}$".format(_nonpair1, _nonpair2), '_get_for_both_dash'),
('SUITED', r"{}s$".format(_nonpair1), '_get_first_two'),
('SUITED_PLUS', r"{}s\+$".format(_nonpair1), '_get_first_two'),
('SUITED_MINUS', r"{}s-$".format(_nonpair1), '_get_first_two'),
('SUITED_DASH', r"{}s-{}s$".format(_nonpair1, _nonpair2), '_get_for_shaped_dash'),
('OFFSUIT', r"{}o$".format(_nonpair1), '_get_first_two'),
('OFFSUIT_PLUS', r"{}o\+$".format(_nonpair1), '_get_first_two'),
('OFFSUIT_MINUS', r"{}o-$".format(_nonpair1), '_get_first_two'),
('OFFSUIT_DASH', r"{}o-{}o$".format(_nonpair1, _nonpair2), '_get_for_shaped_dash'),
('X_SUITED', r"{0}Xs$|X{0}s$".format(_rank), '_get_rank'),
('X_SUITED_PLUS', r"{0}Xs\+$|X{0}s\+$".format(_rank), '_get_rank'),
('X_SUITED_MINUS', r"{0}Xs-$|X{0}s-$".format(_rank), '_get_rank'),
('X_OFFSUIT', r"{0}Xo$|X{0}o$".format(_rank), '_get_rank'),
('X_OFFSUIT_PLUS', r"{0}Xo\+$|X{0}o\+$".format(_rank), '_get_rank'),
('X_OFFSUIT_MINUS', r"{0}Xo-$|X{0}o-$".format(_rank), '_get_rank'),
('X_PLUS', r"{0}X\+$|X{0}\+$".format(_rank), '_get_rank'),
('X_MINUS', r"{0}X-$|X{0}-$".format(_rank), '_get_rank'),
('X_BOTH', r"{0}X$|X{0}$".format(_rank), '_get_rank'),
# might be anything, even pair
# FIXME: 5s5s accepted
('COMBO', r"{0}{1}{0}{1}$".format(_rank, _suit), '_get_value'),
)
# compile regexes when initializing class, so every instance will have them precompiled
rules = [(name, re.compile(regex, re.IGNORECASE), method) for (name, regex, method) in rules]
def __init__(self, range=''):
# filter out empty matches
self.tokens = [token for token in self._separator_re.split(range) if token]
def __iter__(self):
"""Goes through all the tokens and compare them with the regex rules. If it finds a match,
makes an appropriate value for the token and yields them.
"""
for token in self.tokens:
for name, regex, method_name in self.rules:
if regex.match(token):
val_method = getattr(self, method_name)
yield name, val_method(token)
break
else:
raise ValueError('Invalid token: %s' % token)
@staticmethod
def _get_value(token):
return token
@staticmethod
def _get_first(token):
return token[0]
@staticmethod
def _get_rank(token):
return token[0] if token[1].upper() == 'X' else token[1]
@classmethod
def _get_in_order(cls, first_part, second_part, token):
smaller, bigger = cls._get_rank_in_order(token, first_part, second_part)
return smaller.val, bigger.val
@classmethod
def _get_first_two(cls, token):
return cls._get_in_order(0, 1, token)
@classmethod
def _get_for_pair_dash(cls, token):
return cls._get_in_order(0, 3, token)
@classmethod
def _get_first_smaller_bigger(cls, first_part, second_part, token):
smaller1, bigger1 = cls._get_rank_in_order(token[first_part], 0, 1)
smaller2, bigger2 = cls._get_rank_in_order(token[second_part], 0, 1)
if bigger1 != bigger2:
raise ValueError('Invalid token: %s' % token)
smaller, bigger = min(smaller1, smaller2), max(smaller1, smaller2)
return bigger1.val, smaller.val, bigger.val
@staticmethod
def _get_rank_in_order(token, first_part, second_part):
first, second = Rank(token[first_part]), Rank(token[second_part])
smaller, bigger = min(first, second), max(first, second)
return smaller, bigger
@classmethod
# for 'A5-AT'
def _get_for_both_dash(cls, token):
return cls._get_first_smaller_bigger(slice(0, 2), slice(3, 5), token)
@classmethod
# for 'A5o-ATo' and 'A5s-ATs'
def _get_for_shaped_dash(cls, token):
return cls._get_first_smaller_bigger(slice(0, 2), slice(4, 6), token)
@functools.total_ordering
class Range(object):
"""Parses a str range into tuple of Combos (or Hands)."""
slots = ('_hands', '_combos')
def __init__(self, range=''):
self._hands = set()
self._combos = set()
for name, value in _RegexRangeLexer(range):
if name == 'ALL':
for card in itertools.combinations('AKQJT98765432', 2):
self._add_offsuit(card)
self._add_suited(card)
for rank in 'AKQJT98765432':
self._add_pair(rank)
# full range, no need to parse any more name
break
elif name == 'PAIR':
self._add_pair(value)
elif name == 'PAIR_PLUS':
smallest = Rank(value)
for rank in (rank.val for rank in Rank if rank >= smallest):
self._add_pair(rank)
elif name == 'PAIR_MINUS':
biggest = Rank(value)
for rank in (rank.val for rank in Rank if rank <= biggest):
self._add_pair(rank)
elif name == 'PAIR_DASH':
first, second = Rank(value[0]), Rank(value[1])
ranks = (rank.val for rank in Rank if first <= rank <= second)
for rank in ranks:
self._add_pair(rank)
elif name == 'BOTH':
self._add_offsuit(value[0] + value[1])
self._add_suited(value[0] + value[1])
elif name == 'X_BOTH':
for rank in (r.val for r in Rank if r < Rank(value)):
self._add_suited(value + rank)
self._add_offsuit(value + rank)
elif name == 'OFFSUIT':
self._add_offsuit(value[0] + value[1])
elif name == 'SUITED':
self._add_suited(value[0] + value[1])
elif name == 'X_OFFSUIT':
biggest = Rank(value)
for rank in (rank.val for rank in Rank if rank < biggest):
self._add_offsuit(value + rank)
elif name == 'X_SUITED':
biggest = Rank(value)
for rank in (rank.val for rank in Rank if rank < biggest):
self._add_suited(value + rank)
elif name == 'BOTH_PLUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
self._add_suited(value[1] + rank)
self._add_offsuit(value[1] + rank)
elif name == 'BOTH_MINUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if rank <= smaller):
self._add_suited(value[1] + rank)
self._add_offsuit(value[1] + rank)
elif name in ('X_PLUS', 'X_SUITED_PLUS', 'X_OFFSUIT_PLUS'):
smallest = Rank(value)
first_ranks = (rank for rank in Rank if rank >= smallest)
for rank1 in first_ranks:
second_ranks = (rank for rank in Rank if rank < rank1)
for rank2 in second_ranks:
if name != 'X_OFFSUIT_PLUS':
self._add_suited(rank1.val + rank2.val)
if name != 'X_SUITED_PLUS':
self._add_offsuit(rank1.val + rank2.val)
elif name in ('X_MINUS', 'X_SUITED_MINUS', 'X_OFFSUIT_MINUS'):
biggest = Rank(value)
first_ranks = (rank for rank in Rank if rank <= biggest)
for rank1 in first_ranks:
second_ranks = (rank for rank in Rank if rank < rank1)
for rank2 in second_ranks:
if name != 'X_OFFSUIT_MINUS':
self._add_suited(rank1.val + rank2.val)
if name != 'X_SUITED_MINUS':
self._add_offsuit(rank1.val + rank2.val)
elif name == 'COMBO':
self._combos.add(Combo(value))
elif name == 'OFFSUIT_PLUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
self._add_offsuit(value[1] + rank)
elif name == 'OFFSUIT_MINUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if rank <= smaller):
self._add_offsuit(value[1] + rank)
elif name == 'SUITED_PLUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
self._add_suited(value[1] + rank)
elif name == 'SUITED_MINUS':
smaller, bigger = Rank(value[0]), Rank(value[1])
for rank in (rank.val for rank in Rank if rank <= smaller):
self._add_suited(value[1] + rank)
elif name == 'BOTH_DASH':
smaller, bigger = Rank(value[1]), Rank(value[2])
for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
self._add_offsuit(value[0] + rank)
self._add_suited(value[0] + rank)
elif name == 'OFFSUIT_DASH':
smaller, bigger = Rank(value[1]), Rank(value[2])
for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
self._add_offsuit(value[0] + rank)
elif name == 'SUITED_DASH':
smaller, bigger = Rank(value[1]), Rank(value[2])
for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
self._add_suited(value[0] + rank)
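    # Added note (hedged): illustrative range strings accepted by the parsing
    # above, e.g.
    #     Range('22+')      -> every pair from 22 up to AA
    #     Range('A2s+')     -> A2s, A3s, ... AKs
    #     Range('KJo-KQo')  -> KJo and KQo only
    #     Range('AsKs 77')  -> the exact AsKs combo plus all 77 combos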
@classmethod
def from_file(cls, filename):
"""Creates an instance from a given file, containing a range.
It can handle the PokerCruncher (.rng extension) format.
"""
range_string = Path(filename).open().read()
return cls(range_string)
@classmethod
def from_objects(cls, iterable):
"""Make an instance from an iterable of Combos, Hands or both."""
range_string = ' '.join(unicode(obj) for obj in iterable)
return cls(range_string)
def __eq__(self, other):
if self.__class__ is other.__class__:
return self._all_combos == other._all_combos
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return len(self._all_combos) < len(other._all_combos)
return NotImplemented
def __contains__(self, item):
if isinstance(item, Combo):
return item in self._combos or item.to_hand() in self._hands
elif isinstance(item, Hand):
return item in self._all_hands
elif isinstance(item, unicode):
if len(item) == 4:
combo = Combo(item)
return combo in self._combos or combo.to_hand() in self._hands
else:
return Hand(item) in self._all_hands
def __len__(self):
return self._count_combos()
def __unicode__(self):
return ', '.join(self.rep_pieces)
def __str__(self):
return unicode(self).encode('utf-8')
def __repr__(self):
range = ' '.join(self.rep_pieces)
return "{}('{}')".format(self.__class__.__name__, range).encode('utf-8')
def __getstate__(self):
return {'_hands': self._hands, '_combos': self._combos}
def __setstate__(self, state):
self._hands, self._combos = state['_hands'], state['_combos']
def __hash__(self):
return hash(self.combos)
def to_html(self):
"""Returns a 13x13 HTML table representing the range.
The table's CSS class is ``range``, pair cells (td element) are ``pair``, offsuit hands are
``offsuit`` and suited hand cells has ``suited`` css class.
The HTML contains no extra whitespace at all.
Calculating it should not take more than 30ms (which takes calculating a 100% range).
"""
# note about speed: I tried with functools.lru_cache, and the initial call was 3-4x slower
# than without it, and the need for calling this will usually be once, so no need to cache
html = ['<table class="range">']
for row in reversed(Rank):
html.append('<tr>')
for col in reversed(Rank):
if row > col:
suit, cssclass = 's', 'suited'
elif row < col:
suit, cssclass = 'o', 'offsuit'
else:
suit, cssclass = '', 'pair'
html.append('<td class="%s">' % cssclass)
hand = Hand(row.val + col.val + suit)
if hand in self.hands:
html.append(unicode(hand))
html.append('</td>')
html.append('</tr>')
html.append('</table>')
return ''.join(html)
def to_ascii(self, border=False):
"""Returns a nicely formatted ASCII table with optional borders."""
table = []
if border:
table.append('┌' + '─────┬' * 12 + '─────┐\n')
line = '├' + '─────┼' * 12 + '─────┤\n'
border = '│ '
lastline = '\n└' + '─────┴' * 12 + '─────┘'
else:
line = border = lastline = ''
for row in reversed(Rank):
for col in reversed(Rank):
if row > col:
suit = 's'
elif row < col:
suit = 'o'
else:
suit = ''
hand = Hand(row.val + col.val + suit)
hand = unicode(hand) if hand in self.hands else ''
table.append(border)
table.append(hand.ljust(4))
if row.val != '2':
table.append(border)
table.append('\n')
table.append(line)
table.append(border)
table.append(lastline)
return ''.join(table)
@property
def rep_pieces(self):
"""List of str pieces how the Range is represented."""
if self._count_combos() == 1326:
return ['XX']
all_combos = self._all_combos
pairs = list(filter(lambda c: c.is_pair, all_combos))
pair_pieces = self._get_pieces(pairs, 6)
suiteds = list(filter(lambda c: c.is_suited, all_combos))
suited_pieces = self._get_pieces(suiteds, 4)
offsuits = list(filter(lambda c: c.is_offsuit, all_combos))
offsuit_pieces = self._get_pieces(offsuits, 12)
pair_strs = self._shorten_pieces(pair_pieces)
suited_strs = self._shorten_pieces(suited_pieces)
offsuit_strs = self._shorten_pieces(offsuit_pieces)
return pair_strs + suited_strs + offsuit_strs
def _get_pieces(self, combos, combos_in_hand):
if not combos:
return []
sorted_combos = sorted(combos, reverse=True)
hands_and_combos = []
current_combos = []
last_combo = sorted_combos[0]
for combo in sorted_combos:
if (last_combo.first.rank == combo.first.rank and
last_combo.second.rank == combo.second.rank):
current_combos.append(combo)
length = len(current_combos)
if length == combos_in_hand:
hands_and_combos.append(combo.to_hand())
current_combos = []
else:
hands_and_combos.extend(current_combos)
current_combos = [combo]
last_combo = combo
# add the remainder if any, current_combos might be empty
hands_and_combos.extend(current_combos)
return hands_and_combos
def _shorten_pieces(self, pieces):
if not pieces:
return []
str_pieces = []
first = last = pieces[0]
for current in pieces[1:]:
if isinstance(last, Combo):
str_pieces.append(unicode(last))
first = last = current
elif isinstance(current, Combo):
str_pieces.append(self._get_format(first, last))
first = last = current
elif ((current.is_pair and Rank.difference(last.first, current.first) == 1) or
(last.first == current.first and
Rank.difference(last.second, current.second) == 1)):
last = current
else:
str_pieces.append(self._get_format(first, last))
first = last = current
# write out any remaining pieces
str_pieces.append(self._get_format(first, last))
return str_pieces
def _get_format(self, first, last):
if first == last:
return unicode(first)
elif (first.is_pair and first.first.val == 'A' or
Rank.difference(first.first, first.second) == 1):
return '%s+' % last
elif last.second.val == '2':
return '%s-' % first
else:
return '{}-{}'.format(first, last)
def _add_pair(self, rank):
self._hands.add(Hand(rank * 2))
def _add_offsuit(self, tok):
self._hands.add(Hand(tok[0] + tok[1] + 'o'))
def _add_suited(self, tok):
self._hands.add(Hand(tok[0] + tok[1] + 's'))
@cached_property
def hands(self):
"""Tuple of hands contained in this range. If only one combo of the same hand is present,
it will be shown here. e.g. ``Range('2s2c').hands == (Hand('22'),)``
"""
return tuple(sorted(self._all_hands))
@cached_property
def combos(self):
return tuple(sorted(self._all_combos))
@cached_property
def percent(self):
"""What percent of combos does this range have compared to all the possible combos.
There are 1326 total combos in Hold'em: 52 * 51 / 2 (because order doesn't matter)
Precision: 2 decimal point
"""
dec_percent = (Decimal(self._count_combos()) / 1326 * 100)
# round to two decimal point
return float(dec_percent.quantize(Decimal('1.00')))
def _count_combos(self):
combo_count = len(self._combos)
for hand in self._hands:
if hand.is_pair:
combo_count += 6
elif hand.is_offsuit:
combo_count += 12
elif hand.is_suited:
combo_count += 4
return combo_count
@cached_property
def _all_combos(self):
hand_combos = {combo for hand in self._hands for combo in hand.to_combos()}
return hand_combos | self._combos
@cached_property
def _all_hands(self):
combo_hands = {combo.to_hand() for combo in self._combos}
return combo_hands | self._hands
if __name__ == '__main__':
import cProfile
print('_all_COMBOS')
cProfile.run("Range('XX')._all_combos", sort='tottime')
print('COMBOS')
cProfile.run("Range('XX').combos", sort='tottime')
print('HANDS')
cProfile.run("Range('XX').hands", sort='tottime')
r = ('KK-QQ, 88-77, A5s, A3s, K8s+, K3s, Q7s+, Q5s, Q3s, J9s-J5s, T4s+, 97s, 95s-93s, 87s, '
'85s-84s, 75s, 64s-63s, 53s, ATo+, K5o+, Q7o-Q5o, J9o-J7o, J4o-J3o, T8o-T3o, 96o+, '
'94o-93o, 86o+, 84o-83o, 76o, 74o, 63o, 54o, 22')
print('R _all_COMBOS')
cProfile.run("Range('%s')._all_combos" % r, sort='tottime')
print('R COMBOS')
cProfile.run("Range('%s').combos" % r, sort='tottime')
print('R HANDS')
cProfile.run("Range('%s').hands" % r, sort='tottime')
|
the-stack_106_23559 | #!/usr/bin/python
#imports
import string
import sys
import os
import shutil
import copy
import math
#the usage string, printed when the user is abusing our tool, lol
usage = """usage: ptcl2vms.py [options] <infile1> [[options] <infile2>] ... <outfile>
valid options are...
-stride (int) adjusts with what stride we read and generate points
-radius (float) adjusts the diameter of the generated point meshes
-ref (int) adjusts tesselation level of point meshes
-startts (int) sets the starting timestep of the output
-quiet suppresses all messages
-help prints this message
"""
#a bunch of vector manipulation functions! :D
def dot(a, b):
return (a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2])
def mag2(v):
return dot(v, v)
def mag(v):
return math.sqrt(mag2(v))
def div(v, n):
return v[0] / n, v[1] / n, v[2] / n
def mul(v, n):
return v[0] * n, v[1] * n, v[2] * n
def add(a, b):
return a[0] + b[0], a[1] + b[1], a[2] + b[2]
def sub(a, b):
return a[0] - b[0], a[1] - b[1], a[2] - b[2]
def norm(a):
return div(a, mag(a))
def resize(a, n):
return mul(a, n / mag(a))
def centroid(points):
c = (0.0, 0.0, 0.0)
for p in points:
c = add(c, p)
c = div(c, len(points))
return c
#A tetrahedron for subdivision by subdivnorm
overts = [ \
norm((-1, -1, 1)),
norm(( 1, 1, 1)),
norm((-1, 1, -1)),
norm(( 1, -1, -1))]
ofaces = [ \
(0, 1, 2),
(3, 2, 1),
(0, 3, 1),
(3, 0, 2)]
def subdivnorm(verts, faces):
nfaces = len(faces)
for faceIndex in range(0, nfaces) :
face = faces[faceIndex] #choose a face to subdivide
fverts = [] #get the vertices of the face
for i in face:
fverts.append(verts[i])
#ctr = norm(centroid(fverts)) #get the centroid of the face
everts = [] #get and normalize the new edge vertices
for i in range(0, len(fverts)):
ni = (i + 1) % len(fverts)
everts.append(norm(centroid([fverts[i], fverts[ni]])))
#add all the new vertices, remembering where we stored them
eidx = len(verts)
verts.extend(everts)
#replace the existing face with the first new face
faces[faceIndex] = (eidx, eidx + 1, eidx + 2)
#build new faces, appending them to the faces array
for i in range(0, len(face)):
ni = (i + 1) % len(face)
faces.append((eidx + i, face[ni], eidx + ni))
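# Added note (hedged): each subdivnorm() pass replaces every triangle with four
# smaller ones whose vertices are re-projected onto the unit sphere, so starting
# from the 4-face tetrahedron above, refinement level r yields 4 * 4**r faces:
#
#     verts, faces = copy.deepcopy(overts), copy.deepcopy(ofaces)
#     for _ in range(2):      # r = 2  ->  64 faces
#         subdivnorm(verts, faces)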
def catmullnorm(verts, faces) :
nfaces = len(faces)
    for faceIndex in range(nfaces) :
face = faces[faceIndex] #choose a face to subdivide
fverts = [] #get the vertices of the face
for i in face:
fverts.append(verts[i])
ctr = norm(centroid(fverts)) #get the centroid of the face
everts = [] #get and normalize the new edge vertices
for i in range(0, len(fverts)):
ni = (i + 1) % len(fverts)
everts.append(norm(centroid([fverts[i], fverts[ni]])))
#add all the new vertices, remembering where we stored them
cidx = len(verts)
verts.append(ctr)
eidx = len(verts)
verts.extend(everts)
#rebuild the existing face as the first new face
faces[faceIndex] = (eidx, face[1], eidx + 1, cidx)
#build new quads, appending them to the faces array
for i in range(1, len(face)):
ni = (i + 1) % len(face)
faces.append((eidx + i, face[ni], eidx + ni, cidx))
def showMesh(verts, faces):
for v in verts:
print("v " + str(v[0]) + " " + str(v[1]) + " " + str(v[2]))
for f in faces:
if len(f) == 3:
print("f "+str(f[0]+1)+" "+str(f[1]+1)+" "+str(f[2]+1))
else:
print("f "+str(f[0]+1)+" "+str(f[1]+1)+" "+str(f[2]+1)+" "+str(f[3]+1))
print("")
#remove the scriptname from argv, but store it just in case :P
scriptname = sys.argv.pop(0)
quiet = False
for arg in sys.argv :
if arg == "-quiet" :
quiet = True
#check for valid number of arguments
if len(sys.argv) < 1 :
print(">>>ERROR: not enough arguments!\n" + usage)
exit(-1)
if not quiet and sys.argv[0] == "-help" :
print(usage)
exit(0)
#check for valid number of arguments
if len(sys.argv) < 2 :
print(">>>ERROR: not enough arguments!\n" + usage)
exit(-1)
#get the name of output file and directory
vmsname = sys.argv.pop()
if vmsname[len(vmsname) - 4:] != ".vms" :
vmsname = vmsname + ".vms"
if not quiet:
print(">>>OUTFILE: " + vmsname)
dirname = vmsname[0:vmsname.rfind('.')] + "_data"
#remove any preexisting data by the same name
if os.path.exists(dirname) :
shutil.rmtree(dirname, ignore_errors=False, onerror=None)
if os.path.exists(vmsname) :
os.remove(vmsname)
#make data directory
os.mkdir(dirname)
#process each option or file
stride = 1
ref = 0
radius = 1
startts = 0
plylist = []
while len(sys.argv) > 0 :
#get the next argument
arg = sys.argv.pop(0)
#handle options
if arg[:1] == "-" :
if arg == "-stride" :
stride = int(sys.argv.pop(0))
if not quiet :
print("stride = " + str(stride))
elif arg == "-radius" :
radius = float(sys.argv.pop(0))
if not quiet :
print("radius = " + str(radius))
elif arg == "-ref" :
ref = int(sys.argv.pop(0))
if not quiet :
print("refinement = " + str(ref))
elif arg == "-startts" :
startts = int(sys.argv.pop(0))
if not quiet :
print("startts = " + str(startts))
else:
print(">>>ERROR: unknown option: '" + arg + "'\n" + usage)
exit(-1)
continue
#open input and output files
if not quiet :
print(">>>READING: " + arg)
infile = open(arg)
#get input from input file
counter = 1
points = []
for line in infile.readlines() :
strc = line.split()
if len(strc) < 3:
continue
coords = []
for i in strc:
coords.append(float(i))
counter += 1
if counter >= stride :
points.append(coords)
counter = 1
infile.close()
#generate a sphere with specified refinement, starting with our tetrahedron
lverts = copy.deepcopy(overts)
lfaces = copy.deepcopy(ofaces)
for i in range(ref) :
subdivnorm(lverts, lfaces)
lnorms = copy.deepcopy(lverts)
for i in range(0, len(lverts)) :
lverts[i] = resize(lverts[i], radius)
#copy the sphere to various positions! :D
verts = []
faces = []
norms = []
offset = 0
#this loop runs once for each copy (each must be at a different position)
for pos in range(0, len(points), stride) :
#add the current position to each point before appending it to verts
for vert in lverts :
verts.append(add(vert, points[pos]))
for nm in lnorms :
norms.append(nm)
for face in lfaces :
faces.append((face[0] + offset,
face[1] + offset,
face[2] + offset))
#the next face will index its verts from the next starting position
offset = len(verts)
#write the model to a file!
plyname = dirname+"/"+os.path.basename(arg[0:arg.rfind(".")])+".ply"
if not quiet :
print(">>>WRITING: " + plyname)
ply = open(plyname, "w")
ply.write('ply\nformat ascii 1.0\nelement vertex ' + str(len(verts)) + '\nproperty float x\nproperty float y\nproperty float z\nproperty float nx\nproperty float ny\nproperty float nz\nelement face ' + str(len(faces)) + '\nproperty list uchar int vertex_indices\nend_header\n')
for vi in range(len(verts)):
v = verts[vi]
n = norms[vi]
ply.write(str(v[0])+" "+str(v[1])+" "+str(v[2])+" "\
+str(n[0])+" "+str(n[1])+" "+str(n[2])+"\n")
for f in faces:
ply.write("3 "+str(f[0])+" "+str(f[1])+" "+str(f[2])+"\n")
ply.close()
plylist.append(plyname)
#write the VMS file, listing all our outputs in it!
if len(plylist) < 1:
print("ERROR: No input files specified,\n or one input and no outputs!\n")
exit(1)
vms = open(vmsname, "w")
counter = 0
vms.write('<?xml version="1.0" encoding="ISO-8859-1" standalone="yes"?>\n<ModelScene>\n')
for i in range(startts) :
vms.write('<!-- ts ' + str(counter) + ' -->\n')
vms.write('<TimeStep>\n')
vms.write('</TimeStep>\n')
counter += 1
for ply in plylist :
vms.write('<!-- ts ' + str(counter) + ' -->\n')
vms.write('<TimeStep>\n')
vms.write('<File>' + ply + '</File>\n')
vms.write('</TimeStep>\n')
counter += 1
vms.write('</ModelScene>\n')
vms.close()
|
the-stack_106_23561 | import tkinter as tk
from tkinter import *
root = tk.Tk()
root.title("C语言中文网")
root.geometry('450x180+300+200')
root.iconbitmap('C:/Users/Administrator/Desktop/C语言中文网logo.ico')
# Create a scrollbar widget; the default orientation is vertical
sbar1 = tk.Scrollbar(root)
# Place the scrollbar on the right side and let it stretch vertically when the window is resized
sbar1.pack(side=RIGHT, fill=Y)
# Create a horizontal scrollbar (default horizontal orientation); it fills along the X axis when the window is resized
sbar2 = Scrollbar(root, orient=HORIZONTAL)
sbar2.pack(side=BOTTOM, fill=X)
# Create a Listbox widget and attach both scrollbars (vertical and horizontal) via their set() methods
mylist = tk.Listbox(root, xscrollcommand=sbar2.set, yscrollcommand=sbar1.set)
for i in range(30):
mylist.insert(END, '第' + str(i + 1) + '次:' + 'C语言中文网,网址为:c.biancheng.net' + '\n')
# Fill the window in both the X and Y directions when it is resized
mylist.pack(side=LEFT, fill=BOTH)
# Use the command option to hook the scrollbars to the widget's yview/xview methods
sbar1.config(command=mylist.yview)
sbar2.config(command=mylist.xview)
# Show the main window (enter the event loop)
root.mainloop()
|
the-stack_106_23562 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic EC2 Resource Tag / Filters and actions
These work for the whole family of resources associated
to ec2 (subnets, vpc, security-groups, volumes, instances,
snapshots).
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from concurrent.futures import as_completed
from datetime import datetime, timedelta
from dateutil import zoneinfo
from dateutil.parser import parse
import itertools
import time
from c7n.actions import BaseAction as Action, AutoTagUser
from c7n.exceptions import PolicyValidationError
from c7n.filters import Filter, OPERATORS
from c7n.filters.offhours import Time
from c7n import utils
DEFAULT_TAG = "maid_status"
def register_ec2_tags(filters, actions):
filters.register('marked-for-op', TagActionFilter)
filters.register('tag-count', TagCountFilter)
actions.register('auto-tag-user', AutoTagUser)
actions.register('mark-for-op', TagDelayedAction)
actions.register('tag-trim', TagTrim)
actions.register('mark', Tag)
actions.register('tag', Tag)
actions.register('unmark', RemoveTag)
actions.register('untag', RemoveTag)
actions.register('remove-tag', RemoveTag)
actions.register('rename-tag', RenameTag)
actions.register('normalize-tag', NormalizeTag)
def register_universal_tags(filters, actions):
filters.register('marked-for-op', TagActionFilter)
filters.register('tag-count', TagCountFilter)
actions.register('mark', UniversalTag)
actions.register('tag', UniversalTag)
actions.register('auto-tag-user', AutoTagUser)
actions.register('mark-for-op', UniversalTagDelayedAction)
actions.register('unmark', UniversalUntag)
actions.register('untag', UniversalUntag)
actions.register('remove-tag', UniversalUntag)
def universal_augment(self, resources):
# Resource Tagging API Support
# https://goo.gl/uccKc9
# Bail on empty set
if not resources:
return resources
# For global resources, tags don't populate in the get_resources call
# unless the call is being made to us-east-1
region = getattr(self.resource_type, 'global_resource', None) and 'us-east-1' or self.region
client = utils.local_session(
self.session_factory).client('resourcegroupstaggingapi', region_name=region)
paginator = client.get_paginator('get_resources')
resource_type = getattr(self.get_model(), 'resource_type', None)
if not resource_type:
resource_type = self.get_model().service
if self.get_model().type:
resource_type += ":" + self.get_model().type
resource_tag_map_list = list(itertools.chain(
*[p['ResourceTagMappingList'] for p in paginator.paginate(
ResourceTypeFilters=[resource_type])]))
resource_tag_map = {
r['ResourceARN']: r['Tags'] for r in resource_tag_map_list}
for arn, r in zip(self.get_arns(resources), resources):
if arn in resource_tag_map:
r['Tags'] = resource_tag_map[arn]
return resources
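# Added note (hedged): the ResourceTypeFilters value built above follows the
# "service" or "service:type" convention of the Resource Groups Tagging API,
# e.g. "s3" or "ec2:instance", so the paginated call returns ARN -> tag
# mappings only for this resource type.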
def _common_tag_processer(executor_factory, batch_size, concurrency,
process_resource_set, id_key, resources, tags,
log):
with executor_factory(max_workers=concurrency) as w:
futures = []
for resource_set in utils.chunks(resources, size=batch_size):
futures.append(
w.submit(process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
log.error(
"Exception with tags: %s on resources: %s \n %s" % (
tags,
", ".join([r[id_key] for r in resource_set]),
f.exception()))
class TagTrim(Action):
"""Automatically remove tags from an ec2 resource.
EC2 Resources have a limit of 50 tags, in order to make
additional tags space on a set of resources, this action can
be used to remove enough tags to make the desired amount of
space while preserving a given set of tags.
.. code-block :: yaml
- policies:
- name: ec2-tag-trim
comment: |
Any instances with 48 or more tags get tags removed until
             they match the target tag count, in this case 47, so
             that we free up a tag slot for another usage.
resource: ec2
filters:
# Filter down to resources which already have 8 tags
# as we need space for 3 more, this also ensures that
# metrics reporting is correct for the policy.
type: value
key: "[length(Tags)][0]"
op: ge
value: 48
actions:
- type: tag-trim
space: 3
preserve:
- OwnerContact
- ASV
- CMDBEnvironment
- downtime
- custodian_status
"""
max_tag_count = 50
schema = utils.type_schema(
'tag-trim',
space={'type': 'integer'},
preserve={'type': 'array', 'items': {'type': 'string'}})
schema_alias = True
permissions = ('ec2:DeleteTags',)
def process(self, resources):
self.id_key = self.manager.get_model().id
self.preserve = set(self.data.get('preserve'))
self.space = self.data.get('space', 3)
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_resource, resources))
def process_resource(self, i):
        # Can't really do this in batch parallel without some heuristics
        # or more complex matching with respect to grouping resources
        # by common tag populations.
tag_map = {
t['Key']: t['Value'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')}
# Space == 0 means remove all but specified
if self.space and len(tag_map) + self.space <= self.max_tag_count:
return
keys = set(tag_map)
preserve = self.preserve.intersection(keys)
candidates = keys - self.preserve
if self.space:
# Free up slots to fit
remove = len(candidates) - (
self.max_tag_count - (self.space + len(preserve)))
candidates = list(sorted(candidates))[:remove]
if not candidates:
self.log.warning(
"Could not find any candidates to trim %s" % i[self.id_key])
return
self.process_tag_removal(i, candidates)
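    # Added worked example (hedged): with 49 non-aws tags, space=3 and two
    # preserved keys, candidates has 47 entries and
    #     remove = 47 - (50 - (3 + 2)) = 2
    # so two candidate tags are deleted, leaving 47 tags and 3 free slots.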
def process_tag_removal(self, resource, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
self.manager.retry(
client.delete_tags,
Tags=[{'Key': c} for c in tags],
Resources=[resource[self.id_key]],
DryRun=self.manager.config.dryrun)
class TagActionFilter(Filter):
"""Filter resources for tag specified future action
Filters resources by a 'custodian_status' tag which specifies a future
date for an action.
The filter parses the tag values looking for an 'op@date'
string. The date is parsed and compared to do today's date, the
filter succeeds if today's date is gte to the target date.
The optional 'skew' parameter provides for incrementing today's
date a number of days into the future. An example use case might
be sending a final notice email a few days before terminating an
instance, or snapshotting a volume prior to deletion.
The optional 'skew_hours' parameter provides for incrementing the current
time a number of hours into the future.
Optionally, the 'tz' parameter can get used to specify the timezone
in which to interpret the clock (default value is 'utc')
.. code-block :: yaml
- policies:
- name: ec2-stop-marked
resource: ec2
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
tz: utc
actions:
- stop
"""
schema = utils.type_schema(
'marked-for-op',
tag={'type': 'string'},
tz={'type': 'string'},
skew={'type': 'number', 'minimum': 0},
skew_hours={'type': 'number', 'minimum': 0},
op={'type': 'string'})
schema_alias = True
current_date = None
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise PolicyValidationError(
"Invalid marked-for-op op:%s in %s" % (op, self.manager.data))
tz = zoneinfo.gettz(Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
if not tz:
raise PolicyValidationError(
"Invalid timezone specified '%s' in %s" % (
self.data.get('tz'), self.manager.data))
return self
def __call__(self, i):
tag = self.data.get('tag', DEFAULT_TAG)
op = self.data.get('op', 'stop')
skew = self.data.get('skew', 0)
skew_hours = self.data.get('skew_hours', 0)
tz = zoneinfo.gettz(Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
v = None
for n in i.get('Tags', ()):
if n['Key'] == tag:
v = n['Value']
break
if v is None:
return False
if ':' not in v or '@' not in v:
return False
msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
if action != op:
return False
try:
action_date = parse(action_date_str)
except Exception:
            self.log.warning("could not parse tag:%s value:%s on %s" % (
                tag, v, i['InstanceId']))
            return False
if self.current_date is None:
self.current_date = datetime.now()
if action_date.tzinfo:
# if action_date is timezone aware, set to timezone provided
action_date = action_date.astimezone(tz)
self.current_date = datetime.now(tz=tz)
return self.current_date >= (
action_date - timedelta(days=skew, hours=skew_hours))
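    # Added note (hedged): a matching tag value typically looks like
    #     "Resource does not meet policy: stop@2024/09/01"
    # i.e. "<message>: <op>@<date>"; the date part is parsed with dateutil and
    # compared against the current time (plus any skew) in the configured timezone.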
class TagCountFilter(Filter):
"""Simplify tag counting..
ie. these two blocks are equivalent
.. code-block :: yaml
- filters:
- type: value
key: "[length(Tags)][0]"
op: gte
value: 8
- filters:
- type: tag-count
value: 8
"""
schema = utils.type_schema(
'tag-count',
count={'type': 'integer', 'minimum': 0},
op={'enum': list(OPERATORS.keys())})
schema_alias = True
def __call__(self, i):
count = self.data.get('count', 10)
op_name = self.data.get('op', 'gte')
op = OPERATORS.get(op_name)
tag_count = len([
t['Key'] for t in i.get('Tags', [])
if not t['Key'].startswith('aws:')])
return op(tag_count, count)
class Tag(Action):
"""Tag an ec2 resource.
"""
batch_size = 25
concurrency = 2
schema = utils.type_schema(
'tag', aliases=('mark',),
tags={'type': 'object'},
key={'type': 'string'},
value={'type': 'string'},
tag={'type': 'string'},
)
schema_alias = True
permissions = ('ec2:CreateTags',)
def validate(self):
if self.data.get('key') and self.data.get('tag'):
raise PolicyValidationError(
"Can't specify both key and tag, choose one in %s" % (
self.manager.data,))
return self
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get('msg')
msg = self.data.get('value') or msg
tag = self.data.get('tag', DEFAULT_TAG)
tag = self.data.get('key') or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get('tags')
if tags is None:
tags = []
else:
tags = [{'Key': k, 'Value': v} for k, v in tags.items()]
if msg:
tags.append({'Key': tag, 'Value': msg})
self.interpolate_values(tags)
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('ec2')
self.manager.retry(
client.create_tags,
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
def interpolate_values(self, tags):
params = {
'account_id': self.manager.config.account_id,
'now': utils.FormatDate.utcnow(),
'region': self.manager.config.region}
interpolate_tag_values(tags, params)
def interpolate_tag_values(tags, params):
for t in tags:
t['Value'] = t['Value'].format(**params)
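# Illustrative sketch (not part of c7n): Tag.interpolate_values fills {account_id},
# {now} and {region} placeholders in tag values via str.format. The params dict
# below uses plain placeholder strings instead of the real config/FormatDate values.
def _demo_interpolate_tag_values():
    tags = [{'Key': 'owner', 'Value': 'set-by-custodian-{region}-{account_id}'}]
    params = {'account_id': '123456789012', 'now': '2024-07-04', 'region': 'us-east-1'}
    interpolate_tag_values(tags, params)
    return tags  # -> [{'Key': 'owner', 'Value': 'set-by-custodian-us-east-1-123456789012'}]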
class RemoveTag(Action):
"""Remove tags from ec2 resources.
"""
batch_size = 100
concurrency = 2
schema = utils.type_schema(
'untag', aliases=('unmark', 'remove-tag'),
tags={'type': 'array', 'items': {'type': 'string'}})
permissions = ('ec2:DeleteTags',)
def process(self, resources):
self.id_key = self.manager.get_model().id
tags = self.data.get('tags', [DEFAULT_TAG])
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, vol_set, tag_keys):
client = utils.local_session(
self.manager.session_factory).client('ec2')
return self.manager.retry(
client.delete_tags,
Resources=[v[self.id_key] for v in vol_set],
Tags=[{'Key': k} for k in tag_keys],
DryRun=self.manager.config.dryrun)
class RenameTag(Action):
""" Create a new tag with identical value & remove old tag
"""
schema = utils.type_schema(
'rename-tag',
old_key={'type': 'string'},
new_key={'type': 'string'})
schema_alias = True
permissions = ('ec2:CreateTags', 'ec2:DeleteTags')
tag_count_max = 50
def delete_tag(self, client, ids, key, value):
client.delete_tags(
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def create_tag(self, client, ids, key, value):
client.create_tags(
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def process_rename(self, tag_value, resource_set):
"""
Move source tag value to destination tag value
- Collect value from old tag
- Delete old tag
- Create new tag & assign stored value
"""
self.log.info("Renaming tag on %s instances" % (len(resource_set)))
old_key = self.data.get('old_key')
new_key = self.data.get('new_key')
c = utils.local_session(self.manager.session_factory).client('ec2')
# We prefer to create the new tag first when possible
resource_ids = [r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < self.tag_count_max]
if resource_ids:
self.create_tag(c, resource_ids, new_key, tag_value)
self.delete_tag(
c, [r[self.id_key] for r in resource_set], old_key, tag_value)
# For resources with 50 tags, we need to delete first and then create.
resource_ids = [r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) > self.tag_count_max - 1]
if resource_ids:
self.create_tag(c, resource_ids, new_key, tag_value)
def create_set(self, instances):
old_key = self.data.get('old_key', None)
resource_set = {}
for r in instances:
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags[old_key] not in resource_set:
resource_set[tags[old_key]] = []
resource_set[tags[old_key]].append(r)
return resource_set
def filter_resources(self, resources):
    # Keep only resources that carry the old tag key. (Popping from the list
    # while iterating over it skips elements and misindexes the remainder.)
    old_key = self.data.get('old_key', None)
    return [r for r in resources
            if old_key in {t['Key'] for t in r.get('Tags', [])}]
def process(self, resources):
count = len(resources)
resources = self.filter_resources(resources)
self.log.info(
"Filtered from %s resources to %s" % (count, len(resources)))
self.id_key = self.manager.get_model().id
resource_set = self.create_set(resources)
with self.executor_factory(max_workers=3) as w:
futures = []
for r in resource_set:
futures.append(
w.submit(self.process_rename, r, resource_set[r]))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception renaming tag set \n %s" % (
f.exception()))
return resources
class TagDelayedAction(Action):
"""Tag resources for future action.
The optional 'tz' parameter can be used to adjust the clock to align
with a given timezone. The default value is 'utc'.
If neither 'days' nor 'hours' is specified, Cloud Custodian will default
to marking the resource for action 4 days in the future.
.. code-block :: yaml
- policies:
- name: ec2-mark-for-stop-in-future
resource: ec2
filters:
- type: value
key: Name
value: instance-to-stop-in-four-days
actions:
- type: mark-for-op
op: stop
"""
schema = utils.type_schema(
'mark-for-op',
tag={'type': 'string'},
msg={'type': 'string'},
days={'type': 'integer', 'minimum': 0, 'exclusiveMinimum': False},
hours={'type': 'integer', 'minimum': 0, 'exclusiveMinimum': False},
tz={'type': 'string'},
op={'type': 'string'})
schema_alias = True
permissions = ('ec2:CreateTags',)
batch_size = 200
concurrency = 2
default_template = 'Resource does not meet policy: {op}@{action_date}'
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise PolicyValidationError(
"mark-for-op specifies invalid op:%s in %s" % (
op, self.manager.data))
self.tz = zoneinfo.gettz(
Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
if not self.tz:
raise PolicyValidationError(
"Invalid timezone specified %s in %s" % (
self.tz, self.manager.data))
return self
def generate_timestamp(self, days, hours):
n = datetime.now(tz=self.tz)
if days is None or hours is None:
# maintains default value of days being 4 if nothing is provided
days = 4
action_date = (n + timedelta(days=days, hours=hours))
if hours > 0:
action_date_string = action_date.strftime('%Y/%m/%d %H%M %Z')
else:
action_date_string = action_date.strftime('%Y/%m/%d')
return action_date_string
def process(self, resources):
self.tz = zoneinfo.gettz(
Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
tag = self.data.get('tag', DEFAULT_TAG)
days = self.data.get('days', 0)
hours = self.data.get('hours', 0)
action_date = self.generate_timestamp(days, hours)
msg = msg_tmpl.format(
op=op, action_date=action_date)
self.log.info("Tagging %d resources for %s on %s" % (
len(resources), op, action_date))
tags = [{'Key': tag, 'Value': msg}]
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(self.manager.session_factory).client('ec2')
return self.manager.retry(
client.create_tags,
Resources=[v[self.id_key] for v in resource_set],
Tags=tags,
DryRun=self.manager.config.dryrun)
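# Illustrative sketch (not part of c7n): the tag value written by TagDelayedAction
# is its default_template rendered with the op and the generated timestamp, i.e.
# exactly the 'op@date' form that TagActionFilter parses. Values are hypothetical.
def _demo_delayed_action_tag_value(op='stop', action_date='2024/07/04'):
    return 'Resource does not meet policy: {op}@{action_date}'.format(
        op=op, action_date=action_date)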
class NormalizeTag(Action):
"""Transform the value of a tag.
Set the tag value to uppercase, title, or lowercase, or strip text
from a tag value.
.. code-block :: yaml
policies:
- name: ec2-service-transform-lower
resource: ec2
comment: |
ec2-service-tag-value-to-lower
query:
- instance-state-name: running
filters:
- "tag:testing8882": present
actions:
- type: normalize-tag
key: lower_key
action: lower
- name: ec2-service-strip
resource: ec2
comment: |
ec2-service-tag-strip-blah
query:
- instance-state-name: running
filters:
- "tag:testing8882": present
actions:
- type: normalize-tag
key: strip_key
action: strip
value: blah
"""
schema_alias = True
schema = utils.type_schema(
'normalize-tag',
key={'type': 'string'},
action={'type': 'string',
'items': {
'enum': ['upper', 'lower', 'title', 'strip', 'replace']}},
value={'type': 'string'})
permissions = ('ec2:CreateTags',)
def create_tag(self, client, ids, key, value):
self.manager.retry(
client.create_tags,
Resources=ids,
Tags=[{'Key': key, 'Value': value}])
def process_transform(self, tag_value, resource_set):
"""
Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key
"""
self.log.info("Transforming tag value on %s instances" % (
len(resource_set)))
key = self.data.get('key')
c = utils.local_session(self.manager.session_factory).client('ec2')
self.create_tag(
c,
[r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < 50],
key, tag_value)
def create_set(self, instances):
key = self.data.get('key', None)
resource_set = {}
for r in instances:
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags[key] not in resource_set:
resource_set[tags[key]] = []
resource_set[tags[key]].append(r)
return resource_set
def filter_resources(self, resources):
    # Keep only resources that carry the target tag key. (Popping from the list
    # while iterating over it skips elements and misindexes the remainder.)
    key = self.data.get('key', None)
    return [r for r in resources
            if key in {t['Key'] for t in r.get('Tags', [])}]
def process(self, resources):
count = len(resources)
resources = self.filter_resources(resources)
self.log.info(
"Filtered from %s resources to %s" % (count, len(resources)))
self.id_key = self.manager.get_model().id
resource_set = self.create_set(resources)
with self.executor_factory(max_workers=3) as w:
futures = []
for r in resource_set:
action = self.data.get('action')
value = self.data.get('value')
new_value = False
if action == 'lower' and not r.islower():
new_value = r.lower()
elif action == 'upper' and not r.isupper():
new_value = r.upper()
elif action == 'title' and not r.istitle():
new_value = r.title()
elif action == 'strip' and value and value in r:
new_value = r.strip(value)
if new_value:
futures.append(
w.submit(self.process_transform, new_value, resource_set[r]))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception renaming tag set \n %s" % (
f.exception()))
return resources
class UniversalTag(Tag):
"""Applies one or more tags to the specified resources.
"""
batch_size = 20
concurrency = 1
permissions = ('resourcegroupstaggingapi:TagResources',)
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get('msg')
msg = self.data.get('value') or msg
tag = self.data.get('tag', DEFAULT_TAG)
tag = self.data.get('key') or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get('tags', {})
if msg:
tags[tag] = msg
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
return universal_retry(
client.tag_resources, ResourceARNList=arns, Tags=tags)
class UniversalUntag(RemoveTag):
"""Removes the specified tags from the specified resources.
"""
batch_size = 20
concurrency = 1
permissions = ('resourcegroupstaggingapi:UntagResources',)
def process_resource_set(self, resource_set, tag_keys):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
return universal_retry(
client.untag_resources, ResourceARNList=arns, TagKeys=tag_keys)
class UniversalTagDelayedAction(TagDelayedAction):
"""Tag resources for future action.
:example:
.. code-block :: yaml
policies:
- name: ec2-mark-stop
resource: ec2
filters:
- type: image-age
op: ge
days: 90
actions:
- type: mark-for-op
tag: custodian_cleanup
op: terminate
days: 4
"""
batch_size = 20
concurrency = 2
permissions = ('resourcegroupstaggingapi:TagResources',)
def process(self, resources):
self.tz = zoneinfo.gettz(
Time.TZ_ALIASES.get(self.data.get('tz', 'utc')))
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get('msg', self.default_template)
op = self.data.get('op', 'stop')
tag = self.data.get('tag', DEFAULT_TAG)
days = self.data.get('days', 0)
hours = self.data.get('hours', 0)
action_date = self.generate_timestamp(days, hours)
msg = msg_tmpl.format(
op=op, action_date=action_date)
self.log.info("Tagging %d resources for %s on %s" % (
len(resources), op, action_date))
tags = {tag: msg}
batch_size = self.data.get('batch_size', self.batch_size)
_common_tag_processer(
self.executor_factory, batch_size, self.concurrency,
self.process_resource_set, self.id_key, resources, tags, self.log)
def process_resource_set(self, resource_set, tags):
client = utils.local_session(
self.manager.session_factory).client('resourcegroupstaggingapi')
arns = self.manager.get_arns(resource_set)
return universal_retry(
client.tag_resources, ResourceARNList=arns, Tags=tags)
def universal_retry(method, ResourceARNList, **kw):
"""Retry support for resourcegroup tagging apis.
The resource group tagging api typically returns a 200 status code
with embedded resource specific errors. To enable resource specific
retry on throttles, we extract those, perform backoff w/ jitter and
continue. Other errors are immediately raised.
We do not aggregate resource responses across retries; if a retry is
performed, only the last response (covering the retried subset of
resources) is returned.
"""
max_attempts = 6
for idx, delay in enumerate(
utils.backoff_delays(1.5, 2 ** 8, jitter=True)):
response = method(ResourceARNList=ResourceARNList, **kw)
failures = response.get('FailedResourcesMap', {})
if not failures:
return response
errors = {}
throttles = set()
for f_arn in failures:
if failures[f_arn]['ErrorCode'] == 'ThrottlingException':
throttles.add(f_arn)
else:
errors[f_arn] = failures[f_arn]['ErrorCode']
if errors:
raise Exception("Resource Tag Errors %s" % (errors))
if idx == max_attempts - 1:
raise Exception("Resource Tag Throttled %s" % (", ".join(throttles)))
time.sleep(delay)
ResourceARNList = list(throttles)
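# Illustrative usage sketch (not part of c7n): universal_retry wraps a
# resourcegroupstaggingapi call so that per-resource throttles are retried with
# backoff. The client and ARN values are hypothetical; real code obtains the
# client from the policy session as the actions above do.
def _demo_universal_tag(client, arns):
    # e.g. client = boto3.client('resourcegroupstaggingapi')
    return universal_retry(
        client.tag_resources, ResourceARNList=arns, Tags={'owner': 'team-a'})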
|
the-stack_106_23564 | import flask,os
from tqdm import tqdm
from flask import render_template,jsonify,request,redirect,url_for
import graphDataBuilder as gdb
import dependancyManager as dm
import clustering as cl
import mimetypes
mimetypes.add_type('application/javascript', '.mjs')
app = flask.Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return redirect(url_for('userForce'))
@app.route('/userGraphData', methods=['GET'])
def usGraphDa():
return jsonify(userGraph)
@app.route('/userCloudData', methods=['GET'])
def userCloudData():
user = request.args.get('user')
userTerms = topUserTerms.get(user)
wordList = []
for key, value in userTerms.items():
wordList.append({'word':key,'size':value})
return jsonify(wordList)
@app.route('/cloud', methods=['GET'])
def userCloud():
code = int(request.args.get('code'))
user = userGraph['nodes'][code]['id']
return render_template('cloud.html', user=user, topn=topNPercentWords)
@app.route('/users', methods=['GET'])
def userForce():
return render_template('users.html', tcount=topCount,ecount=topEdges)
@app.route('/clusters', methods=['GET'])
def clusters():
return render_template("Clusters.html",kcount=clusterCount,ucount=usersToCluster)
@app.route('/userGraphPreferences', methods=['GET','POST'])
def topCount():
global topCount
global userGraph
global topEdges
if request.args.get('users') is not None and request.args.get('users')!='':
topCount = int(request.args.get('users'))
if request.args.get('edges') is not None and request.args.get('edges')!='':
topEdges = int(request.args.get('edges'))
links = dm.getLinks()
userGraph = getUserGraph(links)
return redirect(url_for('userForce'))
@app.route('/clusterGraphData', methods=['GET'])
def clusterGraph():
# print(clusterDataCsv)
return clusterDataCsv
@app.route('/clusterGraphPreferences', methods=['GET', 'POST'])
def clusterPrefs():
global clusterCount
global userGraph
global usersToCluster
if request.args.get('users') is not None and request.args.get('users') != '':
usersToCluster = int(request.args.get('users'))
if request.args.get('clusters') is not None and request.args.get('clusters') != '':
clusterCount = int(request.args.get('clusters'))
reCluster()
return redirect(url_for('clusters'))
@app.route('/clusterCloudData', methods=['GET'])
def clusterCloudData():
global topClustTerms
clusterID = int(request.args.get('cluster'))
clusterTerms = topClustTerms.get(clusterID)
wordList = []
for key, value in clusterTerms.items():
wordList.append({'word': key, 'size': value})
return jsonify(wordList)
@app.route('/clusterCloud', methods=['GET'])
def clusterCloud():
code = int(request.args.get('code'))
# user = 'Cluster '+str(code)
return render_template('cloud.html', cluster=code, topn=topNPercentWords)
@app.route('/topTermsCloud', methods=['GET'])
def topCloudTerms():
global topNPercentWords
global topClustTerms
topNPercentWords = int(request.args.get('count'))
topClustTerms = cl.getTopClusterTerms(clusterDataRaw, topNPercentWords)
if request.args.get('userID') is not None and request.args.get('userID') != '':
uid = request.args.get('userID')
return render_template('cloud.html', user=uid, topn=topNPercentWords)
elif request.args.get('clusterCode') is not None and request.args.get('clusterCode') != '':
code = int(request.args.get('clusterCode'))
return render_template('cloud.html', cluster=code, topn=topNPercentWords)
def topUsers(rawLinks, tc=None, chop=True):
    # Rank users by the total weight of their links; return the top `count`
    # names, chopped at '@' unless chop=False.
    global topCount
    count = topCount
    if tc is not None:
        count = tc
userTotals = {}
for user, others in rawLinks.items():
total = sum(others.values())
userTotals[user] = total
sortedUsers = {k: v for k, v in sorted(userTotals.items(), key=lambda item: item[1])}
topUsers = list(reversed(list(sortedUsers)))[:count]
if chop:
nodes = [user.split('@')[0] for user in topUsers]
else:
nodes = topUsers
return nodes
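# Illustrative sketch (not part of the app): the raw links structure consumed by
# topUsers/getUserGraph maps each sender to a dict of recipients and counts.
# Users are ranked by the sum of their counts and the domain is chopped at '@'.
# The data below is hypothetical, and the ranking is reproduced inline so the
# sketch does not depend on module globals.
def _demo_rank_users():
    raw_links = {'alice@example.com': {'bob@example.com': 12},
                 'bob@example.com': {'alice@example.com': 2}}
    totals = {user: sum(others.values()) for user, others in raw_links.items()}
    ranked = sorted(totals, key=totals.get, reverse=True)
    return [u.split('@')[0] for u in ranked[:1]]  # -> ['alice']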
def getUserGraph(rawLinks):
topNodes = topUsers(rawLinks)
links = gdb.formatLinks(rawLinks, topNodes,topEdges)
nodes = {'nodes': [{'id': name} for name in topNodes]}
userGraph = {}
userGraph.update(links)
userGraph.update(nodes)
return userGraph
def reCluster():
global clusterDataCsv
global topClustTerms
clusterDataRaw = cl.startCluster(k=clusterCount, userCount=usersToCluster)
topClustTerms = cl.getTopClusterTerms(clusterDataRaw, topNPercentWords)
clusterDataCsv = cl.clusterDataToCsv(clusterDataRaw)
if __name__ == '__main__':
topCount = 80
topEdges = 100
clusterCount = 4
usersToCluster = 100
topNPercentWords = 5
app.config["DEBUG"] = False
links = dm.getLinks()
userGraph = getUserGraph(links)
vectorUsers = dm.getuvec()
topUserTerms = gdb.topTerms(vectorUsers, topNPercentWords)
clusterDataRaw = cl.startCluster(k=clusterCount, userCount=usersToCluster)
topClustTerms = cl.getTopClusterTerms(clusterDataRaw, topNPercentWords)
clusterDataCsv = cl.clusterDataToCsv(clusterDataRaw)
# app.run(host='0.0.0.0',port=5000)
app.run()
|
the-stack_106_23565 | #!/usr/bin/env python
"""
@package ion.agents.agent_alert_manager
@file ion/agents/agent_alert_manager.py
@author Edward Hunter
@brief Class for managing alerts and aggregated alerts based on data streams,
state changes, and command errors, for opt-in use by agents.
"""
__author__ = 'Edward Hunter'
# Pyon imports
from pyon.public import log
import copy
from interface.objects import StreamAlertType
from interface.objects import DeviceStatusType
from interface.objects import AggregateStatusType
# Alarms.
from ion.agents.alerts.alerts import *
class AgentAlertManager(object):
"""
"""
def __init__(self, agent):
self._agent = agent
agent.aparam_set_alerts = self.aparam_set_alerts
agent.aparam_get_alerts = self.aparam_get_alerts
# Always default the aggstatus to unknown.
for aggregate_type in AggregateStatusType._str_map.keys():
agent.aparam_aggstatus[aggregate_type] = DeviceStatusType.STATUS_UNKNOWN
agent.aparam_set_aggstatus = self.aparam_set_aggstatus
def process_alerts(self, **kwargs):
log.debug("process_alerts: aparam_alerts=%s; kwargs=%s", self._agent.aparam_alerts, kwargs)
for a in self._agent.aparam_alerts:
a.eval_alert(**kwargs)
# update the aggregate status for this device
self._process_aggregate_alerts()
def _update_aggstatus(self, aggregate_type, new_status, alerts_list=None):
"""
Called by this manager to set a new status value for an aggstatus type.
Here the method simply assigns the new value and publishes a
DeviceAggregateStatusEvent event. This is the standard behavior
for InstrumentAgents (this method was introduced for refactoring and
integration purposes -- no changes in functionality at all).
This method can be overridden as appropriate; in particular, the
handling is a bit different for platform agents,
which also handle other statuses (child status and rollup status).
@param aggregate_type type of status to be updated
@param new_status The new status value
"""
old_status = self._agent.aparam_aggstatus[aggregate_type]
self._agent.aparam_aggstatus[aggregate_type] = new_status
self._publish_agg_status_event(aggregate_type, new_status, old_status, alerts_list )
def _process_aggregate_alerts(self):
"""
Loop through the alerts list, retrieve the status of each alert that contributes to an aggregate status, and update the aggregate state.
"""
#init working status
updated_status = {}
agg_alerts = {}
for aggregate_type in AggregateStatusType._str_map.keys():
updated_status[aggregate_type] = DeviceStatusType.STATUS_OK
for a in self._agent.aparam_alerts:
curr_state = a.get_status()
#if this alert does not contribute to an aggregate type then pass
if a._aggregate_type:
#get the current value for this aggregate status
current_agg_state = updated_status[ a._aggregate_type ]
#check if the status of this alert has changed, if so save for event description
if a._prev_status != a._status:
if a._aggregate_type in agg_alerts:
agg_alerts[a._aggregate_type].append(a._name)
else:
agg_alerts[a._aggregate_type]= [a._name]
if a._status is not None:
if a._status is True:
# this alert is not 'tripped' so the status is OK
#check behavior here. if there are any unknowns then set the agg status to unknown?
if current_agg_state is DeviceStatusType.STATUS_UNKNOWN:
updated_status[ a._aggregate_type ] = DeviceStatusType.STATUS_OK
elif a._status is False:
#the alert is active, either a warning or an alarm
if a._alert_type is StreamAlertType.ALARM:
updated_status[ a._aggregate_type ] = DeviceStatusType.STATUS_CRITICAL
elif a._alert_type is StreamAlertType.WARNING and current_agg_state is not DeviceStatusType.STATUS_CRITICAL:
updated_status[ a._aggregate_type ] = DeviceStatusType.STATUS_WARNING
#compare old state with new state and publish alerts for any agg status that has changed.
for aggregate_type in AggregateStatusType._str_map.keys():
if updated_status[aggregate_type] != self._agent.aparam_aggstatus[aggregate_type]:
alerts_list = None
if aggregate_type in agg_alerts:
alerts_list = agg_alerts[aggregate_type]
self._update_aggstatus(aggregate_type, updated_status[aggregate_type], alerts_list)
def _publish_agg_status_event(self, status_type, new_status, old_status, alerts_list=None):
"""
Publish a DeviceAggregateStatusEvent for an aggregate status change.
"""
evt_out = dict(event_type='DeviceAggregateStatusEvent',
origin_type=self._agent.__class__.ORIGIN_TYPE,
origin=self._agent.resource_id,
values = alerts_list,
status_name=status_type,
status=new_status,
prev_status=old_status)
log.debug("_publish_agg_status_event publishing: %s", evt_out)
try:
self._agent._event_publisher.publish_event(**evt_out)
except Exception as exc:
log.error('Agent %s could not publish aggregate status change event. Exception message: %s',
self._agent._proc_name, exc.message)
def aparam_set_alerts(self, params):
"""
Construct alert objects from kwarg dicts.
"""
if not isinstance(params, (list,tuple)) or len(params)==0:
return -1
params = copy.deepcopy(params)
if isinstance(params[0], str):
action = params[0]
params = params[1:]
else:
action = 'set'
if action not in ('set','add','remove','clear'):
return -1
if action in ('set', 'clear'):
[x.stop() for x in self._agent.aparam_alerts]
self._agent.aparam_alerts = []
if action in ('set', 'add'):
for alert_def in params:
try:
cls = alert_def.pop('alert_class')
alert_def['resource_id'] = self._agent.resource_id
alert_def['origin_type'] = self._agent.__class__.ORIGIN_TYPE
if cls == 'LateDataAlert':
alert_def['get_state'] = self._agent._fsm.get_current_state
alert = eval('%s(**alert_def)' % cls)
self._agent.aparam_alerts.append(alert)
except Exception as ex:
log.error('Agent %s error constructing alert %s. Exception: %s.',
self._agent._proc_name, str(alert_def), str(ex))
elif action == 'remove':
    # Partition the current alert list; filtering the already-reduced copy
    # left old_alerts empty, so removed alerts were never stopped.
    old_alerts = [x for x in self._agent.aparam_alerts if x.name in params]
    new_alerts = [x for x in self._agent.aparam_alerts if x.name not in params]
    [x.stop() for x in old_alerts]
    self._agent.aparam_alerts = new_alerts
for a in self._agent.aparam_alerts:
log.info('Agent alert: %s', str(a))
def aparam_get_alerts(self):
"""
Return kwarg representation of all alerts.
"""
result = [x.get_status() for x in self._agent.aparam_alerts]
return result
def stop_all(self):
"""
"""
[a.stop() for a in self._agent.aparam_alerts]
def aparam_set_aggstatus(self, params):
return -1
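# Illustrative sketch (not part of the agent): aparam_set_alerts accepts an
# optional action string ('set', 'add', 'remove', 'clear') followed by alert
# definition dicts. Each dict must carry 'alert_class' naming a class from
# ion.agents.alerts.alerts (e.g. 'LateDataAlert', referenced above); the
# remaining keys are that class's constructor kwargs and are assumptions here.
EXAMPLE_ALERT_PARAMS = [
    'set',
    {
        'alert_class': 'LateDataAlert',
        # Remaining keys depend on the alert class; these names are hypothetical.
        'name': 'late_data_warning',
        'alert_type': StreamAlertType.WARNING,
        'aggregate_type': AggregateStatusType.AGGREGATE_COMMS,
    },
]
# Usage (hedged): agent.aparam_set_alerts(EXAMPLE_ALERT_PARAMS)
# 'remove' takes alert names instead: agent.aparam_set_alerts(['remove', 'late_data_warning'])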
|
the-stack_106_23567 | from __future__ import print_function, division
import os, os.path, sys, re, glob
import itertools
from copy import deepcopy
import json
from .config import on_rtd
from .logger import getLogger
logger = getLogger()
if not on_rtd:
import numpy as np
import pandas as pd
import numpy.random as rand
from scipy.stats import gaussian_kde
import scipy
import emcee
import corner
try:
import pymultinest
except ImportError:
logger.warning("PyMultiNest not imported. MultiNest fits will not work.")
import configobj
from astropy.coordinates import SkyCoord
try:
basestring
except NameError:
basestring = str
from .utils import addmags
from .observation import ObservationTree, Observation, Source
from .priors import AgePrior, DistancePrior, AVPrior, QPrior, FlatPrior
from .priors import SalpeterPrior, ChabrierPrior, FehPrior, EEP_prior, QPrior
from .isochrone import get_ichrone
from .models import ModelGridInterpolator
from .likelihood import star_lnlike, gauss_lnprob
try:
from .fit import fit_emcee3
except ImportError:
pass
def _parse_config_value(v):
try:
val = float(v)
except:
try:
val = [float(x) for x in v]
except:
val = v
# print('{} becomes {}, type={}'.format(v,val,type(val)))
return val
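# Illustrative sketch (not part of isochrones): configobj hands values back as
# strings or lists of strings; _parse_config_value coerces them so that
# "9.5" -> 9.5, ["5000", "150"] -> [5000.0, 150.0], and anything else (e.g. a
# filename) is passed through unchanged. The values below are hypothetical.
def _demo_parse_config_values():
    return (_parse_config_value("9.5"),
            _parse_config_value(["5000", "150"]),
            _parse_config_value("obs.csv"))  # -> (9.5, [5000.0, 150.0], 'obs.csv')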
class StarModel(object):
"""
:param ic:
:class:`Isochrone` object used to model star.
:param obs: (optional)
:class:`ObservationTree` object containing photometry information.
If not provided, then one will be constructed from the provided
keyword arguments (which must include at least one photometric
bandpass). This should only happen in the simplest case
of a single star system---if multiple stars are detected
in any of the observations being used, an :class:`ObservationTree`
should be passed. If `obs` is a string, then it is assumed
to be a filename of an obs summary DataFrame.
:param N:
Number of model stars to assign to each "leaf node" of the
:class:`ObservationTree`. If you want to model a binary star,
provide ``N=2``.
:param **kwargs:
Keyword arguments must be properties of given isochrone, e.g., logg,
feh, Teff, and/or magnitudes. The values represent measurements of
the star, and must be in (value,error) format. All such keyword
arguments will be held in ``self.properties``. ``parallax`` is
also a valid property, and should be provided in milliarcseconds,
as is ``density`` [g/cc], and ``nu_max`` and ``delta_nu``
(asteroseismic parameters in uHz.)
"""
# These are allowable parameters that are not photometric bands
_not_a_band = (
"RA",
"dec",
"ra",
"Dec",
"maxAV",
"parallax",
"AV",
"logg",
"Teff",
"feh",
"density",
"separation",
"PA",
"resolution",
"relative",
"N",
"index",
"id",
"nu_max",
"delta_nu",
)
def __init__(
self,
ic,
obs=None,
N=1,
index=0,
name="",
use_emcee=False,
RA=None,
dec=None,
coords=None,
eep_bounds=None,
**kwargs
):
self.name = name
if not name:
if obs is not None:
self.name = obs.name
if coords is None:
if RA is not None and dec is not None:
try:
coords = SkyCoord(RA, dec)
except:
coords = SkyCoord(float(RA), float(dec), unit="deg")
self.coords = coords
self._ic = ic
self.use_emcee = use_emcee
# If obs is not provided, build it
if obs is None:
self._build_obs(**kwargs)
self.obs.define_models(ic, N=N, index=index)
self._add_properties(**kwargs)
elif isinstance(obs, basestring):
df = pd.read_csv(obs)
obs = ObservationTree.from_df(df)
obs.define_models(ic, N=N, index=index)
self.obs = obs
self._add_properties(**kwargs)
else:
self.obs = obs
if len(self.obs.get_model_nodes()) == 0:
self.obs.define_models(ic, N=N, index=index)
self._add_properties(**kwargs)
self._priors = {
"mass": ChabrierPrior(),
"feh": FehPrior(),
"q": QPrior(),
"age": AgePrior(),
"distance": DistancePrior(),
"AV": AVPrior(),
}
self._priors["eep"] = EEP_prior(self.ic, self._priors[self.ic.eep_replaces], bounds=eep_bounds)
self._bounds = {
k: p.bounds if k not in ["mass", "feh", "age"] else None for k, p in self._priors.items()
}
if "maxAV" in kwargs:
self.set_bounds(AV=(0, kwargs["maxAV"]))
if "max_distance" in kwargs:
self.set_bounds(distance=(0, kwargs["max_distance"]))
self._bands = None
self._props = None
self._directory = None
self._samples = None
@property
def bands(self):
if self._bands is None:
try:
self._bands = list({n.band for n in self.obs.get_obs_nodes()})
except AttributeError: # if no magnitudes are in obs
self._bands = []
return self._bands
@property
def props(self):
if self._props is None:
props = {k for v in self.obs.spectroscopy.values() for k in v.keys()}
self._props = list(props - {"Teff", "logg", "feh"})
return self._props
@property
def directory(self):
return self._directory if self._directory else "."
@property
def ic(self):
if type(self._ic) == type:
self._ic = self._ic()
return self._ic
@classmethod
def _parse_band(cls, kw):
"""Returns photometric band from inifile keyword
"""
m = re.search(r"([a-zA-Z0-9]+)(_\d+)?", kw)
if m:
if m.group(1) in cls._not_a_band:
return None
else:
return m.group(1)
@classmethod
def get_bands(cls, inifile):
bands = []
c = configobj.ConfigObj(inifile)
for kw, v in c.items():
if type(v) is configobj.Section:
for kw in v:
b = cls._parse_band(kw)
if b is not None:
bands.append(b)
else:
b = cls._parse_band(kw)
if b is not None:
bands.append(b)
return list(set(bands))
@classmethod
def from_ini(cls, ic, folder=".", ini_file="star.ini", **kwargs):
"""
Initialize a StarModel from a .ini file
The "classic" format (version <= 0.9) should still work for a single star,
where all properties are just listed in the file; e.g.,
J = 10, 0.05
H = 9.5, 0.05
K = 9.0, 0.05
Teff = 5000, 150
If there are multiple stars observed, you can either define them in
the ini file, or use the `obsfile` keyword, pointing to a file with
the summarized photometric observations. In this case, spectroscopic/parallax
info should still be included in the .ini file; e.g.,
obsfile = obs.csv
Teff = 5000, 150
The obsfile should be a comma-separated table with the following columns:
`[name, band, resolution, mag, e_mag, separation, pa, relative]`.
* `name` is the name of instrument
* `band` is the photometric bandpass
* `resolution` is the approximate spatial resolution of instrument
* `mag`, `e_mag` describe magnitude of source (absolute or relative)
* `separation`, `pa` describe position of source
* `relative`: single-bit flag; if 1 then magnitudes taken with this
instrument are assumed to be relative rather than absolute.
If an obsfile is not provided, you can also define all the same information
in the ini file, following these rules:
* Every instrument/survey gets its own [section]. Sections are only
created for different photometric observations.
* if photometry relates to *all* stars in aperture,
there is no extra info in the section, just the photometry. In this case, it is
also assumed that the photometry is absolute. (`relative=False`)
* If 'resolution' is an attribute under a particular survey section (and
'relative' is not explicitly stated), then the survey is assumed to have relative
photometry, and to be listing
information about companion stars. In this case, there must be "separation"
and "PA" included for each companion. If there is more than one companion star,
they must be identified by tag, e.g., separation_1, PA_1, Ks_1, J_1, etc. The
tag can be anything alphanumeric, but it must be consistent within a particular
section (instrument). If there
is no tag, there is assumed to be only one companion detected.
* If there are no sections, then bands will be interpreted at face value
and will all be assumed to apply to all stars modeled.
* Default is to model each star in the highest-resolution observation as a
single star, at the same distance/age/feh/AV.
The `N` and `index`
parameters may also be provided, to specify the relations between the
model stars. If these are not provided, then `N` will default to `1`
(one model star per star observed in highest-resolution observation)
and `index` will default to all `0` (all stars physically associated).
"""
if not os.path.isabs(ini_file):
ini_file = os.path.join(folder, ini_file)
bands = cls.get_bands(ini_file)
if not isinstance(ic, ModelGridInterpolator):
ic = get_ichrone(ic, bands)
logger.debug("Initializing StarModel from {}".format(ini_file))
c = configobj.ConfigObj(ini_file)
RA = c.get("RA")
dec = c.get("dec")
maxAV = c.get("maxAV")
if len(c.sections) == 0:
for k in c:
kwargs[k] = _parse_config_value(c[k])
obs = None
else:
columns = ["name", "band", "resolution", "relative", "separation", "pa", "mag", "e_mag"]
df = pd.DataFrame(columns=columns)
i = 0
for k in c:
if type(c[k]) != configobj.Section:
kwargs[k] = _parse_config_value(c[k])
else:
instrument = k
# Set values of 'resolution' and 'relative'
if "resolution" in c[k]:
resolution = float(c[k]["resolution"])
relative = True
else:
resolution = 4.0 # default
relative = False
# Overwrite value of 'relative' if it is explicitly set
if "relative" in c[k]:
relative = c[k]["relative"] == "True"
# Check if there are multiple stars (defined by whether
# any separations are listed).
# While we're at it, keep track of tags if they exist,
# and pull out the names of the bands.
multiple = False
tags = []
bands = []
for label in c[k]:
m = re.search(r"separation(_\w+)?", label)
if m:
multiple = True
if m.group(1) is not None:
if m.group(1) not in tags:
tags.append(m.group(1))
elif (
re.search(r"PA", label)
or re.search(r"id", label)
or label in ["resolution", "relative"]
):
continue
else:
# At this point, this should be a photometric band
m = re.search(r"([a-zA-Z0-9]+)(_\w+)?", label)
b = m.group(1)
if b not in bands:
bands.append(b)
# If a blank tags needs to be created, do so
if len(bands) > 0 and (len(tags) == 0 or bands[0] in c[k]):
tags.append("")
# For each band and each star, create a row
for b in bands:
for tag in tags:
if "{}{}".format(b, tag) not in c[k]:
continue
row = {}
row["name"] = instrument
row["band"] = b
row["resolution"] = resolution
row["relative"] = relative
if "separation{}".format(tag) in c[k]:
row["separation"] = c[k]["separation{}".format(tag)]
row["pa"] = c[k]["PA{}".format(tag)]
else:
row["separation"] = 0.0
row["pa"] = 0.0
mag, e_mag = c[k]["{}{}".format(b, tag)]
row["mag"] = float(mag)
row["e_mag"] = float(e_mag)
if not np.isnan(row["mag"]) and not np.isnan(row["e_mag"]):
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
# put the reference star in w/ mag=0
if relative:
row = {}
row["name"] = instrument
row["band"] = b
row["resolution"] = resolution
row["relative"] = relative
row["separation"] = 0.0
row["pa"] = 0.0
row["mag"] = 0.0
row["e_mag"] = 0.01
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
obs = ObservationTree.from_df(df)
if "obsfile" in c:
obs = c["obsfile"]
logger.debug("Obs is {}".format(obs))
name = kwargs.pop("name", os.path.basename(folder))
new = cls(ic, obs=obs, **kwargs, name=name)
new._directory = os.path.abspath(folder)
return new
def print_ascii(self):
"""Prints an ascii representation of the observation tree structure.
"""
return self.obs.print_ascii()
def convert_pars_to_eep(self, pars):
"""Replaces old parameter vectors containing mass with the closest EEP equivalent
"""
pardict = self.obs.p2pardict(pars)
eeps = {s: self.ic.get_eep(*p[0:3], accurate=True) for s, p in pardict.items()}
new_pardict = pardict.copy()
for s in pardict:
new_pardict[s][0] = eeps[s]
return self.obs.pardict2p(new_pardict)
def bounds(self, prop):
if self._bounds[prop] is not None:
return self._bounds[prop]
elif prop == "mass":
lo, hi = self.ic.model_grid.get_limits("mass")
self._bounds["mass"] = (lo, hi)
self._priors["mass"].bounds = (lo, hi)
elif prop == "feh":
lo, hi = self.ic.model_grid.get_limits("feh")
self._bounds["feh"] = (lo, hi)
self._priors["feh"].bounds = (lo, hi)
elif prop == "age":
lo, hi = self.ic.model_grid.get_limits("age")
self._bounds["age"] = (lo, hi)
self._priors["age"].bounds = (lo, hi)
else:
raise ValueError("Unknown property {}".format(prop))
return self._bounds[prop]
def set_bounds(self, **kwargs):
for k, v in kwargs.items():
if len(v) != 2:
raise ValueError("Must provide (min, max)")
self._bounds[k] = v
self._priors[k].bounds = v
def _build_obs(self, **kwargs):
"""
Builds ObservationTree out of keyword arguments
Ignores anything that is not a photometric bandpass.
This should not be used if there are multiple stars observed.
Creates self.obs
"""
logger.debug("Building ObservationTree...")
tree = ObservationTree()
for k, v in kwargs.items():
if k in self.ic.bands:
if np.size(v) != 2:
logger.warning("{}={} ignored (no uncertainty).".format(k, v))
# continue
v = [v, np.nan]
o = Observation("", k, 99) # bogus resolution=99
s = Source(v[0], v[1])
o.add_source(s)
logger.debug("Adding {} ({})".format(s, o))
tree.add_observation(o)
self.obs = tree
def _add_properties(self, **kwargs):
"""
Adds non-photometry properties to ObservationTree
"""
for k, v in kwargs.items():
if k in self.ic.bands:
continue
elif k == "parallax":
self.obs.add_parallax(v)
elif k == "AV":
self.obs.add_AV(v)
elif k in ["Teff", "logg", "feh", "density"]:
par = {k: v}
self.obs.add_spectroscopy(**par)
elif re.search(r"_", k):
m = re.search(r"^(\w+)_(\w+)$", k)
prop = m.group(1)
tag = m.group(2)
self.obs.add_spectroscopy(**{prop: v, "label": "0_{}".format(tag)})
@property
def param_description(self):
return self.obs.param_description
@property
def param_names(self):
return self.param_description
@property
def mags(self):
return {n.band: n.value[0] for n in self.obs.get_obs_nodes()}
def lnpost(self, p, **kwargs):
lnpr = self.lnprior(p)
if not np.isfinite(lnpr):
return -np.inf
return lnpr + self.lnlike(p, **kwargs)
def lnlike(self, p, **kwargs):
pardict = self.obs.p2pardict(p)
model_values = {}
for star, pars in pardict.items():
Teff, logg, feh, mags = self.ic.interp_mag(pars, self.bands)
vals = {"Teff": Teff, "logg": logg, "feh": feh}
vals.update({b: m for b, m in zip(self.bands, mags)})
model_values[star] = vals
lnl = self.obs.lnlike(pardict, model_values, **kwargs)
return lnl
def lnprior(self, p):
N = self.obs.Nstars
i = 0
lnp = 0
if self.ic.eep_replaces == "mass":
for s in self.obs.systems:
age, feh, dist, AV = p[i + N[s] : i + N[s] + 4]
for prop, val in zip(["age", "feh", "distance", "AV"], [age, feh, dist, AV]):
lo, hi = self.bounds(prop)
if val < lo or val > hi:
return -np.inf
lnp += self._priors[prop].lnpdf(val)
if not np.isfinite(lnp):
logger.debug("lnp=-inf for {}={} (system {})".format(prop, val, s))
return -np.inf
# Note: this all is just assuming proper order for multiple stars.
# Is this OK? Should keep eye out for bugs here.
# Compute EEP priors. Note, this implicitly treats each stars as an independent
# draw from the IMF (i.e. flat mass-ratio prior):
# eeps = p[i:i + N[s]]
# Enforce that eeps are in descending order
eeps = np.array(p[i : i + N[s]])
if not (eeps[1:] <= eeps[:-1]).all():
return -np.inf
for eep in eeps:
lnp += self._priors["eep"].lnpdf(eep, age=p[i + N[s]], feh=p[i + N[s] + 1])
# masses, dm_deeps = zip(*[self.ic.interp_value([eep, age, feh], ['initial_mass', 'dm_deep'])
# for eep in eeps])
# if any(np.isnan(masses)):
# return -np.inf
# # Priors for mass ratios
# for j in range(N[s]-1):
# q = masses[j+1]/masses[0]
# qmin, qmax = self.bounds('q')
# ## The following would enforce MA > MB > MC, but seems to make things very slow:
# #if j+1 > 1:
# # qmax = masses[j] / masses[0]
# lnp += np.log(self.prior('q', q))
# if not np.isfinite(lnp):
# logger.debug('lnp=-inf for q={} (system {})'.format(q, s))
# return -np.inf
i += N[s] + 4
elif self.ic.eep_replaces == "age":
raise NotImplementedError("Prior not implemented for evolution track grids")
return lnp
def prior_transform(self, cube):
pars = np.array(cube) * 0
i = 0
for _, n in self.obs.Nstars.items():
mineep, maxeep = self.bounds("eep")
for j in range(n):
pars[i + j] = (maxeep - mineep) * cube[i + j] + mineep
for j, par in enumerate(["age", "feh", "distance", "AV"]):
lo, hi = self.bounds(par)
pars[i + n + j] = (hi - lo) * cube[i + n + j] + lo
i += 4 + n
return pars
def set_prior(self, **kwargs):
for prop, prior in kwargs.items():
self._priors[prop] = prior
self._bounds[prop] = prior.bounds
def prior(self, prop, val, **kwargs):
return self._priors[prop](val, **kwargs)
@property
def n_params(self):
tot = 0
for _, n in self.obs.Nstars.items():
tot += 4 + n
return tot
def mnest_prior(self, cube, ndim, nparams):
i = 0
for _, n in self.obs.Nstars.items():
mineep, maxeep = self.bounds("eep")
eeps = [(maxeep - mineep) * cube[i + j] + mineep for j in range(n)]
eeps.sort(reverse=True)
for j in range(n):
cube[i + j] = eeps[j]
for j, par in enumerate(["age", "feh", "distance", "AV"]):
lo, hi = self.bounds(par)
cube[i + n + j] = (hi - lo) * cube[i + n + j] + lo
i += 4 + n
def mnest_loglike(self, cube, ndim, nparams):
"""loglikelihood function for multinest
"""
return self.lnpost(cube)
@property
def labelstring(self):
return "--".join(["-".join([n.label for n in l.children]) for l in self.obs.get_obs_leaves()])
def fit(self, **kwargs):
if self.use_emcee:
return self.fit_mcmc(**kwargs)
else:
return self.fit_multinest(**kwargs)
@property
def mnest_basename(self):
"""Full path to basename
"""
if not hasattr(self, "_mnest_basename"):
s = self.labelstring
if s == "0_0":
s = "single"
elif s == "0_0-0_1":
s = "binary"
elif s == "0_0-0_1-0_2":
s = "triple"
s = "{}-{}".format(self.ic.name, s)
if self.name:
s = "{}-{}".format(self.name, s)
self._mnest_basename = os.path.join("chains", s + "-")
if os.path.isabs(self._mnest_basename):
return self._mnest_basename
else:
return os.path.join(self.directory, self._mnest_basename)
@mnest_basename.setter
def mnest_basename(self, basename):
if os.path.isabs(basename):
self._mnest_basename = basename
else:
self._mnest_basename = os.path.join("chains", basename)
def lnpost_polychord(self, theta):
phi = [0.0] # nDerived
return self.lnpost(theta), phi
def fit_polychord(self, basename, verbose=False, **kwargs):
from .config import POLYCHORD
sys.path.append(POLYCHORD)
import PyPolyChord.PyPolyChord as PolyChord
return PolyChord.run_nested_sampling(
self.lnpost_polychord, self.n_params, 0, file_root=basename, **kwargs
)
def fit_multinest(
self,
n_live_points=1000,
basename=None,
verbose=True,
refit=False,
overwrite=False,
test=False,
force_no_MPI=False,
**kwargs
):
"""
Fits model using MultiNest, via pymultinest.
:param n_live_points:
Number of live points to use for MultiNest fit.
:param basename:
Where the MulitNest-generated files will live.
By default this will be in a folder named `chains`
in the current working directory. Calling this
will define a `_mnest_basename` attribute for
this object.
:param verbose:
Whether you want MultiNest to talk to you.
:param refit, overwrite:
Set either of these to true if you want to
delete the MultiNest files associated with the
given basename and start over.
:param **kwargs:
Additional keyword arguments will be passed to
:func:`pymultinest.run`.
"""
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
except ImportError:
comm = None
rank = 0
if basename is not None: # Should this even be allowed?
self.mnest_basename = basename
basename = self.mnest_basename
if verbose:
logger.info("MultiNest basename: {}".format(basename))
folder = os.path.abspath(os.path.dirname(basename))
if rank == 0 or force_no_MPI:
if not os.path.exists(folder):
os.makedirs(folder)
if refit or overwrite:
files = glob.glob("{}*".format(basename))
[os.remove(f) for f in files]
short_basename = self._mnest_basename
mnest_kwargs = dict(
n_live_points=n_live_points, outputfiles_basename=short_basename, verbose=verbose
)
if force_no_MPI:
mnest_kwargs["force_no_MPI"] = force_no_MPI
for k, v in kwargs.items():
mnest_kwargs[k] = v
if test:
print("pymultinest.run() with the following kwargs: {}".format(mnest_kwargs))
else:
wd = os.getcwd()
os.chdir(os.path.join(folder, ".."))
pymultinest.run(self.mnest_loglike, self.mnest_prior, self.n_params, **mnest_kwargs)
os.chdir(wd)
# with open(propfile, 'w') as f:
# json.dump(self.properties, f, indent=2)
self._make_samples()
@property
def mnest_analyzer(self):
"""
PyMultiNest Analyzer object associated with fit.
See PyMultiNest documentation for more.
"""
return pymultinest.Analyzer(self.n_params, self.mnest_basename)
@property
def evidence(self):
"""
Log(evidence) from multinest fit
"""
s = self.mnest_analyzer.get_stats()
return (s["global evidence"], s["global evidence error"])
def maxlike(self, p0, **kwargs):
""" Finds (local) optimum in parameter space.
"""
def fn(p):
return -self.lnpost(p)
if "method" not in kwargs:
kwargs["method"] = "Nelder-Mead"
# Use the caller-supplied starting point rather than overwriting it.
fit = scipy.optimize.minimize(fn, p0, **kwargs)
return fit
def sample_from_prior(self, n):
return self.emcee_p0(n)
def emcee_p0(self, nwalkers):
def sample_row(nstars, n=nwalkers):
p = []
age0 = self._priors["age"].sample(n)
feh0 = self._priors["feh"].sample(n)
d0 = self._priors["distance"].sample(n)
AV0 = self._priors["AV"].sample(n)
mass0 = self._priors["mass"].sample(n)
if self.ic.eep_replaces == "age":
eep0 = self._priors["eep"].sample(n, mass=mass0, feh=feh0)
else:
eep0 = self._priors["eep"].sample(n, age=age0, feh=feh0)
for i in range(nstars):
p += [eep0]
p += [age0, feh0, d0, AV0]
return p
p0 = []
for _, n in self.obs.Nstars.items():
p0 += sample_row(n)
p0 = np.array(p0).T
nbad = 1
while True:
ibad = []
for i, p in enumerate(p0):
if not np.isfinite(self.lnpost(p)):
ibad.append(i)
nbad = len(ibad)
if nbad == 0:
break
pnew = []
for _, n in self.obs.Nstars.items():
pnew += sample_row(n, n=nbad)
pnew = np.array(pnew).T
p0[ibad, :] = pnew
return p0
def fit_mcmc(self, **kwargs):
return self.fit_mcmc_old(**kwargs)
def fit_mcmc_old(
self,
nwalkers=300,
nburn=200,
niter=100,
p0=None,
initial_burn=None,
ninitial=50,
loglike_kwargs=None,
**kwargs
):
"""Fits stellar model using MCMC.
:param nwalkers: (optional)
Number of walkers to pass to :class:`emcee.EnsembleSampler`.
Default is 300.
:param nburn: (optional)
Number of iterations for "burn-in." Default is 200.
:param niter: (optional)
Number of for-keeps iterations for MCMC chain.
Default is 100.
:param p0: (optional)
Initial parameters for emcee. If not provided, then chains
will behave according to whether inital_burn is set.
:param initial_burn: (optional)
If `True`, then initialize walkers first with a random initialization,
then cull the walkers, keeping only those with > 15% acceptance
rate, then reinitialize sampling. If `False`, then just do
normal burn-in. Default is `None`, which will be set to `True` if
fitting for distance (i.e., if there are apparent magnitudes as
properties of the model), and `False` if not.
:param ninitial: (optional)
Number of iterations to test walkers for acceptance rate before
re-initializing.
:param loglike_kwargs:
Any keyword arguments to pass to :func:`StarModel.lnlike`.
:param **kwargs:
Additional keyword arguments passed to :class:`emcee.EnsembleSampler`
constructor.
:return:
:class:`emcee.EnsembleSampler` object.
"""
# clear any saved _samples
if self._samples is not None:
self._samples = None
npars = self.n_params
if p0 is None:
logger.debug("Generating initial p0 for {} walkers...".format(nwalkers))
p0 = self.emcee_p0(nwalkers)
if initial_burn:
sampler = emcee.EnsembleSampler(nwalkers, npars, self.lnpost, **kwargs)
# ninitial = 300 #should this be parameter?
pos, prob, state = sampler.run_mcmc(p0, ninitial)
# Choose walker with highest final lnprob to seed new one
i, j = np.unravel_index(sampler.lnprobability.argmax(), sampler.shape)
p0_best = sampler.chain[i, j, :]
logger.debug("After initial burn, p0={}".format(p0_best))
p0 = p0_best * (1 + rand.normal(size=p0.shape) * 0.001)
logger.debug(p0)
else:
p0 = np.array(p0)
p0 = rand.normal(size=(nwalkers, npars)) * 0.01 + p0.T[None, :]
sampler = emcee.EnsembleSampler(nwalkers, npars, self.lnpost)
pos, prob, state = sampler.run_mcmc(p0, nburn)
sampler.reset()
sampler.run_mcmc(pos, niter, rstate0=state)
self._sampler = sampler
return sampler
@property
def sampler(self):
"""
Sampler object from MCMC run.
"""
if hasattr(self, "_sampler"):
return self._sampler
else:
raise AttributeError("MCMC must be run to access sampler")
def _make_samples(self):
if not self.use_emcee:
filename = "{}post_equal_weights.dat".format(self.mnest_basename)
try:
chain = np.loadtxt(filename)
try:
lnprob = chain[:, -1]
chain = chain[:, :-1]
except IndexError:
lnprob = np.array([chain[-1]])
chain = np.array([chain[:-1]])
except:
logger.error("Error loading chains from {}".format(filename))
raise
else:
chain = self.sampler.flatchain
lnprob = self.sampler.lnprobability.ravel()
df = pd.DataFrame()
i = 0
for s, n in self.obs.Nstars.items():
age = chain[:, i + n]
feh = chain[:, i + n + 1]
distance = chain[:, i + n + 2]
AV = chain[:, i + n + 3]
for j in range(n):
mass = chain[:, i + j]
d = self.ic(mass, age, feh, distance=distance, AV=AV)
for c in d.columns:
df[c + "_{}_{}".format(s, j)] = d[c]
df["age_{}".format(s)] = age
df["feh_{}".format(s)] = feh
df["distance_{}".format(s)] = distance
df["AV_{}".format(s)] = AV
i += 4 + n
for b in self.ic.bands:
tot = np.inf
for s, n in self.obs.Nstars.items():
for j in range(n):
tot = addmags(tot, df[b + "_mag_{}_{}".format(s, j)])
df[b + "_mag"] = tot
df["lnprob"] = lnprob
self._samples = df.copy()
@property
def samples(self):
"""Dataframe with samples drawn from isochrone according to posterior
Columns include both the sampling parameters from the MCMC
fit (mass, age, Fe/H, [distance, A_V]), and also evaluation
of the :class:`Isochrone` at each of these sample points---this
is how chains of physical/observable parameters get produced.
"""
if not hasattr(self, "sampler") and self._samples is None:
raise AttributeError("Must run MCMC (or load from file) " + "before accessing samples")
if self._samples is not None:
df = self._samples
else:
self._make_samples()
df = self._samples
return df
def random_samples(self, n):
"""
Returns a random sampling of given size from the existing samples.
:param n:
Number of samples
:return:
:class:`pandas.DataFrame` of length ``n`` with random samples.
"""
samples = self.samples
inds = rand.randint(len(samples), size=int(n))
newsamples = samples.iloc[inds]
newsamples.reset_index(inplace=True)
return newsamples
def triangle(self, *args, **kwargs):
return self.corner(*args, **kwargs)
def corner(self, params, query=None, **kwargs):
df = self.samples
if query is not None:
df = df.query(query)
priors = []
for p in params:
if re.match("mass", p):
priors.append(lambda x: self.prior("mass", x, bounds=self.bounds("mass")))
elif re.match("age", p):
priors.append(lambda x: self.prior("age", x, bounds=self.bounds("age")))
elif re.match("feh", p):
priors.append(lambda x: self.prior("feh", x, bounds=self.bounds("feh")))
elif re.match("distance", p):
priors.append(lambda x: self.prior("distance", x, bounds=self.bounds("distance")))
elif re.match("AV", p):
priors.append(lambda x: self.prior("AV", x, bounds=self.bounds("AV")))
else:
priors.append(None)
try:
fig = corner.corner(df[params], labels=params, priors=priors, **kwargs)
except:
logger.warning("Use Tim's version of corner to plot priors.")
fig = corner.corner(df[params], labels=params, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig
def triangle_physical(self, *args, **kwargs):
return self.corner_physical(*args, **kwargs)
def corner_plots(self, basename, **kwargs):
fig1, fig2 = self.corner_physical(**kwargs), self.corner_observed(**kwargs)
fig1.savefig(basename + "_physical.png")
fig2.savefig(basename + "_observed.png")
return fig1, fig2
def triangle_plots(self, *args, **kwargs):
return self.corner_plots(*args, **kwargs)
def corner_physical(self, props=["eep", "mass", "radius", "feh", "age", "distance", "AV"], **kwargs):
collective_props = ["feh", "age", "distance", "AV"]
indiv_props = [p for p in props if p not in collective_props]
sys_props = [p for p in props if p in collective_props]
props = ["{}_{}".format(p, l) for p in indiv_props for l in self.obs.leaf_labels]
props += ["{}_{}".format(p, s) for p in sys_props for s in self.obs.systems]
if "range" not in kwargs:
rng = [0.995 for p in props]
return self.corner(props, range=rng, **kwargs)
def mag_plot(self, *args, **kwargs):
pass
def corner_observed(self, **kwargs):
"""Makes corner plot for each observed node magnitude
"""
samples = []
names = []
truths = []
rng = []
for n in self.obs.get_obs_nodes():
labels = [l.label for l in n.get_model_nodes()]
try:
band = n.band
except AttributeError: # only root node
continue
mags = [self.samples["{}_mag_{}".format(band, l)] for l in labels]
tot_mag = addmags(*mags)
if n.relative:
name = "{} $\Delta${}".format(n.instrument, n.band)
ref = n.reference
if ref is None:
continue
ref_labels = [l.label for l in ref.get_model_nodes()]
ref_mags = [self.samples["{}_mag_{}".format(band, l)] for l in ref_labels]
tot_ref_mag = addmags(*ref_mags)
samples.append(tot_mag - tot_ref_mag)
truths.append(n.value[0] - ref.value[0])
else:
name = "{} {}".format(n.instrument, n.band)
samples.append(tot_mag)
truths.append(n.value[0])
names.append(name)
rng.append(
(
min(truths[-1], np.percentile(samples[-1], 0.5)),
max(truths[-1], np.percentile(samples[-1], 99.5)),
)
)
for s, d in self.obs.spectroscopy.items():
for k in d:
try:
name = "{}_{}".format(k, s)
samples.append(self.samples[name])
except KeyError:
# Use system tag if star tag doesn't exist
name = "{}_{}".format(k, s[0])
samples.append(self.samples[name])
truths.append(d[k][0])
rng.append(
(
min(truths[-1], np.percentile(samples[-1], 0.5)),
max(truths[-1], np.percentile(samples[-1], 99.5)),
)
)
names.append(name)
for s, val in self.obs.parallax.items():
plax_samples = 1000.0 / self.samples["distance_{}".format(s)]
samples.append(plax_samples)
truths.append(val[0])
rng.append(
(
min(truths[-1], np.percentile(samples[-1], 0.5)),
max(truths[-1], np.percentile(samples[-1], 99.5)),
)
)
names.append("parallax_{}".format(s))
samples = np.array(samples).T
return corner.corner(samples, labels=names, truths=truths, range=rng, **kwargs)
def save_hdf(self, filename, path="", overwrite=False, append=False):
"""Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
:class:`ObservationTree` is saved to /obs location under given path.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
"""
if os.path.exists(filename):
with pd.HDFStore(filename) as store:
if path in store:
if overwrite:
os.remove(filename)
elif not append:
raise IOError(
"{} in {} exists. Set either overwrite or append option.".format(path, filename)
)
if self.samples is not None:
self.samples.to_hdf(filename, path + "/samples", format="table")
else:
pd.DataFrame().to_hdf(filename, path + "/samples", format="table")
self.obs.save_hdf(filename, path + "/obs", append=True)
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer("{}/samples".format(path)).attrs
attrs.ic_type = type(self.ic)
attrs.ic_bands = list(self.ic.bands)
attrs.use_emcee = self.use_emcee
if hasattr(self, "_mnest_basename"):
attrs._mnest_basename = self._mnest_basename
attrs._bounds = self._bounds
attrs._priors = {k: v for k, v in self._priors.items() if k != "eep"}
# attrs._priors = self._priors
attrs.name = self.name
store.close()
@classmethod
def load_hdf(cls, filename, path="", name=None):
"""
A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object.
"""
if not os.path.exists(filename):
raise IOError("{} does not exist.".format(filename))
store = pd.HDFStore(filename)
try:
samples = store[path + "/samples"]
attrs = store.get_storer(path + "/samples").attrs
except:
store.close()
raise
try:
ic = attrs.ic_type(attrs.ic_bands)
except AttributeError:
ic = attrs.ic_type
use_emcee = attrs.use_emcee
mnest = True
try:
basename = attrs._mnest_basename
except AttributeError:
mnest = False
bounds = attrs._bounds
priors = attrs._priors
if name is None:
try:
name = attrs.name
except:
name = ""
store.close()
obs = ObservationTree.load_hdf(filename, path + "/obs", ic=ic)
mod = cls(ic, obs=obs, use_emcee=use_emcee, name=name)
mod._samples = samples
if mnest:
mod._mnest_basename = basename
mod._directory = os.path.dirname(filename)
mod._priors.update(priors)
mod._bounds = bounds
return mod
class StarModelGroup(object):
"""A collection of StarModel objects with different model node specifications
Pass a single StarModel, and model nodes will be cleared and replaced with
different variants.
"""
def __init__(self, base_model, max_multiples=1, max_stars=2):
self.base_model = deepcopy(base_model)
self.base_model.obs.clear_models()
self.max_multiples = max_multiples
self.max_stars = max_stars
self.models = []
for N, index in self.model_options:
mod = deepcopy(self.base_model)
mod.obs.define_models(self.ic, N=N, index=index)
self.models.append(mod)
@property
def ic(self):
return self.base_model.ic
@property
def N_stars(self):
return len(self.base_model.obs.leaves)
@property
def N_options(self):
return N_options(self.N_stars, max_multiples=self.max_multiples, max_stars=self.max_stars)
@property
def index_options(self):
return index_options(self.N_stars)
@property
def model_options(self):
return [(N, index) for N in self.N_options for index in self.index_options]
class BasicStarModel(StarModel):
"""Bare bones starmodel, without "obs" complication.
Use this for straight-up single, binary, or triple fits, no
mix of blended/unblended.
"""
use_emcee = False
def __init__(
self,
ic,
eep_bounds=None,
name="",
directory=".",
N=1,
maxAV=None,
max_distance=None,
halo_fraction=None,
ra=None,
dec=None,
obs=None,
use_emcee=False,
**kwargs
):
self._ic = ic
self.eep_bounds = eep_bounds if eep_bounds is not None else self.ic.eep_bounds
self.name = str(name)
self.use_emcee = use_emcee
self.ra = ra
self.dec = dec
self.obs = None
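        # Shared-parameter indices within the parameter vector:
        #   N=1: follows ic.param_names (eep plus mass or age, then feh, distance, AV)
        #   N=2: (eep_0, eep_1, age, feh, distance, AV)
        #   N=3: (eep_0, eep_1, eep_2, age, feh, distance, AV)
        # The *_index attributes set below record where each shared parameter lands.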
if N > 1 and ic.eep_replaces == "age":
raise ValueError("Can only fit mulitple stars with IsochroneInterpolator!")
if N == 1:
if ic.eep_replaces == "age":
self.mass_index = 0
self.feh_index = 2
self.distance_index = 3
self.AV_index = 4
elif ic.eep_replaces == "mass":
self.age_index = 1
self.feh_index = 2
self.distance_index = 3
self.AV_index = 4
elif N == 2:
self.age_index = 2
self.feh_index = 3
self.distance_index = 4
self.AV_index = 5
elif N == 3:
self.age_index = 3
self.feh_index = 4
self.distance_index = 5
self.AV_index = 6
self.N = N
# remove kwargs for backward compatibility
if "use_emcee" in kwargs:
del kwargs["use_emcee"]
self.kwargs = {}
for k, v in kwargs.items():
try:
val, unc = v
if not (np.isnan(val) or np.isnan(unc)):
self.kwargs[k] = (np.float64(val), np.float64(unc))
except TypeError:
logger.warning("kwarg {}={} ignored!".format(k, v))
self._bands = None
self._spec_props = None
self._props = None
self._param_names = None
self._priors = {
"mass": ChabrierPrior(),
"feh": FehPrior(),
"age": AgePrior(),
"distance": DistancePrior(),
"AV": AVPrior(),
}
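        # The EEP prior wraps the prior of whatever quantity EEP replaces in the
        # grid (mass for isochrone interpolators, age for evolution-track ones).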
self._priors["eep"] = EEP_prior(self.ic, self._priors[self.ic.eep_replaces], bounds=eep_bounds)
self._bounds = {
"mass": None,
"feh": None,
"age": None,
"distance": DistancePrior().bounds,
"AV": AVPrior().bounds,
"eep": self._priors["eep"].bounds,
}
# Reset bounds to match IC bounds. Likely different from default priors.
for par in ["mass", "feh", "age"]:
self.bounds(par)
if maxAV is not None:
self.set_bounds(AV=(0, maxAV))
if max_distance is not None:
self.set_bounds(distance=(0, max_distance))
else:
if "parallax" in kwargs:
value, unc = kwargs["parallax"] # plax in mas
if value > 0:
max_distance = 1.0 / value * 2000 # distance in pc
self.set_bounds(distance=(0, max_distance))
elif value < 0:
max_distance = 1.0 / np.abs(unc) * 2000 # Does this make any sense?
self.set_bounds(distance=(0, max_distance))
else: # Parallax is nan
pass
if halo_fraction is not None:
self._priors["feh"] = FehPrior(halo_fraction=halo_fraction)
self._directory = str(directory)
self._samples = None
self._derived_samples = None
def write_ini(self, root="."):
path = os.path.join(root, self.name)
if not os.path.exists(path):
os.makedirs(path)
c = configobj.ConfigObj(os.path.join(path, "star.ini"))
if self.ra is not None and self.dec is not None:
c["ra"] = self.ra
c["dec"] = self.dec
for k, v in self.kwargs.items():
c[k] = v
c.write()
@property
def labelstring(self):
if self.N == 1:
return "single"
elif self.N == 2:
return "binary"
elif self.N == 3:
return "triple"
@property
def param_names(self):
if self._param_names is None:
self._param_names = self.ic.param_names
if self.N == 2:
self._param_names = tuple(["eep_0", "eep_1"] + list(self.ic.param_names[1:]))
elif self.N == 3:
self._param_names = tuple(["eep_0", "eep_1", "eep_2"] + list(self.ic.param_names[1:]))
return self._param_names
@property
def bands(self):
if self._bands is None:
self._bands = [k for k in self.kwargs if k in self.ic.bc_grid.bands]
return self._bands
@property
def props(self):
if self._props is None:
self._props = [k for k in self.kwargs if k in self._not_a_band]
return self._props
@property
def spec_props(self):
if self._spec_props is None:
self._spec_props = [self.kwargs.get(k, (np.nan, np.nan)) for k in ["Teff", "logg", "feh"]]
return self._spec_props
def bounds(self, prop):
if prop in ["eep_0", "eep_1", "eep_2"]:
prop = "eep"
if self._bounds[prop] is not None:
return self._bounds[prop]
elif prop == "mass":
lo, hi = self.ic.model_grid.get_limits("mass")
self._bounds["mass"] = (lo, hi)
self._priors["mass"].bounds = (lo, hi)
elif prop == "feh":
lo, hi = self.ic.model_grid.get_limits("feh")
self._bounds["feh"] = (lo, hi)
self._priors["feh"].bounds = (lo, hi)
elif prop == "age":
lo, hi = self.ic.model_grid.get_limits("age")
self._bounds["age"] = (lo, hi)
self._priors["age"].bounds = (lo, hi)
else:
raise ValueError("Unknown property {}".format(prop))
return self._bounds[prop]
@property
def n_params(self):
return len(self.param_names)
def lnlike(self, pars):
if self.N == 1:
pars = np.array([pars[0], pars[1], pars[2], pars[3], pars[4]], dtype=float)
primary_pars = pars
elif self.N == 2:
primary_pars = np.array([pars[0], pars[2], pars[3], pars[4], pars[5]])
pars = np.array([pars[0], pars[1], pars[2], pars[3], pars[4], pars[5]], dtype=float)
elif self.N == 3:
primary_pars = np.array([pars[0], pars[3], pars[4], pars[5], pars[6]])
pars = np.array([pars[0], pars[1], pars[2], pars[3], pars[4], pars[5], pars[6]], dtype=float)
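        # `primary_pars` is the single-star 5-vector for the primary; it is used
        # for the asteroseismic interpolation below, while the full `pars` vector
        # (one eep per star) feeds the photometric/spectroscopic likelihood.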
spec_vals, spec_uncs = zip(*[prop for prop in self.spec_props])
if self.bands:
mag_vals, mag_uncs = zip(*[self.kwargs[b] for b in self.bands])
i_mags = [self.ic.bc_grid.interp.column_index[b] for b in self.bands]
else:
mag_vals, mag_uncs = np.array([], dtype=float), np.array([], dtype=float)
i_mags = np.array([], dtype=int)
lnlike = star_lnlike(
pars,
self.ic.param_index_order,
spec_vals,
spec_uncs,
mag_vals,
mag_uncs,
i_mags,
self.ic.model_grid.interp.grid,
self.ic.model_grid.interp.column_index["Teff"],
self.ic.model_grid.interp.column_index["logg"],
self.ic.model_grid.interp.column_index["feh"],
self.ic.model_grid.interp.column_index["Mbol"],
*self.ic.model_grid.interp.index_columns,
self.ic.bc_grid.interp.grid,
*self.ic.bc_grid.interp.index_columns
)
if "parallax" in self.kwargs:
plax, plax_unc = self.kwargs["parallax"]
lnlike += gauss_lnprob(plax, plax_unc, 1000.0 / pars[self.distance_index])
# Asteroseismology
if "nu_max" in self.kwargs:
model_nu_max, model_delta_nu = self.ic.interp_value(primary_pars, ["nu_max", "delta_nu"])
nu_max, nu_max_unc = self.kwargs["nu_max"]
lnlike += gauss_lnprob(nu_max, nu_max_unc, model_nu_max)
if "delta_nu" in self.kwargs:
delta_nu, delta_nu_unc = self.kwargs["delta_nu"]
                lnlike += gauss_lnprob(delta_nu, delta_nu_unc, model_delta_nu)
return lnlike
def lnprior(self, pars):
lnp = 0
if self.N == 2:
if pars[1] > pars[0]:
return -np.inf
elif self.N == 3:
            if pars[1] > pars[0] or pars[2] > pars[1]:  # require eep_0 >= eep_1 >= eep_2
return -np.inf
for val, par in zip(pars, self.param_names):
if par in ["eep", "eep_0", "eep_1", "eep_2"]:
if self.ic.eep_replaces == "age":
lnp += self._priors["eep"].lnpdf(
val, mass=pars[self.mass_index], feh=pars[self.feh_index]
)
elif self.ic.eep_replaces == "mass":
lnp += self._priors["eep"].lnpdf(val, age=pars[self.age_index], feh=pars[self.feh_index])
else:
lnp += self._priors[par].lnpdf(val)
return lnp
def mnest_prior(self, cube, ndim, nparams):
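        # MultiNest supplies points in the unit hypercube; rescale each
        # coordinate linearly onto the bounds of its parameter.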
for i, par in enumerate(self.param_names):
lo, hi = self.bounds(par)
cube[i] = (hi - lo) * cube[i] + lo
def mnest_loglike(self, cube, ndim, nparams):
"""loglikelihood function for multinest
"""
return self.lnpost(cube)
@property
def derived_samples(self):
if self._derived_samples is None:
self._make_samples()
return self._derived_samples
def _make_samples(self):
filename = "{}post_equal_weights.dat".format(self.mnest_basename)
try:
df = pd.read_csv(filename, names=self.param_names + ("lnprob",), delim_whitespace=True)
except OSError:
logger.error("Error loading chains from {}".format(filename))
raise
self._samples = df
if self.N == 1:
self._derived_samples = self.ic(*[df[c].values for c in self.param_names])
elif self.N == 2 or self.N == 3:
self._derived_samples = df.copy()
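            # Per-star derived quantities get _0/_1/_2 suffixes; shared parameters
            # (age, feh, distance, AV) keep their plain names, and total system
            # magnitudes are recombined with addmags() below.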
primary_params = ["eep_0", "age", "feh", "distance", "AV"]
primary_df = self.ic(*[df[c].values for c in primary_params])
column_map = {
c: "{}_0".format(c)
for c in primary_df.columns
if c not in ["eep", "eep_0", "age", "distance", "AV"]
}
primary_df = primary_df.rename(columns=column_map).drop(["age", "eep"], axis=1)
secondary_params = ["eep_1", "age", "feh", "distance", "AV"]
secondary_df = self.ic(*[df[c].values for c in secondary_params])
column_map = {
c: "{}_1".format(c)
for c in secondary_df.columns
if c not in ["eep", "eep_1", "age", "distance", "AV"]
}
secondary_df = secondary_df.rename(columns=column_map).drop(["age", "eep"], axis=1)
self._derived_samples = pd.concat([self._derived_samples, primary_df, secondary_df], axis=1)
if self.N == 2:
for b in self.bands:
mag_0 = self._derived_samples[b + "_mag_0"]
mag_1 = self._derived_samples[b + "_mag_1"]
self._derived_samples[b + "_mag"] = addmags(mag_0, mag_1)
if self.N == 3:
tertiary_params = ["eep_2", "age", "feh", "distance", "AV"]
tertiary_df = self.ic(*[df[c].values for c in tertiary_params])
column_map = {
c: "{}_2".format(c)
for c in tertiary_df.columns
if c not in ["eep", "eep_2", "age", "distance", "AV"]
}
tertiary_df = tertiary_df.rename(columns=column_map).drop(["eep", "age"], axis=1)
self._derived_samples = pd.concat([self._derived_samples, tertiary_df], axis=1)
for b in self.bands:
mag_0 = self._derived_samples[b + "_mag_0"]
mag_1 = self._derived_samples[b + "_mag_1"]
mag_2 = self._derived_samples[b + "_mag_2"]
self._derived_samples[b + "_mag"] = addmags(mag_0, mag_1, mag_2)
self._derived_samples["parallax"] = 1000.0 / df["distance"]
self._derived_samples["distance"] = df["distance"]
self._derived_samples["AV"] = df["AV"]
def sample_from_prior(self, n, values=False, require_valid=True):
if n == 0:
return pd.DataFrame(columns=self.param_names)
pars = []
columns = []
for p in self.param_names:
if p != "eep":
samples = self._priors[p].sample(n)
pars.append(samples)
columns.append(p)
df = pd.DataFrame(np.array(pars).T, columns=columns)
# Resample EEPs with proper weights
if self.ic.eep_replaces == "age":
df["eep"] = self._priors["eep"].sample(n, mass=df["mass"], feh=df["feh"])
else:
df["eep"] = self._priors["eep"].sample(n, age=df["age"], feh=df["feh"])
if require_valid:
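            # Rejection step: redraw any rows whose posterior is not finite
            # (e.g. outside the grid bounds) and splice them back in.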
pars = df[list(self.param_names)].values
lnprob = np.array([self.lnpost(pars[i, :]) for i in range(len(pars))])
bad = np.logical_not(np.isfinite(lnprob))
nbad = bad.sum()
if nbad:
new_values = self.sample_from_prior(nbad, require_valid=True)
new_values.index = df.iloc[bad, :].index
df.iloc[bad, :] = new_values
if values:
return df[list(self.param_names)].values
else:
return df
def corner_params(self, **kwargs):
fig = corner.corner(self.samples, labels=self.samples.columns, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig
@property
def physical_quantities(self):
if self.N == 1:
cols = ["mass", "radius", "age", "Teff", "logg", "feh", "distance", "AV"]
elif self.N == 2:
cols = [
"mass_0",
"radius_0",
"mass_1",
"radius_1",
"Teff_0",
"Teff_1",
"logg_0",
"logg_1",
"age",
"feh",
"distance",
"AV",
]
elif self.N == 3:
cols = [
"mass_0",
"radius_0",
"mass_1",
"radius_1",
"mass_2",
"radius_2",
"Teff_0",
"Teff_1",
"Teff_2",
"logg_0",
"logg_1",
"logg_2",
"age",
"feh",
"distance",
"AV",
]
return cols
@property
def observed_quantities(self):
if self.N == 1:
cols = ["{}_mag".format(b) for b in self.bands] + self.props
elif self.N == 2 or self.N == 3:
cols = ["{}_mag".format(b) for b in self.bands]
cols += [p if p in self.derived_samples.columns else "{}_0".format(p) for p in self.props]
return cols
def corner_derived(self, cols, **kwargs):
fig = corner.corner(self.derived_samples[cols], labels=cols, **kwargs)
fig.suptitle(self.name, fontsize=22)
return fig
def corner_physical(self, **kwargs):
        return self.corner_derived(self.physical_quantities, **kwargs)
def corner_observed(self, **kwargs):
cols = self.observed_quantities
truths = [self.kwargs[b][0] for b in self.bands] + [self.kwargs[p][0] for p in self.props]
ranges = [
(
min(truth - 0.01, self.derived_samples[col].min()),
max(truth + 0.01, self.derived_samples[col].max()),
)
for truth, col in zip(truths, cols)
]
return self.corner_derived(cols, truths=truths, range=ranges, **kwargs)
@property
def posterior_predictive(self):
chisq = 0
for b in self.bands:
val, unc = self.kwargs[b]
chisq += (val - self.derived_samples["{}_mag".format(b)]) ** 2 / unc ** 2
for p in self.props:
val, unc = self.kwargs[p]
chisq += (val - self.derived_samples[p]) ** 2 / unc ** 2
return chisq.mean() / (len(self.bands) + len(self.props))
@property
def map_pars(self):
i_max = self.samples.lnprob.idxmax()
return self.samples.loc[i_max].drop("lnprob").values
def save_hdf(self, filename, path="", overwrite=False, append=False):
"""Saves object data to HDF file (only works if MCMC is run)
Samples are saved to /samples location under given path,
:class:`ObservationTree` is saved to /obs location under given path.
:param filename:
Name of file to save to. Should be .h5 file.
:param path: (optional)
Path within HDF file structure to save to.
:param overwrite: (optional)
If ``True``, delete any existing file by the same name
before writing.
:param append: (optional)
If ``True``, then if a file exists, then just the path
within the file will be updated.
"""
if os.path.exists(filename):
with pd.HDFStore(filename) as store:
if path in store:
if overwrite:
os.remove(filename)
elif not append:
raise IOError(
"{} in {} exists. Set either overwrite or append option.".format(path, filename)
)
if self.samples is not None:
self.samples.to_hdf(filename, path + "/samples")
self.derived_samples.to_hdf(filename, path + "/derived_samples")
else:
pd.DataFrame().to_hdf(filename, path + "/samples")
            pd.DataFrame().to_hdf(filename, path + "/derived_samples")
with pd.HDFStore(filename) as store:
# store = pd.HDFStore(filename)
attrs = store.get_storer("{}/samples".format(path)).attrs
attrs.ic_type = type(self.ic)
attrs.ic_bands = list(self.ic.bands)
attrs.use_emcee = self.use_emcee
if hasattr(self, "_mnest_basename"):
attrs._mnest_basename = self._mnest_basename
attrs.kwargs = self.kwargs
attrs._bounds = self._bounds
attrs._priors = {k: v for k, v in self._priors.items() if k != "eep"}
attrs.eep_bounds = self.eep_bounds
attrs.name = self.name
attrs.directory = self.directory
@classmethod
def load_hdf(cls, filename, path="", name=None):
"""
A class method to load a saved StarModel from an HDF5 file.
File must have been created by a call to :func:`StarModel.save_hdf`.
:param filename:
H5 file to load.
:param path: (optional)
Path within HDF file.
:return:
:class:`StarModel` object.
"""
if not os.path.exists(filename):
raise IOError("{} does not exist.".format(filename))
with pd.HDFStore(filename) as store:
try:
samples = store[path + "/samples"]
derived_samples = store[path + "/derived_samples"]
attrs = store.get_storer(path + "/samples").attrs
except:
store.close()
raise
try:
ic = attrs.ic_type(attrs.ic_bands)
except AttributeError:
ic = attrs.ic_type
use_emcee = attrs.use_emcee
mnest = True
try:
basename = attrs._mnest_basename
except AttributeError:
mnest = False
bounds = attrs._bounds
priors = attrs._priors
eep_bounds = attrs.eep_bounds
kwargs = attrs.kwargs
directory = attrs.directory
if name is None:
try:
name = attrs.name
except:
name = ""
store.close()
mod = cls(ic, name=name, directory=directory, eep_bounds=eep_bounds, **kwargs)
mod._samples = samples
mod._derived_samples = derived_samples
if mnest:
mod._mnest_basename = basename
mod._priors.update(priors)
mod._bounds = bounds
return mod
def write_results(self, corner_kwargs=None, directory=None):
"""
kwargs are passed to `.save_hdf()` (e.g., `overwrite=True`)
"""
if self._samples is None:
raise RuntimeError("Run .fit() before .write_results()!")
if directory is None:
directory = self.directory
if corner_kwargs is None:
corner_kwargs = {}
# Save the StarModel to file
starmodel_filename = "{}starmodel.h5".format(os.path.basename(self.mnest_basename))
starmodel_path = os.path.join(directory, starmodel_filename)
self.save_hdf(starmodel_path, overwrite=True)
# Create and save corner plots
corner_basename = os.path.join(directory, os.path.basename(self.mnest_basename))
fig_params = self.corner_params(**corner_kwargs)
fig_params.savefig("{}params.png".format(corner_basename))
fig_observed = self.corner_observed(**corner_kwargs)
fig_observed.savefig("{}observed.png".format(corner_basename))
fig_physical = self.corner_physical(**corner_kwargs)
fig_physical.savefig("{}physical.png".format(corner_basename))
class SingleStarModel(BasicStarModel):
def __init__(self, *args, **kwargs):
kwargs["N"] = 1
super().__init__(*args, **kwargs)
class BinaryStarModel(BasicStarModel):
def __init__(self, *args, **kwargs):
kwargs["N"] = 2
super().__init__(*args, **kwargs)
class TripleStarModel(BasicStarModel):
def __init__(self, *args, **kwargs):
kwargs["N"] = 3
super().__init__(*args, **kwargs)
class IsoTrackModel(BasicStarModel):
param_names = ["eep", "mass", "age", "feh", "distance", "AV"]
def __init__(self, iso, track, **kwargs):
self._iso = iso
self._track = track
super().__init__(iso, **kwargs)
self.set_prior(eep=EEP_prior(self.track, self._priors["age"], bounds=self.eep_bounds))
@property
def ic(self):
return self.track
@property
def iso(self):
if type(self._iso) == type:
self._iso = self._iso()
return self._iso
@property
def track(self):
if type(self._track) == type:
self._track = self._track()
return self._track
def lnlike(self, pars):
# eep, age, feh, distance, AV
iso_pars = np.array([pars[0], pars[2], pars[3], pars[4], pars[5]], dtype=float)
# mass, eep, feh, distance, AV
track_pars = np.array([pars[1], pars[0], pars[3], pars[4], pars[5]], dtype=float)
spec_vals, spec_uncs = zip(*[prop for prop in self.spec_props])
if self.bands:
mag_vals, mag_uncs = zip(*[self.kwargs[b] for b in self.bands])
i_mags = [self.ic.bc_grid.interp.column_index[b] for b in self.bands]
else:
mag_vals, mag_uncs = np.array([], dtype=float), np.array([], dtype=float)
i_mags = np.array([], dtype=int)
iso_lnlike = star_lnlike(
iso_pars,
self.iso.param_index_order,
spec_vals,
spec_uncs,
mag_vals,
mag_uncs,
i_mags,
self.iso.model_grid.interp.grid,
self.iso.model_grid.interp.column_index["Teff"],
self.iso.model_grid.interp.column_index["logg"],
self.iso.model_grid.interp.column_index["feh"],
self.iso.model_grid.interp.column_index["Mbol"],
*self.iso.model_grid.interp.index_columns,
self.iso.bc_grid.interp.grid,
*self.iso.bc_grid.interp.index_columns
)
track_lnlike = star_lnlike(
track_pars,
self.track.param_index_order,
spec_vals,
spec_uncs,
mag_vals,
mag_uncs,
i_mags,
self.track.model_grid.interp.grid,
self.track.model_grid.interp.column_index["Teff"],
self.track.model_grid.interp.column_index["logg"],
self.track.model_grid.interp.column_index["feh"],
self.track.model_grid.interp.column_index["Mbol"],
*self.track.model_grid.interp.index_columns,
self.track.bc_grid.interp.grid,
*self.track.bc_grid.interp.index_columns
)
lnlike = iso_lnlike + track_lnlike
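        # Scoring the same observables against both grids means the combined
        # likelihood favors solutions consistent with isochrone and track alike.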
if "parallax" in self.kwargs:
lnlike += gauss_lnprob(*self.kwargs["parallax"], 1000.0 / pars[4])
return lnlike
def lnprior(self, pars):
lnp = 0
for val, par in zip(pars, self.param_names):
if par in ["eep", "eep_0", "eep_1", "eep_2"]:
lnp += self._priors["eep"].lnpdf(val, mass=pars[1], feh=pars[3])
else:
lnp += self._priors[par].lnpdf(val)
return lnp
########## Utility functions ###############
def N_options(N_stars, max_multiples=1, max_stars=2):
return [
N
for N in itertools.product(np.arange(max_stars) + 1, repeat=N_stars)
if (np.array(N) > 1).sum() <= max_multiples
]
def index_options(N_stars):
if N_stars == 1:
return [0]
options = []
for ind in itertools.product(range(N_stars), repeat=N_stars):
diffs = np.array(ind[1:]) - np.array(ind[:-1])
if ind[0] == 0 and diffs.max() <= 1:
options.append(ind)
return options
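# For example (following directly from the definitions above):
#   N_options(2)     -> [(1, 1), (1, 2), (2, 1)]   (at most one multiple system)
#   index_options(2) -> [(0, 0), (0, 1)]           (indices start at 0, step by <= 1)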
|
the-stack_106_23571 | # Usage::
#
# {{thumbnail:.files/img/favicon.png 200x100 exact_size}}
#
# where width = 200 & height = 100
#
# By default, the macro preserves the aspect ratio of the image. If you set 'exact_size', then the generated thumbnail
# will be of the same passed size exactly. 'exact_size' is optional
import os
DEFAULT_THUMB_SIZE = '150x100'
def main(j, args, params, tags, tasklet):
page = args.page
try:
import Image
except ImportError:
# pyflakes.ignore
from PIL import Image
space_name = args.doc.getSpaceName()
space_path = j.portal.server.active.getSpace(space_name).model.path
macro_params = args.cmdstr.split(' ')
img_url = macro_params[0]
if len(macro_params) >= 2:
thumb_size = macro_params[1]
else:
thumb_size = args.doc.docparams.get('thumb_size', DEFAULT_THUMB_SIZE)
if len(macro_params) >= 3:
exact_size = macro_params[2]
else:
exact_size = False
thumb_size = thumb_size or args.doc.docparams.get('thumb_size', DEFAULT_THUMB_SIZE)
width, height = [int(x) for x in thumb_size.split('x')]
img_path = img_url.strip('/')
full_img_path = os.path.join(space_path, img_path)
# Add 's_' to file name to tell that this is a thumbnail, and add width & height too
thumbnail_path = ('{0}s_{1}x{2}_').format(os.path.sep, width, height).join(os.path.split(full_img_path))
img_url_base, img_name = os.path.split(img_url)
thumbnail_url = os.path.join(space_name, img_url_base.strip('/'), r's_{0}x{1}_{2}'.format(width, height, img_name))
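    # e.g. '.files/img/favicon.png' at 200x100 is cached on disk as
    # '.files/img/s_200x100_favicon.png'.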
# If the thumbnail doesn't exist on the desk, generate it
if not os.path.exists(thumbnail_path):
im = Image.open(full_img_path)
if exact_size:
im = im.resize((width, height), Image.ANTIALIAS)
else:
im.thumbnail((width, height), Image.ANTIALIAS)
im.save(thumbnail_path)
page.addMessage('<img src="/{0}" />'.format(thumbnail_url))
params.result = page
return params
def match(j, args, params, tags, tasklet):
return True
|
the-stack_106_23572 | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from marshmallow import ValidationError
from polyaxon.schemas.fields.params import PARAM_REGEX
def validate_image(image, allow_none=False):
if not image:
if allow_none:
return
else:
raise ValidationError("Image is required")
param = PARAM_REGEX.search(image)
if param:
return
if " " in image:
raise ValidationError("Invalid docker image `{}`".format(image))
tagged_image = image.split(":")
if len(tagged_image) > 3:
raise ValidationError("Invalid docker image `{}`".format(image))
if len(tagged_image) == 3 and (
"/" not in tagged_image[1] or tagged_image[1].startswith("/")
):
raise ValidationError("Invalid docker image `{}`".format(image))
|
the-stack_106_23573 | """
Test growfactors.csv file contents.
"""
def test_growfactor_start_year(growfactors):
"""
Check that growfactors.csv can support Tax-Calculator Policy needs.
"""
first_growfactors_year = growfactors.index.min()
first_taxcalc_policy_year = 2013
assert first_growfactors_year <= first_taxcalc_policy_year
def test_growfactor_values(growfactors):
"""
Check that each grow factor value is in plausible min,max range.
"""
first_year = growfactors.index.min()
for fname in growfactors:
if fname != 'YEAR':
assert growfactors[fname][first_year] == 1.0
min_value = 0.50
max_value = 1.60
for fname in growfactors:
if fname != 'YEAR':
assert growfactors[fname].min() >= min_value
assert growfactors[fname].max() <= max_value
|