filename | text
---|---
the-stack_106_19157
|
from pygments.style import Style
from pygments.token import (
Comment, Error, Keyword, Literal, Name, Number, Operator, String, Text
)
class BaseSixteenStyle(Style):
base00 = '#000000'
base01 = '#121212'
base02 = '#222222'
base03 = '#333333'
base04 = '#999999'
base05 = '#c1c1c1'
base06 = '#999999'
base07 = '#c1c1c1'
base08 = '#5f8787'
base09 = '#aaaaaa'
base0a = '#626b67'
base0b = '#a5aaa7'
base0c = '#aaaaaa'
base0d = '#888888'
base0e = '#999999'
base0f = '#444444'
default_style = ''
background_color = base00
highlight_color = base02
styles = {
Text: base05,
Error: base08, # .err
Comment: f'italic {base03}', # .c
Comment.Preproc: base0f, # .cp
Comment.PreprocFile: base0b, # .cpf
Keyword: base0e, # .k
Keyword.Type: base08, # .kt
Name.Attribute: base0d, # .na
Name.Builtin: base0d, # .nb
Name.Builtin.Pseudo: base08, # .bp
Name.Class: base0d, # .nc
Name.Constant: base09, # .no
Name.Decorator: base09, # .nd
Name.Function: base0d, # .nf
Name.Namespace: base0d, # .nn
Name.Tag: base0e, # .nt
Name.Variable: base0d, # .nv
Name.Variable.Instance: base08, # .vi
Number: base09, # .m
Operator: base0c, # .o
Operator.Word: base0e, # .ow
Literal: base0b, # .l
String: base0b, # .s
String.Interpol: base0f, # .si
String.Regex: base0c, # .sr
String.Symbol: base09, # .ss
}
from string import capwords # noqa: E402
BaseSixteenStyle.__name__ = 'BaseSixteen{}Style'.format(
capwords('black-metal-marduk', '-').replace('-', '')
)
globals()[BaseSixteenStyle.__name__] = globals()['BaseSixteenStyle']
del globals()['BaseSixteenStyle']
del capwords
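# --- Usage sketch (not part of the original file) ---
# Assuming Pygments is installed, a Style subclass like the one above can be passed
# directly to a formatter; the snippet below is illustrative only.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter

    # the class was renamed dynamically above, so look it up by its final name
    style_cls = globals()['BaseSixteenBlackMetalMardukStyle']
    print(highlight("print('hello')", PythonLexer(), HtmlFormatter(style=style_cls)))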
|
the-stack_106_19159
|
# -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
from resto_client.base_exceptions import RestoClientUserError
from resto_client.cli.resto_client_cli import resto_client_run
from resto_client_tests.resto_client_cli_test import TestRestoClientCli
class VTestCliShow(TestRestoClientCli):
"""
Validation Tests of the cli show module
"""
def test_n_show_server(self) -> None:
"""
Validation test of show server in nominal cases
"""
resto_client_run(arguments=['set', 'server', 'kalideos'])
output1 = self.get_command_output(['show', 'server'])
output2 = self.get_command_output(['show', 'server', 'kalideos'])
# verify that 'show server X' and 'show server' (current server) give the same output
self.assertEqual(output1, output2)
# Check only the first line of the output
first_line = output2.splitlines()[0]
self.assertEqual(first_line, 'Server URL: https://www.kalideos.fr/resto2/')
def test_n_show_server_stats(self) -> None:
"""
Validation test of show server statistics in nominal cases
"""
output = self.get_command_output(['show', 'server', 'kalideos', '--stats'])
expect_out = (['No statistics available for KALCNES',
'No statistics available for KALHAITI',
'STATISTICS for all collections'])
self.assertEqual(output.splitlines()[7:10], expect_out)
def test_n_show_collection(self) -> None:
"""
Validation test of show collection in nominal cases: nothing persisted
"""
output = self.get_command_output(['show', 'collection',
'KALHAITI', '--server=kalideos'])
second_line = output.splitlines()[1]
self.assertEqual(second_line[1:-1].strip(), "Collection's Characteristics")
def test_n_show_current_collection(self) -> None:
"""
Validation test of show collection in nominal cases : current collection
"""
resto_client_run(arguments=['set', 'server', 'kalideos'])
resto_client_run(arguments=['set', 'collection', 'KALCNES'])
output = self.get_command_output(['show', 'collection'])
second_line = output.splitlines()[1]
self.assertEqual(second_line[1:-1].strip(), "Collection's Characteristics")
def test_n_show_other_collection(self) -> None:
"""
Validation test of show collection in nominal cases : another collection on the current
server
"""
resto_client_run(arguments=['set', 'server', 'kalideos'])
resto_client_run(arguments=['set', 'collection', 'KALCNES'])
output = self.get_command_output(['show', 'collection', 'KALHAITI'])
second_line = output.splitlines()[1]
self.assertEqual(second_line[1:-1].strip(), "Collection's Characteristics")
def test_n_show_settings(self) -> None:
"""
Validation test of show settings in nominal cases
"""
output = self.get_command_output(['show', 'settings'])
second_line = output.splitlines()[1]
self.assertEqual(second_line[1:-1].strip(), 'Settings from : resto_client_settings.json')
def test_n_show_feature(self) -> None:
"""
Validation test of show feature in nominal cases
With Kalideos and Creodias
"""
output = self.get_command_output(['show', 'feature', '1363714904970542',
'--collection=KALCNES', '--server=kalideos'])
first_line = output.splitlines()[0]
self.assertEqual(first_line, 'Metadata available for product 1363714904970542')
id_prod = ('/eodata/Envisat/Meris/FRS/2012/04/08/' +
'MER_FRS_1PPEPA20120408_105857_000005063113_00267_52867_0978.N1')
output = self.get_command_output(['show', 'feature', id_prod, '--collection=Envisat',
'--server=creodias'])
first_line = output.splitlines()[0]
self.assertEqual(first_line, 'Metadata available for product {}'.format(id_prod))
def test_d_show_server(self) -> None:
"""
Validation test of show server in degraded cases (no result found)
"""
with self.assertRaises(RestoClientUserError) as ctxt:
resto_client_run(arguments=['show', 'server'])
expected_msg = 'No persisted server and None is not a valid server name.'
self.assertEqual(expected_msg, str(ctxt.exception))
def test_d_show_collection(self) -> None:
"""
Validation test of show collection in degraded cases (no result found)
"""
with self.assertRaises(RestoClientUserError) as ctxt:
resto_client_run(arguments=['show', 'collection', 'oups', '--server=kalideos'])
expected_msg = 'No collection found with name oups'
self.assertEqual(expected_msg, str(ctxt.exception))
|
the-stack_106_19160
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : qichun tang
# @Contact : [email protected]
from collections import defaultdict
from copy import deepcopy
from math import inf
from time import time
from typing import Tuple, Union, List, Dict
import numpy as np
from ConfigSpace import Configuration
from sklearn.utils.validation import check_random_state
from ultraopt.structure import Job
from ultraopt.utils.config_space import add_configs_origin, get_dict_from_config
from ultraopt.utils.hash import get_hash_of_config
from ultraopt.utils.logging_ import get_logger
def runId_info():
return {"start_time": time(), "end_time": -1}
class BaseOptimizer():
def __init__(self):
self.logger = get_logger(self)
self.is_init = False
self.configId2config: Dict[str, dict] = {}
self.runId2info: Dict[Tuple[str, float], dict] = defaultdict(runId_info)
def initialize(self, config_space, budgets=(1,), random_state=42, initial_points=None, budget2obvs=None):
if self.is_init:
return
self.is_init = True
self.initial_points = initial_points
self.random_state = random_state
self.config_space = config_space
self.config_space.seed(random_state)
self.budgets = budgets
if budget2obvs is None:
budget2obvs = self.get_initial_budget2obvs(self.budgets)
self.budget2obvs = budget2obvs
# other variable
self.rng = check_random_state(self.random_state)
self.initial_points_index = 0
@classmethod
def get_initial_budget2obvs(cls, budgets):
return {budget: {"losses": [], "configs": [], "vectors": [], "locks": []} for budget in budgets}
def tell(self, config: Union[dict, Configuration], loss: float, budget: float = 1, update_model=True):
config = get_dict_from_config(config)
job = Job(get_hash_of_config(config))
job.kwargs = {
"budget": budget,
"config": config,
"config_info": {}
}
job.result = {
"loss": loss
}
self.new_result(job, update_model=update_model)
def new_result(self, job: Job, update_model=True):
##############################
### 1. update observations ###
##############################
if job.result is None:
# One could skip crashed results, but we decided to
# assign a +inf loss and count them as bad configurations
loss = np.inf
else:
# Same applies to non-numeric losses.
# Note that this means losses of minus infinity will count as bad!
loss = job.result["loss"] if np.isfinite(job.result["loss"]) else np.inf
budget = job.kwargs["budget"]
config_dict = job.kwargs["config"]
configId = get_hash_of_config(config_dict)
runId = (configId, budget)
if runId in self.runId2info:
self.runId2info[runId]["end_time"] = time()
self.runId2info[runId]["loss"] = loss
else:
self.logger.error(f"runId {runId} not in runId2info, it's impossible!!!")
# config_info = job.kwargs["config_info"]
config = Configuration(self.config_space, config_dict)
# add lock (it may be added twice, but that has no effect)
self.budget2obvs[budget]["locks"].append(config.get_array().copy())
self.budget2obvs[budget]["configs"].append(deepcopy(config))
self.budget2obvs[budget]["vectors"].append(config.get_array())
self.budget2obvs[budget]["losses"].append(loss)
losses = np.array(self.budget2obvs[budget]["losses"])
vectors = np.array(self.budget2obvs[budget]["vectors"])
###################################################################
### 2. Judge whether the EPM training conditions are satisfied ###
###################################################################
if not update_model:
return
self._new_result(budget, vectors, losses)
def _new_result(self, budget, vectors: np.ndarray, losses: np.ndarray):
raise NotImplementedError
def ask(self, budget=1, n_points=None, strategy="cl_min") -> Union[List[Tuple[dict, dict]], Tuple[dict, dict]]:
if n_points is None:
return self.get_config(budget)
supported_strategies = ["cl_min", "cl_mean", "cl_max"]
if not (isinstance(n_points, int) and n_points > 0):
raise ValueError(
"n_points should be int > 0, got " + str(n_points)
)
if strategy not in supported_strategies:
raise ValueError(
"Expected parallel_strategy to be one of " +
str(supported_strategies) + ", " + "got %s" % strategy
)
opt = deepcopy(self)
config_info_pairs = []
for i in range(n_points):
start_time = time()
config, config_info = opt.get_config(budget=budget)
config_info_pairs.append((config, config_info))
losses = opt.budget2obvs[budget]["losses"]
if strategy == "cl_min":
y_lie = np.min(losses) if losses else 0.0 # CL-min lie
elif strategy == "cl_mean":
y_lie = np.mean(losses) if losses else 0.0 # CL-mean lie
elif strategy == "cl_max":
y_lie = np.max(losses) if losses else 0.0 # CL-max lie
else:
raise NotImplementedError
opt.tell(config, y_lie)
self.register_config(config, budget, start_time=start_time)
return config_info_pairs
def get_config(self, budget) -> Tuple[dict, dict]:
# get max_budget
# calc by budget2epm
start_time = time()
max_budget = self.get_available_max_budget()
# initial points
if self.initial_points is not None and self.initial_points_index < len(self.initial_points):
while True:
if self.initial_points_index >= len(self.initial_points):
break
initial_point_dict = self.initial_points[self.initial_points_index]
initial_point = Configuration(self.config_space, initial_point_dict)
self.initial_points_index += 1
initial_point.origin = "User Defined"
if not self.is_config_exist(budget, initial_point):
self.logger.debug(f"Using initial points [{self.initial_points_index - 1}]")
return self.process_config_info_pair(initial_point, {}, budget)
config, config_info = self._get_config(budget, max_budget)
self.register_config(config, budget, start_time)
return config, config_info
def register_config(self, config, budget, start_time=None):
configId = get_hash_of_config(config)
runId = (configId, budget)
if runId in self.runId2info: # don't set second time
return
self.configId2config[configId] = config
info = self.runId2info[runId] # auto set start_time
if start_time:
info["start_time"] = start_time
def _get_config(self, budget, max_budget):
raise NotImplementedError
def is_config_exist(self, budget, config: Configuration):
vectors_list = []
budgets = [budget_ for budget_ in list(self.budget2obvs.keys()) if budget_ >= budget]
for budget_ in budgets:
vectors = np.array(self.budget2obvs[budget_]["locks"])
if vectors.size:
vectors_list.append(vectors)
if len(vectors_list) == 0:
return False
vectors = np.vstack(vectors_list)
if np.any(np.array(vectors.shape) == 0):
return False
vectors[np.isnan(vectors)] = -1
vector = config.get_array().copy()
vector[np.isnan(vector)] = -1
if np.any(np.all(vector == vectors, axis=1)):
return True
return False
def get_available_max_budget(self):
raise NotImplementedError
def process_config_info_pair(self, config: Configuration, info_dict: dict, budget):
self.budget2obvs[budget]["locks"].append(config.get_array().copy())
info_dict = deepcopy(info_dict)
if config.origin is None:
config.origin = "unknown"
info_dict.update({
"origin": config.origin
})
return config.get_dictionary(), info_dict
def process_all_configs_exist(self, info_dict, budget):
seed = self.rng.randint(1, 8888)
self.config_space.seed(seed)
config = self.config_space.sample_configuration()
add_configs_origin(config, "Initial Design")
info_dict.update({"sampling_different_samples_failed": True, "seed": seed})
return self.process_config_info_pair(config, info_dict, budget)
def pick_random_initial_config(self, budget, max_sample=1000, origin="Initial Design"):
i = 0
info_dict = {"model_based_pick": False}
while i < max_sample:
i += 1
config = self.config_space.sample_configuration()
add_configs_origin(config, origin)
if self.is_config_exist(budget, config):
self.logger.debug(f"The sample already exists and needs to be resampled. "
f"It's the {i}-th time sampling in random sampling. ")
else:
return self.process_config_info_pair(config, info_dict, budget)
return self.process_all_configs_exist(info_dict, budget)
def reset_time(self):
min_time = inf
for info in self.runId2info.values():
min_time = min(info["start_time"], min_time)
for info in self.runId2info.values():
info["start_time"] -= min_time
info["end_time"] -= min_time
def resume_time(self):
max_time = -inf
for info in self.runId2info.values():
max_time = max(info["end_time"], max_time)
delta_time = time() - max_time
for info in self.runId2info.values():
info["start_time"] += delta_time
info["end_time"] += delta_time
|
the-stack_106_19161
|
# Copyright 2019 Shift Cryptosecurity AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Library to interact with a BitBox02 device. """
from __future__ import print_function
import sys
__version__ = "5.3.0"
if sys.version_info.major != 3 or sys.version_info.minor < 6:
print(
"Python version is {}.{}, but 3.6+ is required by this script.".format(
sys.version_info.major, sys.version_info.minor
),
file=sys.stderr,
)
sys.exit(1)
try:
import hid
hid.device # pylint: disable=pointless-statement
except AttributeError:
print(
"Unable to reference hid.device; maybe hid package is masking "
"hidapi? Try:\n\t$ pip3 uninstall hid",
file=sys.stderr,
)
sys.exit(1)
# pylint: disable=wrong-import-position
from .bitbox02 import (
Backup,
BitBox02,
BTCInputType,
BTCOutputExternal,
BTCOutputInternal,
BTCOutputType,
BTCPrevTxInputType,
BTCPrevTxOutputType,
DuplicateEntryException,
hww,
btc,
common,
eth,
system,
)
from .bootloader import Bootloader
|
the-stack_106_19162
|
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from fcm.utils import get_device_model
Device = get_device_model()
class Command(BaseCommand):
args = ['<device_id>', '<message>']
help = 'Send message through fcm api'
def add_arguments(self, parser):
# these options store lists of values
parser.add_argument('--device_id', nargs='*', type=str)
parser.add_argument('--msg', nargs='*', type=str)
parser.add_argument(
'--devices',
action='store_true',
dest='devices',
default=False,
help='List of available devices',
)
parser.add_argument(
'--collapse-key',
dest='collapse_key',
default='message',
help='Set value of collapse_key flag, default is "message"',
)
def handle(self, *args, **options):
if options['devices']:
devices = Device.objects.filter(is_active=True)
self.stdout.write("Devices list:\n")
for device in devices:
self.stdout.write("(#%s) %s\n" % (device.id, device.name))
self.stdout.write("\n")
else:
collapse_key = options['collapse_key']
try:
id = options['device_id'][0]
message = options['msg'][0]
except IndexError:
raise CommandError(
"Invalid params. You have to provide both --device_id and --msg: "
"python manage.py fcm_messenger --device_id <device_id> --msg <msg>")
try:
device = Device.objects.get(pk=int(id), is_active=True)
except Device.DoesNotExist:
raise CommandError(
'Unknown device (id=%s). Check list: '
'python manage.py fcm_messenger --devices' % id)
else:
result = device.send_message(
{'message': message}, collapse_key=collapse_key)
self.stdout.write("[OK] device #%s (%s): %s\n" %
(id, device.name, result))
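# --- Usage sketch (not part of the original file) ---
# Based on the arguments defined above, and assuming the app is installed in a
# Django project, invocations would look roughly like:
#   python manage.py fcm_messenger --devices
#   python manage.py fcm_messenger --device_id 3 --msg "Hello" --collapse-key news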
|
the-stack_106_19163
|
# program r7_01.py
# Starting the program
import os
from r7_functions import *
def exif_anonymize():
directory = "."
images_files = [".jpg", ".jpeg", ".png"]
for dirpath, dirname, files in os.walk(directory):
for file in files:
image_file = os.path.join(dirpath, file)
ext = os.path.splitext(image_file)[1].lower()
print(f"Dla pliku: {image_file} rozszerzenie {ext}")
if ext in images_files:
print(f"Anonimizujemy plik: {image_file}")
if __name__ == "__main__":
exif_anonymize()
else:
print("Skrypt do wykonania samodzielnego.")
|
the-stack_106_19168
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateResolverEndpointDetails(object):
"""
The body for updating an existing resolver endpoint.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
#: A constant which can be used with the endpoint_type property of a UpdateResolverEndpointDetails.
#: This constant has a value of "VNIC"
ENDPOINT_TYPE_VNIC = "VNIC"
def __init__(self, **kwargs):
"""
Initializes a new UpdateResolverEndpointDetails object with values from keyword arguments. This class has the following subclasses, and if you are using this class as input
to a service operation, then you should favor using a subclass over the base class:
* :class:`~oci.dns.models.UpdateResolverVnicEndpointDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param endpoint_type:
The value to assign to the endpoint_type property of this UpdateResolverEndpointDetails.
Allowed values for this property are: "VNIC"
:type endpoint_type: str
"""
self.swagger_types = {
'endpoint_type': 'str'
}
self.attribute_map = {
'endpoint_type': 'endpointType'
}
self._endpoint_type = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['endpointType']
if type == 'VNIC':
return 'UpdateResolverVnicEndpointDetails'
else:
return 'UpdateResolverEndpointDetails'
@property
def endpoint_type(self):
"""
Gets the endpoint_type of this UpdateResolverEndpointDetails.
The type of resolver endpoint. VNIC is currently the only supported type.
Allowed values for this property are: "VNIC"
:return: The endpoint_type of this UpdateResolverEndpointDetails.
:rtype: str
"""
return self._endpoint_type
@endpoint_type.setter
def endpoint_type(self, endpoint_type):
"""
Sets the endpoint_type of this UpdateResolverEndpointDetails.
The type of resolver endpoint. VNIC is currently the only supported type.
:param endpoint_type: The endpoint_type of this UpdateResolverEndpointDetails.
:type: str
"""
allowed_values = ["VNIC"]
if not value_allowed_none_or_none_sentinel(endpoint_type, allowed_values):
raise ValueError(
"Invalid value for `endpoint_type`, must be None or one of {0}"
.format(allowed_values)
)
self._endpoint_type = endpoint_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
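# --- Usage sketch (not part of the original file) ---
# get_subtype() dispatches on the wire-format key defined in attribute_map above:
#   UpdateResolverEndpointDetails.get_subtype({"endpointType": "VNIC"})
#   # -> 'UpdateResolverVnicEndpointDetails'
# The @init_model_state_from_kwargs decorator populates attributes from keyword
# arguments, so callers would typically construct the VNIC subclass rather than
# this base class directly.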
|
the-stack_106_19169
|
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure the expected functioning of ``memote.suite.reporting.history``."""
from __future__ import absolute_import
from six import iteritems
import pytest
from memote.suite.reporting import HistoryReport
from memote.suite.results.result import MemoteResult
MOCK_CONFIG_SCORES = {
'cards': {
'scored': {
'sections': {
'scored_sub_section': {
'cases': [
'test_number',
'test_parametrized'
],
'title': 'Scored Sub Section',
'weight': 1.0
}
},
'title': 'Core Tests'
}
},
'weights': {
'test_number': 1.0
}
}
MOCK_CONFIG = {
'cards': {
'scored': {
'sections': {
'scored_sub_section': {
'cases': [
'test_parametrized'
],
'title': 'Scored Sub Section',
'weight': 1.0
}
},
'title': 'Core Tests'
},
'test_basic': {
'cases': [
'test_number'
],
'title': 'Basic Information'
}
},
'weights': {
'test_number': 1.0,
'test_parametrized': 1.0,
}
}
@pytest.fixture(scope="session")
def mock_history_manager():
"""Build a mock history manager that already contains results."""
result1 = MemoteResult({
"meta": {
"branch": "master",
"commit_author": "John Doe",
"commit_hash": "3f4665356a24d76a9461043f62a2b12dab56c75f",
"packages": {
"SomePackage": "0.1.0"},
"platform": "Darwin",
"python": "2.7.10",
"release": "14.5.0",
"timestamp": "2017-05-03 18:26:11+02:00"
},
"tests": {
"test_parametrized": {
"data": {
"parameter1": ["item2", "item3"],
"parameter2": ["item4", "item3"]
},
"duration": {
"parameter1": 0.12,
"parameter2": 0.32
},
"format_type": 'percent',
"message": {
"parameter1": "Some Message 1",
"parameter2": "Some Message 2"
},
"metric": {
"parameter1": 0.5,
"parameter2": 0.9
},
"result": {
"parameter1": "failed",
"parameter2": "failed"
},
"summary": "Some description of the test",
"title": "Parametrized Test"
},
"test_number": {
"data": ['x', 'y', 'z'],
"duration": 0.002,
"format_type": "count",
"message": "Some Message 3",
"result": "passed",
"summary": "Some description again",
"metric": 0.2,
"title": "Non-Parametrized Test"
}
}
})
result2 = MemoteResult({
"meta": {
"branch": "develop",
"commit_author": "John Doe",
"commit_hash": "6e30d6236f5d47ebb4be39253eaa6a5dcb487687",
"packages": {
"SomePackage": "0.1.0"},
"platform": "Darwin",
"python": "2.7.10",
"release": "14.5.0",
"timestamp": "2017-05-03 18:50:11+02:00"
},
"tests": {
"test_parametrized": {
"data": {
"parameter1": ["item1", "item2"],
"parameter2": ["item2", "item3"]
},
"duration": {
"parameter1": 0.2,
"parameter2": 0.1
},
"format_type": 'percent',
"message": {
"parameter1": "Some Message 1",
"parameter2": "Some Message 2"
},
"metric": {
"parameter1": 1.0,
"parameter2": 0.0
},
"result": {
"parameter1": "failed",
"parameter2": "failed"
},
"summary": "Some description of the test",
"title": "Parametrized Test"
},
"test_number": {
"data": ['x', 'y', 'z'],
"duration": 0.002,
"format_type": "count",
"message": "Some Message 3",
"result": "passed",
"summary": "Some description again",
"metric": 0.6,
"title": "Non-Parametrized Test"
}
}
})
branch_structure = {
"commits": {
"3f4665356a24d76a9461043f62a2b12dab56c75f": {
"timestamp": "2017-05-03 18:26:11+02:00",
"author": "John Doe",
"email": "[email protected]"
},
"6e30d6236f5d47ebb4be39253eaa6a5dcb487687": {
"timestamp": "2017-05-03 18:50:11+02:00",
"author": "John Doe",
"email": "[email protected]"
}
},
"branches": {
"master": ["3f4665356a24d76a9461043f62a2b12dab56c75f"],
"develop": ["6e30d6236f5d47ebb4be39253eaa6a5dcb487687",
"3f4665356a24d76a9461043f62a2b12dab56c75f"]
}
}
results = {
"3f4665356a24d76a9461043f62a2b12dab56c75f": result1,
"6e30d6236f5d47ebb4be39253eaa6a5dcb487687": result2,
}
# Create mock history manager.
class History(object):
def __init__(self, **kwargs):
super(History, self).__init__(**kwargs)
self._results = results
self._history = branch_structure
def get_result(self, commit):
return results[commit]
def iter_branches(self):
return iteritems(self._history["branches"])
def build_branch_structure(self):
pass
def load_history(self):
pass
return History()
def test_structure(mock_history_manager):
"""Expect this one thing to be true."""
history = mock_history_manager
results = HistoryReport(history, MOCK_CONFIG).result
assert set(results.keys()) == set(['cards', 'tests', 'score', 'weights'])
assert set(results["score"].keys()) == set(['total_score'])
assert set(
results["score"]["total_score"].keys()) == \
set(['history', 'format_type'])
assert set(results["score"]["total_score"]["history"][0]) == \
set(["commit", "metric", "branch"])
assert set(
results["tests"]["test_parametrized"]["history"]["parameter1"][0]
) == set(["commit", "metric", "branch", "data", "result"])
assert set(results["tests"]["test_number"]["history"][0]) == \
set(["commit", "metric", "branch", "data", "result"])
def test_score_param(mock_history_manager):
"""Expect all scores to be calculated correctly for parametrized tests."""
history = mock_history_manager
score_collection = HistoryReport(
history, MOCK_CONFIG_SCORES).result['score']
for score in score_collection["total_score"]["history"]:
# Equation for result 1:
# ((((1-0.5)+(1-0.9))/2 + (1-0.2)*1)*1)/(1+1*1)*1
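# i.e. ((0.5 + 0.1)/2 + 0.8) / 2 = 1.1 / 2 = 0.55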
if score["commit"] == "3f4665356a24d76a9461043f62a2b12dab56c75f":
assert score["metric"] == 0.55
# Equation for result 2:
# ((((1-0)+(1-1))/2 + (1-0.6)*1)*1)/(1+1*1)*1
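# i.e. ((1.0 + 0.0)/2 + 0.4) / 2 = 0.9 / 2 = 0.45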
if score["commit"] == "6e30d6236f5d47ebb4be39253eaa6a5dcb487687":
assert score["metric"] == 0.45
|
the-stack_106_19170
|
# Copyright (C) 2018 Corefracture, Chris Coleman.
# www.corefracture.com - @corefracture
#
# Licensed under the MIT License, https://opensource.org/licenses/MIT
# See LICENSE.md for more details
import logging
from enum import Enum
LOGGER = logging.getLogger(__name__)
class NetemType(Enum):
LATENCY = "0"
JITTER = "1"
DUPE = "2"
LOSS = "3"
REORDER = "4"
CORRUPT = "5"
BANDWIDTH = "6"
class NetemSettings:
def __init__(self):
self._rate = '1000000'
self._loss = NetemLoss()
self._dupe = NetemDupe()
self._reorder = NetemReorder()
self._corrupt = NetemCorrupt()
self._latency = NetemLatency()
self._jitter = NetemJitter()
def netem_setting(self, setting_type, set_val=None):
if setting_type == NetemType.LATENCY:
if(set_val is not None):
self._latency.set(set_val)
return self._latency
if setting_type is NetemType.JITTER:
if(set_val is not None):
self._jitter.set(set_val)
return self._jitter
if setting_type is NetemType.CORRUPT:
if(set_val is not None):
self._corrupt.set(set_val)
return self._corrupt
if setting_type is NetemType.LOSS:
if(set_val is not None):
self._loss.set(set_val)
return self._loss
if setting_type is NetemType.DUPE:
if(set_val is not None):
self._dupe.set(set_val)
return self._dupe
if setting_type is NetemType.REORDER:
if(set_val is not None):
self._reorder.set(set_val)
return self._reorder
if setting_type is NetemType.BANDWIDTH:
if(set_val is not None):
self.set_bandwidth(set_val)
return self._rate
def set_bandwidth(self, rate_kbps):
try:
rate_kbps = str(int(float(rate_kbps)))
self._rate = rate_kbps
return self._rate
except Exception as exp:
print(exp.__str__())
#TODO: cf: logging
return
def set_setting(self, setting_type, setting_val):
self.netem_setting(NetemType(setting_type), set_val=setting_val)
class NetemAtrrib:
def __init__(self, val, corr_percent=None):
self._base_val = val
self._corr_percent = corr_percent
self._attrib_str = None
@staticmethod
def is_base_val_zero(val):
try:
test = float(val)
return test <= 0
except Exception as exp:
return True
def set(self, val):
try:
# verify the cast to float so we know we're dealing with proper values,
# and add a leading 0 if there isn't one (e.g. ".5" -> "0.5")
self._base_val = str(float(val))
except Exception as exp:
self._base_val = "0"
# TODO: Logging for exp here
return
def get_val(self):
return self._base_val
def set_corr_percent(self, corr_percent):
try:
self._corr_percent = float(corr_percent)
except Exception as exp:
# TODO: cf: Logging for exp
return
def get_corr_percent(self):
return self._corr_percent
def __str__(self):
if NetemAtrrib.is_base_val_zero(self._base_val) is True:
return ""
attrib_str = '{0} {1}%'.format(self._attrib_str, self._base_val)
if self._corr_percent is not None:
attrib_str = "{0} {1}%".format(attrib_str, self._corr_percent)
return attrib_str
class NetemCorrupt(NetemAtrrib):
def __init__(self, base_corrupt='0', correlation=None):
NetemAtrrib.__init__(self, base_corrupt, correlation)
self._attrib_str = "corrupt"
class NetemReorder(NetemAtrrib):
def __init__(self, base_reorder='0', correlation=None):
NetemAtrrib.__init__(self, base_reorder, correlation)
self._attrib_str = "reorder"
class NetemDupe(NetemAtrrib):
def __init__(self, base_dupe='0', correlation=None):
NetemAtrrib.__init__(self, base_dupe, correlation)
self._attrib_str = "duplicate"
class NetemLoss(NetemAtrrib):
def __init__(self, base_loss='0', correlation=None):
NetemAtrrib.__init__(self, base_loss, correlation)
self._attrib_str = "loss"
class NetemLatency(NetemAtrrib):
def __init__(self, base_lat='0', correlation=None):
NetemAtrrib.__init__(self, base_lat, correlation)
self._attrib_str = "delay"
def __str__(self):
if NetemAtrrib.is_base_val_zero(self._base_val):
self._base_val = "0"
attrib_str = '{0} {1}ms'.format(self._attrib_str, self._base_val)
return attrib_str
class NetemJitter(NetemAtrrib):
def __init__(self, base_lat='0', correlation=None):
NetemAtrrib.__init__(self, base_lat, correlation)
# JITTER IS SET WITH DELAY, therefore do not set the attrib name or correlation
self._attrib_str = ""
def __str__(self):
if NetemAtrrib.is_base_val_zero(self._base_val):
return ""
attrib_str = '{0}ms'.format(self._base_val)
return attrib_str
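# --- Usage sketch (not part of the original module) ---
# A minimal example of composing settings with the classes above; the values are
# illustrative. Each attribute renders the netem fragment built in its __str__.
if __name__ == "__main__":
    settings = NetemSettings()
    settings.set_setting(NetemType.LATENCY, "120")
    settings.set_setting(NetemType.LOSS, "1.5")
    print(settings.netem_setting(NetemType.LATENCY))  # -> "delay 120.0ms"
    print(settings.netem_setting(NetemType.LOSS))     # -> "loss 1.5%"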
|
the-stack_106_19172
|
#autotranslate.py
# pip install easyread
# pip install openpyxl
from easyread.translator import Translate
from openpyxl import Workbook
from datetime import datetime
article = open('article.txt','r',encoding='utf-8')
article = article.read()
article = article.split()
print('Count: ',len(article))
result = []
for word in article:
#print(word)
res = Translate(word)
if res != None:
#print(res['meaning'])
result.append([word,res['meaning']])
# result.append(['Cat','[N] แมว'])
#print(result)
excelfile = Workbook()
sheet = excelfile.active
header = ['Vocab','Translate']
sheet.append(header)
for rs in result:
sheet.append(rs)
dt = datetime.now().strftime('%Y-%m-%d %H%M%S')
excelfile.save('Vocab - {}.xlsx'.format(dt))
|
the-stack_106_19173
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import os
import pandas as pd
from pybel.constants import IS_A
from pybel.utils import ensure_quotes
from pybel_tools.constants import PYBEL_RESOURCES_ENV
from pybel_tools.definition_utils import write_namespace, get_date
from pybel_tools.document_utils import write_boilerplate
from pybel_tools.resources import HGNC_HUMAN_GENES, HGNC_GENE_FAMILIES, CONFIDENCE
from pybel_tools.resources import deploy_namespace, deploy_knowledge, get_today_arty_namespace
from .constants import HGNC_GENE_FAMILY_URL
log = logging.getLogger(__name__)
HGNC_GENE_FAMILIES_NAMESPACE_MODULE_NAME = 'hgnc-gene-families'
HGNC_GENE_FAMILIES_MEMBERSHIP_MODULE_NAME = 'hgnc-gene-family-membership'
def get_data():
"""Gets the source data.
:return: A data frame containing the original source data
:rtype: pandas.DataFrame
"""
df = pd.read_csv(HGNC_GENE_FAMILY_URL, sep='\t')
return df
def get_gfam_names(df=None):
"""Processes the source data.
:param pandas.DataFrame df: A data frame containing the original data source
:return: Returns the set of current HGNC Gene Family names
:rtype: set[str]
"""
df = get_data() if df is None else df
entries = set(df['Gene family description'].unique())
return entries
def write_belns(file, df=None):
"""Writes the HGNC Gene Families as a BEL namespace file.
:param file file: A writable file or file-like
:param pandas.DataFrame df: A data frame containing the original data source
"""
values = get_gfam_names(df=df)
write_namespace(
namespace_name="HGNC Gene Families",
namespace_keyword="GFAM",
namespace_domain="Gene and Gene Products",
namespace_species='9606',
namespace_description="HUGO Gene Nomenclature Committee (HGNC) curated gene families",
citation_name=HGNC_GENE_FAMILY_URL,
author_name='Charles Tapley Hoyt',
author_contact="[email protected]",
author_copyright='Creative Commons by 4.0',
values=values,
functions="GRP",
file=file
)
def write_hgnc_gene_families(file, df=None):
"""Writes the HGNC gene family hierarchy a BEL script.
:param file file: A writable file or file-like
:param pandas.DataFrame df: A data frame containing the original data source
"""
df = get_data() if df is None else df
write_boilerplate(
document_name='HGNC Gene Family Definitions',
authors='Charles Tapley Hoyt',
contact='[email protected]',
licenses='Creative Commons by 4.0',
copyright='Copyright (c) 2017 Charles Tapley Hoyt. All Rights Reserved.',
description="""This BEL document represents the gene families curated by HGNC, describing various functional, structural, and logical classifications""",
namespace_dict={
'HGNC': HGNC_HUMAN_GENES,
'GFAM': HGNC_GENE_FAMILIES,
},
namespace_patterns={},
annotations_dict={'Confidence': CONFIDENCE},
annotations_patterns={},
file=file
)
print('SET Citation = {"PubMed","HGNC","25361968"}', file=file)
print('SET Evidence = "HGNC Definitions"', file=file)
print('SET Confidence = "Axiomatic"', file=file)
for _, gfam, gene in df[['Gene family description', 'Approved Symbol']].itertuples():
gfam_clean = ensure_quotes(gfam.strip())
gene_clean = ensure_quotes(gene.strip())
print('g(HGNC:{}) {} g(GFAM:{})'.format(gene_clean, IS_A, gfam_clean), file=file)
def add_to_pybel_resources():
"""Gets the data and writes BEL namespace files to the PyBEL resources directory"""
if PYBEL_RESOURCES_ENV not in os.environ:
raise ValueError('{} not in os.environ'.format(PYBEL_RESOURCES_ENV))
log.info('pybel resources at: %s', os.environ[PYBEL_RESOURCES_ENV])
df = get_data()
namespace_path = os.path.join(os.environ[PYBEL_RESOURCES_ENV], 'namespace', HGNC_GENE_FAMILIES_NAMESPACE_MODULE_NAME + '.belns')
with open(namespace_path, 'w') as file:
write_belns(file, df=df)
membership_path = os.path.join(os.environ[PYBEL_RESOURCES_ENV], 'knowledge', HGNC_GENE_FAMILIES_MEMBERSHIP_MODULE_NAME + '.bel')
with open(membership_path, 'w') as file:
write_hgnc_gene_families(file, df=df)
def deploy_to_arty():
"""Gets the data and writes BEL namespace file to artifactory"""
df = get_data()
# Deploy Namespace
arty_qname = get_today_arty_namespace(HGNC_GENE_FAMILIES_NAMESPACE_MODULE_NAME)
with open(arty_qname, 'w') as file:
write_belns(file, df=df)
deploy_namespace(arty_qname, HGNC_GENE_FAMILIES_NAMESPACE_MODULE_NAME)
# Deploy Membership Knowledge
arty_qname = '{}-{}.bel'.format(HGNC_GENE_FAMILIES_MEMBERSHIP_MODULE_NAME, get_date())
with open(arty_qname, 'w') as file:
write_hgnc_gene_families(file, df=df)
deploy_knowledge(arty_qname, HGNC_GENE_FAMILIES_MEMBERSHIP_MODULE_NAME)
|
the-stack_106_19174
|
from math import inf
import os
import numpy as np
import datetime
from . import globs
################################
# line search functions
################################
def ternary_ls(obj_fct, x, direction, accuracy):
gamma_ub = 1
gamma_lb = 0
# initialize
y = x + direction # end point
endpoint_val = obj_fct.evaluate(y)
val_y = endpoint_val
val_x = obj_fct.evaluate(x)
i = 0
while abs(val_y - val_x) > accuracy:
zx = x + 1/float(3) * (y - x)
zy = x + 2/float(3) * (y - x)
value_zx = obj_fct.evaluate(zx)
value_zy = obj_fct.evaluate(zy)
if value_zx < value_zy:
y = zy
gamma_ub = gamma_lb + (gamma_ub-gamma_lb) * 2/3
val_y = value_zy # update value y because position of y changed
else:
x = zx
gamma_lb = gamma_lb + (gamma_ub-gamma_lb) * 1/3
val_x = value_zx # update value x because position of x changed
i += 1
return gamma_lb, i
def backtracking_ls_FW(objectiveFunction, x, grad, direction, steps):
step_size = 1
grad_direction = np.inner(grad, direction)
i = 0
# assert grad_direction <= 0, 'grad_direction is {}'.format(grad_direction)
if grad_direction == 0:
return 0, i
evalu_oldpint = objectiveFunction.evaluate(x)
evalu_newpoint = objectiveFunction.evaluate(x + step_size * direction)
while (evalu_newpoint - evalu_oldpint) > globs.ls_eps * step_size * grad_direction:
if i > steps:
if evalu_oldpint - evalu_newpoint >= 0:
return step_size, i
else:
return 0, i
step_size *= globs.ls_tau
evalu_newpoint = objectiveFunction.evaluate(x + step_size * direction)
i += 1
# assert (evalu_oldpint - evalu_newpoint >= 0)
return step_size, i
def backtracking_ls_on_alpha(alpha_list, objectiveFunction, s_list, step_size_ub, direction, steps,
func_val_improve_last, strict_dropSteps = True):
"""
backtracking line search method from https://people.maths.ox.ac.uk/hauser/hauser_lecture2.pdf
used in the sub-algorithm
"""
step_size = step_size_ub
grad_direction = -np.inner(direction, direction)
x_old = np.dot(np.transpose(s_list), alpha_list)
x_new = np.dot(np.transpose(s_list), alpha_list + step_size * direction) # end point
evalu_oldpint = objectiveFunction.evaluate(x_old)
evalu_newpoint = objectiveFunction.evaluate(x_new)
# relax dropping criterion
if func_val_improve_last != 'N/A':
if not strict_dropSteps:
drop_criteria = min(0.5 * func_val_improve_last, globs.ls_eps)
else:
drop_criteria = 0
if evalu_newpoint <= evalu_oldpint + drop_criteria:
return step_size, 0, 'P'
# begin line search
i = 0
while (evalu_newpoint - evalu_oldpint) > globs.ls_eps*step_size * grad_direction:
if i > steps and evalu_newpoint - evalu_oldpint >= 0:
return 0, i, 'PS'
step_size *= globs.ls_tau
x_new = np.dot(np.transpose(s_list), alpha_list + step_size * direction)
evalu_newpoint = objectiveFunction.evaluate(x_new)
i += 1
if evalu_newpoint >= evalu_oldpint:
return 0, i, 'PS'
return step_size, i, 'P'
################################
# cache functions:
################################
def inSequence(array, sequence):
"""Return True when Numpy array is an element of sequence.
>>> inSequence(np.array([1,2,3]), [np.array([0,1,2]),
... np.array([1.0, 2.0, 3.0])])
True
>>> inSequence(np.array([1,2,3]), [np.array([0,1,2]),
... np.array([-2.0, 1.0, 3.0])])
False
"""
for i in sequence:
if np.all(array == i):
return True
return False
def removeFromCache(x):
"""Remove point x from cache if there.
>>> _ignore = reset_cache()
>>> for i in range(3):
... _ignore = addToCache(np.array([i]))
>>> removeFromCache(np.array([2]))
point deleted from cache, current number of points in cache 2
>>> removeFromCache(np.array([3]))
>>> removeFromCache(np.array([1]))
point deleted from cache, current number of points in cache 1
"""
current_cache_length = len(globs.previousPoints)
key = hash(x.tostring())
try:
del globs.previousPoints[key]
except KeyError:
pass
else:
assert current_cache_length - len(globs.previousPoints) == 1
print('point deleted from cache, current number of points in cache', len(globs.previousPoints))
def addToCache(x, clean=None):
if clean:
result = dict(globs.previousPoints)
current_value = np.inner(x, x)
for key, y in globs.previousPoints.items():
if np.inner(x, y) > current_value:
result.pop(key)
globs.previousPoints = result
key = hash(x.tostring())
if key not in globs.previousPoints:
globs.previousPoints[key] = x
def checkForCache(c, goal):
"""Search for a cached numpy array with small objective value.
c: objective
goal: upper bound on the acceptable objective value.
>>> reset_cache()
>>> _ignore = addToCache(np.array([1., 0.]))
>>> _ignore = addToCache(np.array([0., 1.]))
>>> checkForCache(np.array([1,2]), goal=1)
array([ 1., 0.])
>>> checkForCache(np.array([2,1]), goal=1)
array([ 0., 1.])
>>> checkForCache(np.array([1,3]), goal=.5)
"""
for x in globs.previousPoints.values():
if np.inner(c, x) <= goal:
break
else:
x = None
return x
def checkForPairwiseCache(c, c_tilde, goal):
mi = inf
x_plus = None
mi_tilde = inf
x_minus = None
for x in globs.previousPoints.values():
if np.inner(c, x) < mi:
mi = np.inner(c, x)
x_plus = x
if np.inner(c_tilde, x) < mi_tilde:
mi_tilde = np.inner(c_tilde, x)
x_minus = x
if mi + mi_tilde <= goal:
break
return x_plus, x_minus
def find_closest_cache(c):
m = inf
m_x = None
for x in globs.previousPoints.values():
if np.inner(c, x) < m:
m_x = x
m = np.inner(c, x)
return m_x
def reset_cache():
# reset the global cache of previously seen points
globs.previousPoints = {}
####################################
# for reporting on console
####################################
def console_header(all_headers, run_config):
# under normal mode
header = all_headers[:3] + all_headers[4:6] + [all_headers[7]]
width = np.array([12, 8, 22, 22, 12, 12])
# it will be: ['Iteration', 'Type', 'Function Value', 'Dual Bound', '#Atoms', 'WTime']
if run_config['verbosity'] == 'verbose':
header += [all_headers[3]] # add 'Primal Improve' to the console output
width = np.append(width, 22)
return header, width
def console_info(all_info, run_config):
# under normal mode
info = all_info[:3] + all_info[4:6] + [all_info[7]]
if run_config['verbosity'] == 'verbose':
info += [all_info[3]] # add 'Primal Improve' to the console output
return info
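# --- Usage sketch (not part of the original module) ---
# A toy run of ternary_ls on a 1-D quadratic. It assumes the module is executed in
# its package context (e.g. python -m <package>.<module>) so the relative import of
# globs above resolves; the objective class below is illustrative.
if __name__ == "__main__":
    class _Quadratic:
        def evaluate(self, point):
            # f(p) = (p - 0.2)^2, minimised at p = 0.2
            return float((point[0] - 0.2) ** 2)

    x0 = np.array([0.0])
    direction = np.array([1.0])  # search the segment from 0.0 to 1.0
    gamma, iters = ternary_ls(_Quadratic(), x0, direction, accuracy=1e-6)
    # gamma should end up close to 0.2 (the early-stopping test can, in principle,
    # trigger a little sooner on near-symmetric brackets)
    print("step size:", gamma, "iterations:", iters)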
|
the-stack_106_19175
|
import json
from django.contrib import messages
from django.core.files import File
from django.db import transaction
from django.db.models import Count, F, Prefetch, Q
from django.forms.models import inlineformset_factory
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import resolve, reverse
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.generic import ListView
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView, SingleObjectMixin
from django.views.generic.edit import DeleteView
from django_countries.fields import Country
from pretix.base.forms import I18nFormSet
from pretix.base.models import (
CartPosition, Item, ItemCategory, ItemVariation, Order, Question,
QuestionAnswer, QuestionOption, Quota, Voucher,
)
from pretix.base.models.event import SubEvent
from pretix.base.models.items import ItemAddOn, ItemBundle
from pretix.base.services.tickets import invalidate_cache
from pretix.base.signals import quota_availability
from pretix.control.forms.item import (
CategoryForm, ItemAddOnForm, ItemAddOnsFormSet, ItemBundleForm,
ItemBundleFormSet, ItemCreateForm, ItemUpdateForm, ItemVariationForm,
ItemVariationsFormSet, QuestionForm, QuestionOptionForm, QuotaForm,
)
from pretix.control.permissions import (
EventPermissionRequiredMixin, event_permission_required,
)
from pretix.control.signals import item_forms, nav_item
from . import ChartContainingView, CreateView, PaginationMixin, UpdateView
class ItemList(ListView):
model = Item
context_object_name = 'items'
# paginate_by = 30
# Pagination is disabled as it is very unlikely to be necessary
# here and could cause problems with the "reorder-within-category" feature
template_name = 'pretixcontrol/items/index.html'
def get_queryset(self):
return Item.objects.filter(
event=self.request.event
).annotate(
var_count=Count('variations')
).prefetch_related("category").order_by(
'category__position', 'category', 'position'
)
def item_move(request, item, up=True):
"""
This is a helper function to avoid duplicating code in item_move_up and
item_move_down. It takes an item and a direction and then tries to bring
all items for this category in a new order.
"""
try:
item = request.event.items.get(
id=item
)
except Item.DoesNotExist:
raise Http404(_("The requested product does not exist."))
items = list(request.event.items.filter(category=item.category).order_by("position"))
index = items.index(item)
if index != 0 and up:
items[index - 1], items[index] = items[index], items[index - 1]
elif index != len(items) - 1 and not up:
items[index + 1], items[index] = items[index], items[index + 1]
for i, item in enumerate(items):
if item.position != i:
item.position = i
item.save()
messages.success(request, _('The order of items has been updated.'))
@event_permission_required("can_change_items")
def item_move_up(request, organizer, event, item):
item_move(request, item, up=True)
return redirect('control:event.items',
organizer=request.event.organizer.slug,
event=request.event.slug)
@event_permission_required("can_change_items")
def item_move_down(request, organizer, event, item):
item_move(request, item, up=False)
return redirect('control:event.items',
organizer=request.event.organizer.slug,
event=request.event.slug)
class CategoryDelete(EventPermissionRequiredMixin, DeleteView):
model = ItemCategory
form_class = CategoryForm
template_name = 'pretixcontrol/items/category_delete.html'
permission = 'can_change_items'
context_object_name = 'category'
def get_object(self, queryset=None) -> ItemCategory:
try:
return self.request.event.categories.get(
id=self.kwargs['category']
)
except ItemCategory.DoesNotExist:
raise Http404(_("The requested product category does not exist."))
@transaction.atomic
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
for item in self.object.items.all():
item.category = None
item.save()
success_url = self.get_success_url()
self.object.log_action('pretix.event.category.deleted', user=self.request.user)
self.object.delete()
messages.success(request, _('The selected category has been deleted.'))
return HttpResponseRedirect(success_url)
def get_success_url(self) -> str:
return reverse('control:event.items.categories', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
class CategoryUpdate(EventPermissionRequiredMixin, UpdateView):
model = ItemCategory
form_class = CategoryForm
template_name = 'pretixcontrol/items/category.html'
permission = 'can_change_items'
context_object_name = 'category'
def get_object(self, queryset=None) -> ItemCategory:
url = resolve(self.request.path_info)
try:
return self.request.event.categories.get(
id=url.kwargs['category']
)
except ItemCategory.DoesNotExist:
raise Http404(_("The requested product category does not exist."))
@transaction.atomic
def form_valid(self, form):
messages.success(self.request, _('Your changes have been saved.'))
if form.has_changed():
self.object.log_action(
'pretix.event.category.changed', user=self.request.user, data={
k: form.cleaned_data.get(k) for k in form.changed_data
}
)
return super().form_valid(form)
def get_success_url(self) -> str:
return reverse('control:event.items.categories', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class CategoryCreate(EventPermissionRequiredMixin, CreateView):
model = ItemCategory
form_class = CategoryForm
template_name = 'pretixcontrol/items/category.html'
permission = 'can_change_items'
context_object_name = 'category'
def get_success_url(self) -> str:
return reverse('control:event.items.categories', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
@transaction.atomic
def form_valid(self, form):
form.instance.event = self.request.event
messages.success(self.request, _('The new category has been created.'))
ret = super().form_valid(form)
form.instance.log_action('pretix.event.category.added', data=dict(form.cleaned_data), user=self.request.user)
return ret
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class CategoryList(PaginationMixin, ListView):
model = ItemCategory
context_object_name = 'categories'
template_name = 'pretixcontrol/items/categories.html'
def get_queryset(self):
return self.request.event.categories.all()
def category_move(request, category, up=True):
"""
This is a helper function to avoid duplicating code in category_move_up and
category_move_down. It takes a category and a direction and then tries to bring
all categories for this event in a new order.
"""
try:
category = request.event.categories.get(
id=category
)
except ItemCategory.DoesNotExist:
raise Http404(_("The requested product category does not exist."))
categories = list(request.event.categories.order_by("position"))
index = categories.index(category)
if index != 0 and up:
categories[index - 1], categories[index] = categories[index], categories[index - 1]
elif index != len(categories) - 1 and not up:
categories[index + 1], categories[index] = categories[index], categories[index + 1]
for i, cat in enumerate(categories):
if cat.position != i:
cat.position = i
cat.save()
messages.success(request, _('The order of categories has been updated.'))
@event_permission_required("can_change_items")
def category_move_up(request, organizer, event, category):
category_move(request, category, up=True)
return redirect('control:event.items.categories',
organizer=request.event.organizer.slug,
event=request.event.slug)
@event_permission_required("can_change_items")
def category_move_down(request, organizer, event, category):
category_move(request, category, up=False)
return redirect('control:event.items.categories',
organizer=request.event.organizer.slug,
event=request.event.slug)
class QuestionList(PaginationMixin, ListView):
model = Question
context_object_name = 'questions'
template_name = 'pretixcontrol/items/questions.html'
def get_queryset(self):
return self.request.event.questions.prefetch_related('items')
def question_move(request, question, up=True):
"""
This is a helper function to avoid duplicating code in question_move_up and
question_move_down. It takes a question and a direction and then tries to bring
all questions for this event into a new order.
"""
try:
question = request.event.questions.get(
id=question
)
except Question.DoesNotExist:
raise Http404(_("The selected question does not exist."))
questions = list(request.event.questions.order_by("position"))
index = questions.index(question)
if index != 0 and up:
questions[index - 1], questions[index] = questions[index], questions[index - 1]
elif index != len(questions) - 1 and not up:
questions[index + 1], questions[index] = questions[index], questions[index + 1]
for i, qt in enumerate(questions):
if qt.position != i:
qt.position = i
qt.save()
messages.success(request, _('The order of questions has been updated.'))
@event_permission_required("can_change_items")
def question_move_up(request, organizer, event, question):
question_move(request, question, up=True)
return redirect('control:event.items.questions',
organizer=request.event.organizer.slug,
event=request.event.slug)
@event_permission_required("can_change_items")
def question_move_down(request, organizer, event, question):
question_move(request, question, up=False)
return redirect('control:event.items.questions',
organizer=request.event.organizer.slug,
event=request.event.slug)
class QuestionDelete(EventPermissionRequiredMixin, DeleteView):
model = Question
template_name = 'pretixcontrol/items/question_delete.html'
permission = 'can_change_items'
context_object_name = 'question'
def get_object(self, queryset=None) -> Question:
try:
return self.request.event.questions.get(
id=self.kwargs['question']
)
except Question.DoesNotExist:
raise Http404(_("The requested question does not exist."))
def get_context_data(self, *args, **kwargs) -> dict:
context = super().get_context_data(*args, **kwargs)
context['dependent'] = list(self.get_object().items.all())
return context
@transaction.atomic
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.log_action(action='pretix.event.question.deleted', user=request.user)
self.object.delete()
messages.success(request, _('The selected question has been deleted.'))
return HttpResponseRedirect(success_url)
def get_success_url(self) -> str:
return reverse('control:event.items.questions', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
class QuestionMixin:
@cached_property
def formset(self):
formsetclass = inlineformset_factory(
Question, QuestionOption,
form=QuestionOptionForm, formset=I18nFormSet,
can_order=True, can_delete=True, extra=0
)
return formsetclass(self.request.POST if self.request.method == "POST" else None,
queryset=(QuestionOption.objects.filter(question=self.object)
if self.object else QuestionOption.objects.none()),
event=self.request.event)
def save_formset(self, obj):
if self.formset.is_valid():
for form in self.formset.initial_forms:
if form in self.formset.deleted_forms:
if not form.instance.pk:
continue
obj.log_action(
'pretix.event.question.option.deleted', user=self.request.user, data={
'id': form.instance.pk
}
)
form.instance.delete()
form.instance.pk = None
forms = self.formset.ordered_forms + [
ef for ef in self.formset.extra_forms
if ef not in self.formset.ordered_forms and ef not in self.formset.deleted_forms
]
for i, form in enumerate(forms):
form.instance.position = i
form.instance.question = obj
created = not form.instance.pk
form.save()
if form.has_changed():
change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
change_data['id'] = form.instance.pk
obj.log_action(
'pretix.event.question.option.added' if created else
'pretix.event.question.option.changed',
user=self.request.user, data=change_data
)
return True
return False
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['formset'] = self.formset
return ctx
class QuestionView(EventPermissionRequiredMixin, QuestionMixin, ChartContainingView, DetailView):
model = Question
template_name = 'pretixcontrol/items/question.html'
permission = 'can_change_items'
template_name_field = 'question'
def get_answer_statistics(self):
qs = QuestionAnswer.objects.filter(
question=self.object, orderposition__isnull=False,
orderposition__order__event=self.request.event
)
if self.request.GET.get("status", "np") != "":
s = self.request.GET.get("status", "np")
if s == 'o':
qs = qs.filter(orderposition__order__status=Order.STATUS_PENDING,
orderposition__order__expires__lt=now().replace(hour=0, minute=0, second=0))
elif s == 'np':
qs = qs.filter(orderposition__order__status__in=[Order.STATUS_PENDING, Order.STATUS_PAID])
elif s == 'ne':
qs = qs.filter(orderposition__order__status__in=[Order.STATUS_PENDING, Order.STATUS_EXPIRED])
else:
qs = qs.filter(orderposition__order__status=s)
if self.request.GET.get("item", "") != "":
i = self.request.GET.get("item", "")
qs = qs.filter(orderposition__item_id__in=(i,))
if self.object.type == Question.TYPE_FILE:
qs = [
{
'answer': ugettext('File uploaded'),
'count': qs.filter(file__isnull=False).count(),
}
]
elif self.object.type in (Question.TYPE_CHOICE, Question.TYPE_CHOICE_MULTIPLE):
qs = qs.order_by('options').values('options', 'options__answer') \
.annotate(count=Count('id')).order_by('-count')
for a in qs:
a['alink'] = a['options']
a['answer'] = str(a['options__answer'])
del a['options__answer']
elif self.object.type in (Question.TYPE_TIME, Question.TYPE_DATE, Question.TYPE_DATETIME):
qs = qs.order_by('answer')
model_cache = {a.answer: a for a in qs}
qs = qs.values('answer').annotate(count=Count('id')).order_by('answer')
for a in qs:
a['alink'] = a['answer']
a['answer'] = str(model_cache[a['answer']])
else:
qs = qs.order_by('answer').values('answer').annotate(count=Count('id')).order_by('-count')
if self.object.type == Question.TYPE_BOOLEAN:
for a in qs:
a['alink'] = a['answer']
a['answer'] = ugettext('Yes') if a['answer'] == 'True' else ugettext('No')
a['answer_bool'] = a['answer'] == 'True'
elif self.object.type == Question.TYPE_COUNTRYCODE:
for a in qs:
a['alink'] = a['answer']
a['answer'] = Country(a['answer']).name or a['answer']
return list(qs)
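    # Illustrative shape of the statistics returned above (values are made
    # up, not taken from the source): for a choice question each entry ends
    # up roughly as
    #     {'options': 3, 'answer': 'Vegetarian', 'alink': 3, 'count': 12}
    # and get_context_data() below serialises this list into stats_json for
    # the chart template.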
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['items'] = self.object.items.all()
stats = self.get_answer_statistics()
ctx['stats'] = stats
ctx['stats_json'] = json.dumps(stats)
return ctx
def get_object(self, queryset=None) -> Question:
try:
return self.request.event.questions.get(
id=self.kwargs['question']
)
except Question.DoesNotExist:
raise Http404(_("The requested question does not exist."))
def get_success_url(self) -> str:
return reverse('control:event.items.questions', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
class QuestionUpdate(EventPermissionRequiredMixin, QuestionMixin, UpdateView):
model = Question
form_class = QuestionForm
template_name = 'pretixcontrol/items/question_edit.html'
permission = 'can_change_items'
context_object_name = 'question'
def get_object(self, queryset=None) -> Question:
try:
return self.request.event.questions.get(
id=self.kwargs['question']
)
except Question.DoesNotExist:
raise Http404(_("The requested question does not exist."))
@transaction.atomic
def form_valid(self, form):
if form.cleaned_data.get('type') in ('M', 'C'):
if not self.save_formset(self.get_object()):
return self.get(self.request, *self.args, **self.kwargs)
if form.has_changed():
self.object.log_action(
'pretix.event.question.changed', user=self.request.user, data={
k: form.cleaned_data.get(k) for k in form.changed_data
}
)
messages.success(self.request, _('Your changes have been saved.'))
return super().form_valid(form)
def get_success_url(self) -> str:
return reverse('control:event.items.questions', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class QuestionCreate(EventPermissionRequiredMixin, QuestionMixin, CreateView):
model = Question
form_class = QuestionForm
template_name = 'pretixcontrol/items/question_edit.html'
permission = 'can_change_items'
context_object_name = 'question'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['instance'] = Question(event=self.request.event)
return kwargs
def get_success_url(self) -> str:
return reverse('control:event.items.questions', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
def get_object(self, **kwargs):
return None
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
@transaction.atomic
def form_valid(self, form):
if form.cleaned_data.get('type') in ('M', 'C'):
if not self.formset.is_valid():
return self.get(self.request, *self.args, **self.kwargs)
messages.success(self.request, _('The new question has been created.'))
ret = super().form_valid(form)
form.instance.log_action('pretix.event.question.added', user=self.request.user, data=dict(form.cleaned_data))
if form.cleaned_data.get('type') in ('M', 'C'):
self.save_formset(form.instance)
return ret
class QuotaList(PaginationMixin, ListView):
model = Quota
context_object_name = 'quotas'
template_name = 'pretixcontrol/items/quotas.html'
def get_queryset(self):
qs = Quota.objects.filter(
event=self.request.event
).prefetch_related(
Prefetch(
"items",
queryset=Item.objects.annotate(has_variations=Count('variations'))
),
"variations",
"variations__item"
)
if self.request.GET.get("subevent", "") != "":
s = self.request.GET.get("subevent", "")
qs = qs.filter(subevent_id=s)
return qs
class QuotaCreate(EventPermissionRequiredMixin, CreateView):
model = Quota
form_class = QuotaForm
template_name = 'pretixcontrol/items/quota_edit.html'
permission = 'can_change_items'
context_object_name = 'quota'
def get_success_url(self) -> str:
return reverse('control:event.items.quotas', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
@transaction.atomic
def form_valid(self, form):
form.instance.event = self.request.event
messages.success(self.request, _('The new quota has been created.'))
ret = super().form_valid(form)
form.instance.log_action('pretix.event.quota.added', user=self.request.user, data=dict(form.cleaned_data))
return ret
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class QuotaView(ChartContainingView, DetailView):
model = Quota
template_name = 'pretixcontrol/items/quota.html'
context_object_name = 'quota'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data()
avail = self.object.availability()
ctx['avail'] = avail
data = [
{
'label': ugettext('Paid orders'),
'value': self.object.count_paid_orders(),
'sum': True,
},
{
'label': ugettext('Pending orders'),
'value': self.object.count_pending_orders(),
'sum': True,
},
{
'label': ugettext('Vouchers and waiting list reservations'),
'value': self.object.count_blocking_vouchers(),
'sum': True,
},
{
'label': ugettext('Current user\'s carts'),
'value': self.object.count_in_cart(),
'sum': True,
},
]
sum_values = sum([d['value'] for d in data if d['sum']])
s = self.object.size - sum_values if self.object.size is not None else ugettext('Infinite')
data.append({
'label': ugettext('Available quota'),
'value': s,
'sum': False,
'strong': True
})
data.append({
'label': ugettext('Waiting list (pending)'),
'value': self.object.count_waiting_list_pending(),
'sum': False,
})
if self.object.size is not None:
data.append({
'label': ugettext('Currently for sale'),
'value': avail[1],
'sum': False,
'strong': True
})
ctx['quota_chart_data'] = json.dumps([r for r in data if r.get('sum')])
ctx['quota_table_rows'] = list(data)
ctx['quota_overbooked'] = sum_values - self.object.size if self.object.size is not None else 0
ctx['has_plugins'] = False
res = (
Quota.AVAILABILITY_GONE if self.object.size is not None and self.object.size - sum_values <= 0 else
Quota.AVAILABILITY_OK,
self.object.size - sum_values if self.object.size is not None else None
)
for recv, resp in quota_availability.send(sender=self.request.event, quota=self.object, result=res,
count_waitinglist=True):
if resp != res:
ctx['has_plugins'] = True
ctx['has_ignore_vouchers'] = Voucher.objects.filter(
Q(allow_ignore_quota=True) &
Q(Q(valid_until__isnull=True) | Q(valid_until__gte=now())) &
Q(Q(self.object._position_lookup) | Q(quota=self.object)) &
Q(redeemed__lt=F('max_usages'))
).exists()
return ctx
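    # Illustrative shape of the chart payload assembled above (numbers are
    # made up): quota_chart_data serialises only the rows flagged with 'sum',
    # e.g. [{"label": "Paid orders", "value": 42, "sum": true}, ...], while
    # quota_table_rows additionally carries the "Available quota",
    # waiting-list and "Currently for sale" rows.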
def get_object(self, queryset=None) -> Quota:
try:
return self.request.event.quotas.get(
id=self.kwargs['quota']
)
except Quota.DoesNotExist:
raise Http404(_("The requested quota does not exist."))
class QuotaUpdate(EventPermissionRequiredMixin, UpdateView):
model = Quota
form_class = QuotaForm
template_name = 'pretixcontrol/items/quota_edit.html'
permission = 'can_change_items'
context_object_name = 'quota'
def get_context_data(self, *args, **kwargs):
ctx = super().get_context_data()
return ctx
def get_object(self, queryset=None) -> Quota:
try:
return self.request.event.quotas.get(
id=self.kwargs['quota']
)
except Quota.DoesNotExist:
raise Http404(_("The requested quota does not exist."))
@transaction.atomic
def form_valid(self, form):
messages.success(self.request, _('Your changes have been saved.'))
if form.has_changed():
self.object.log_action(
'pretix.event.quota.changed', user=self.request.user, data={
k: form.cleaned_data.get(k) for k in form.changed_data
}
)
if ((form.initial.get('subevent') and not form.instance.subevent) or
(form.instance.subevent and form.initial.get('subevent') != form.instance.subevent.pk)):
if form.initial.get('subevent'):
se = SubEvent.objects.get(event=self.request.event, pk=form.initial.get('subevent'))
se.log_action(
'pretix.subevent.quota.deleted', user=self.request.user, data={
'id': form.instance.pk
}
)
if form.instance.subevent:
form.instance.subevent.log_action(
'pretix.subevent.quota.added', user=self.request.user, data={
'id': form.instance.pk
}
)
form.instance.rebuild_cache()
return super().form_valid(form)
def get_success_url(self) -> str:
return reverse('control:event.items.quotas.show', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'quota': self.object.pk
})
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class QuotaDelete(EventPermissionRequiredMixin, DeleteView):
model = Quota
template_name = 'pretixcontrol/items/quota_delete.html'
permission = 'can_change_items'
context_object_name = 'quota'
def get_object(self, queryset=None) -> Quota:
try:
return self.request.event.quotas.get(
id=self.kwargs['quota']
)
except Quota.DoesNotExist:
raise Http404(_("The requested quota does not exist."))
def get_context_data(self, *args, **kwargs) -> dict:
context = super().get_context_data(*args, **kwargs)
context['dependent'] = list(self.get_object().items.all())
return context
@transaction.atomic
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
self.object.log_action(action='pretix.event.quota.deleted', user=request.user)
self.object.delete()
messages.success(self.request, _('The selected quota has been deleted.'))
return HttpResponseRedirect(success_url)
def get_success_url(self) -> str:
return reverse('control:event.items.quotas', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
class ItemDetailMixin(SingleObjectMixin):
model = Item
context_object_name = 'item'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
nav = sorted(
sum(
(list(a[1]) for a in nav_item.send(self.request.event, request=self.request, item=self.get_object())),
[]
),
key=lambda r: str(r['label'])
)
ctx['extra_nav'] = nav
return ctx
def get_object(self, queryset=None) -> Item:
try:
if not hasattr(self, 'object') or not self.object:
self.item = self.request.event.items.get(
id=self.kwargs['item']
)
self.object = self.item
return self.object
except Item.DoesNotExist:
raise Http404(_("The requested item does not exist."))
class ItemCreate(EventPermissionRequiredMixin, CreateView):
form_class = ItemCreateForm
template_name = 'pretixcontrol/item/create.html'
permission = 'can_change_items'
def get_success_url(self) -> str:
return reverse('control:event.item', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.object.id,
})
def get_initial(self):
initial = super().get_initial()
trs = list(self.request.event.tax_rules.all())
if len(trs) == 1:
initial['tax_rule'] = trs[0]
return initial
@transaction.atomic
def form_valid(self, form):
messages.success(self.request, _('Your changes have been saved.'))
ret = super().form_valid(form)
form.instance.log_action('pretix.event.item.added', user=self.request.user, data={
k: (form.cleaned_data.get(k).name
if isinstance(form.cleaned_data.get(k), File)
else form.cleaned_data.get(k))
for k in form.changed_data
})
return ret
def get_form_kwargs(self):
"""
Returns the keyword arguments for instantiating the form.
"""
newinst = Item(event=self.request.event)
kwargs = super().get_form_kwargs()
kwargs.update({'instance': newinst, 'user': self.request.user})
return kwargs
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
class ItemUpdateGeneral(ItemDetailMixin, EventPermissionRequiredMixin, UpdateView):
form_class = ItemUpdateForm
template_name = 'pretixcontrol/item/index.html'
permission = 'can_change_items'
@cached_property
def plugin_forms(self):
forms = []
for rec, resp in item_forms.send(sender=self.request.event, item=self.item, request=self.request):
if isinstance(resp, (list, tuple)):
forms.extend(resp)
else:
forms.append(resp)
return forms
def get_success_url(self) -> str:
return reverse('control:event.item', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def post(self, request, *args, **kwargs):
self.get_object()
form = self.get_form()
if form.is_valid() and all(f.is_valid() for f in self.plugin_forms):
return self.form_valid(form)
else:
return self.form_invalid(form)
@transaction.atomic
def form_valid(self, form):
messages.success(self.request, _('Your changes have been saved.'))
if form.has_changed() or any(f.has_changed() for f in self.plugin_forms):
data = {
k: form.cleaned_data.get(k)
for k in form.changed_data
}
for f in self.plugin_forms:
data.update({
k: (f.cleaned_data.get(k).name
if isinstance(f.cleaned_data.get(k), File)
else f.cleaned_data.get(k))
for k in f.changed_data
})
self.object.log_action(
'pretix.event.item.changed', user=self.request.user, data=data
)
invalidate_cache.apply_async(kwargs={'event': self.request.event.pk, 'item': self.object.pk})
for f in self.plugin_forms:
f.save()
return super().form_valid(form)
def form_invalid(self, form):
messages.error(self.request, _('We could not save your changes. See below for details.'))
return super().form_invalid(form)
def get_context_data(self, **kwargs):
ctx = super().get_context_data()
ctx['plugin_forms'] = self.plugin_forms
if not ctx['item'].active and ctx['item'].bundled_with.count() > 0:
messages.info(self.request, _("You disabled this item, but it is still part of a product bundle. "
"Your participants won't be able to buy the bundle unless you remove this "
"item from it."))
return ctx
class ItemVariations(ItemDetailMixin, EventPermissionRequiredMixin, TemplateView):
permission = 'can_change_items'
template_name = 'pretixcontrol/item/variations.html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.item = None
@cached_property
def formset(self):
formsetclass = inlineformset_factory(
Item, ItemVariation,
form=ItemVariationForm, formset=ItemVariationsFormSet,
can_order=True, can_delete=True, extra=0
)
return formsetclass(self.request.POST if self.request.method == "POST" else None,
queryset=ItemVariation.objects.filter(item=self.get_object()),
event=self.request.event)
def post(self, request, *args, **kwargs):
with transaction.atomic():
if self.formset.is_valid():
for form in self.formset.deleted_forms:
if not form.instance.pk:
continue
self.get_object().log_action(
'pretix.event.item.variation.deleted', user=self.request.user, data={
'value': form.instance.value,
'id': form.instance.pk
}
)
form.instance.delete()
form.instance.pk = None
forms = self.formset.ordered_forms + [
ef for ef in self.formset.extra_forms
if ef not in self.formset.ordered_forms and ef not in self.formset.deleted_forms
]
for i, form in enumerate(forms):
form.instance.position = i
form.instance.item = self.get_object()
created = not form.instance.pk
form.save()
if form.has_changed():
change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
change_data['value'] = form.instance.value
change_data['id'] = form.instance.pk
self.get_object().log_action(
'pretix.event.item.variation.changed' if not created else
'pretix.event.item.variation.added',
user=self.request.user, data=change_data
)
messages.success(self.request, _('Your changes have been saved.'))
return redirect(self.get_success_url())
messages.error(self.request, _('We could not save your changes. See below for details.'))
return self.get(request, *args, **kwargs)
def get_success_url(self) -> str:
return reverse('control:event.item.variations', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def get_context_data(self, **kwargs) -> dict:
self.object = self.get_object()
context = super().get_context_data(**kwargs)
context['formset'] = self.formset
return context
class ItemAddOns(ItemDetailMixin, EventPermissionRequiredMixin, TemplateView):
permission = 'can_change_items'
template_name = 'pretixcontrol/item/addons.html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.item = None
@cached_property
def formset(self):
formsetclass = inlineformset_factory(
Item, ItemAddOn,
form=ItemAddOnForm, formset=ItemAddOnsFormSet,
can_order=True, can_delete=True, extra=0
)
return formsetclass(self.request.POST if self.request.method == "POST" else None,
queryset=ItemAddOn.objects.filter(base_item=self.get_object()),
event=self.request.event)
def post(self, request, *args, **kwargs):
with transaction.atomic():
if self.formset.is_valid():
for form in self.formset.deleted_forms:
if not form.instance.pk:
continue
self.get_object().log_action(
'pretix.event.item.addons.removed', user=self.request.user, data={
'category': form.instance.addon_category.pk
}
)
form.instance.delete()
form.instance.pk = None
forms = self.formset.ordered_forms + [
ef for ef in self.formset.extra_forms
if ef not in self.formset.ordered_forms and ef not in self.formset.deleted_forms
]
for i, form in enumerate(forms):
form.instance.base_item = self.get_object()
form.instance.position = i
created = not form.instance.pk
form.save()
if form.has_changed():
change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
change_data['id'] = form.instance.pk
self.get_object().log_action(
'pretix.event.item.addons.changed' if not created else
'pretix.event.item.addons.added',
user=self.request.user, data=change_data
)
messages.success(self.request, _('Your changes have been saved.'))
return redirect(self.get_success_url())
return self.get(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
if self.get_object().category and self.get_object().category.is_addon:
messages.error(self.request, _('You cannot add add-ons to a product that is only available as an add-on '
'itself.'))
return redirect(self.get_previous_url())
return super().get(request, *args, **kwargs)
def get_previous_url(self) -> str:
return reverse('control:event.item', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def get_success_url(self) -> str:
return reverse('control:event.item.addons', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def get_context_data(self, **kwargs) -> dict:
context = super().get_context_data(**kwargs)
context['formset'] = self.formset
return context
class ItemBundles(ItemDetailMixin, EventPermissionRequiredMixin, TemplateView):
permission = 'can_change_items'
template_name = 'pretixcontrol/item/bundles.html'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.item = None
@cached_property
def formset(self):
formsetclass = inlineformset_factory(
Item, ItemBundle,
form=ItemBundleForm, formset=ItemBundleFormSet,
fk_name='base_item',
can_order=False, can_delete=True, extra=0
)
return formsetclass(self.request.POST if self.request.method == "POST" else None,
queryset=ItemBundle.objects.filter(base_item=self.get_object()),
event=self.request.event, item=self.item)
def post(self, request, *args, **kwargs):
with transaction.atomic():
if self.formset.is_valid():
for form in self.formset.deleted_forms:
if not form.instance.pk:
continue
self.get_object().log_action(
'pretix.event.item.bundles.removed', user=self.request.user, data={
'bundled_item': form.instance.bundled_item.pk,
'bundled_variation': (form.instance.bundled_variation.pk if form.instance.bundled_variation else None),
'count': form.instance.count,
'designated_price': str(form.instance.designated_price),
}
)
form.instance.delete()
form.instance.pk = None
forms = [
ef for ef in self.formset.forms
if ef not in self.formset.deleted_forms
]
for i, form in enumerate(forms):
form.instance.base_item = self.get_object()
created = not form.instance.pk
form.save()
if form.has_changed():
change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
change_data['id'] = form.instance.pk
self.get_object().log_action(
'pretix.event.item.bundles.changed' if not created else
'pretix.event.item.bundles.added',
user=self.request.user, data=change_data
)
messages.success(self.request, _('Your changes have been saved.'))
return redirect(self.get_success_url())
return self.get(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
if self.get_object().category and self.get_object().category.is_addon:
messages.error(self.request, _('You cannot add bundles to a product that is only available as an add-on '
'itself.'))
return redirect(self.get_previous_url())
return super().get(request, *args, **kwargs)
def get_previous_url(self) -> str:
return reverse('control:event.item', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def get_success_url(self) -> str:
return reverse('control:event.item.bundles', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
'item': self.get_object().id,
})
def get_context_data(self, **kwargs) -> dict:
context = super().get_context_data(**kwargs)
context['formset'] = self.formset
return context
class ItemDelete(EventPermissionRequiredMixin, DeleteView):
model = Item
template_name = 'pretixcontrol/item/delete.html'
permission = 'can_change_items'
context_object_name = 'item'
def get_context_data(self, *args, **kwargs) -> dict:
context = super().get_context_data(*args, **kwargs)
context['possible'] = self.is_allowed()
return context
def is_allowed(self) -> bool:
return not self.get_object().orderposition_set.exists()
def get_object(self, queryset=None) -> Item:
if not hasattr(self, 'object') or not self.object:
try:
self.object = self.request.event.items.get(
id=self.kwargs['item']
)
except Item.DoesNotExist:
raise Http404(_("The requested product does not exist."))
return self.object
@transaction.atomic
def delete(self, request, *args, **kwargs):
success_url = self.get_success_url()
o = self.get_object()
if o.allow_delete():
CartPosition.objects.filter(addon_to__item=self.get_object()).delete()
self.get_object().cartposition_set.all().delete()
self.get_object().log_action('pretix.event.item.deleted', user=self.request.user)
self.get_object().delete()
messages.success(request, _('The selected product has been deleted.'))
return HttpResponseRedirect(success_url)
else:
o = self.get_object()
o.active = False
o.save()
o.log_action('pretix.event.item.changed', user=self.request.user, data={
'active': False
})
messages.success(request, _('The selected product has been deactivated.'))
return HttpResponseRedirect(success_url)
def get_success_url(self) -> str:
return reverse('control:event.items', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug,
})
|
the-stack_106_19178
|
import argparse
import numpy
from .._helpers import _writer_map, read, reader_map, write
from ._helpers import _get_version_text
def convert(argv=None):
# Parse command line arguments.
parser = _get_convert_parser()
args = parser.parse_args(argv)
# read mesh data
mesh = read(args.infile, file_format=args.input_format)
if args.prune:
mesh.prune()
if (
args.prune_z_0
and mesh.points.shape[1] == 3
and numpy.all(numpy.abs(mesh.points[:, 2]) < 1.0e-13)
):
mesh.points = mesh.points[:, :2]
# Some converters (like VTK) require `points` to be contiguous.
mesh.points = numpy.ascontiguousarray(mesh.points)
if args.sets_to_int_data:
mesh.sets_to_int_data()
if args.int_data_to_sets:
mesh.int_data_to_sets()
# write it out
kwargs = {"file_format": args.output_format}
if args.float_format is not None:
kwargs["float_fmt"] = args.float_format
if args.ascii:
kwargs["binary"] = False
write(args.outfile, mesh, **kwargs)
def _get_convert_parser():
# Avoid repeating format names
# https://stackoverflow.com/a/31124505/353337
class CustomHelpFormatter(argparse.HelpFormatter):
def _format_action_invocation(self, action):
if not action.option_strings or action.nargs == 0:
return super()._format_action_invocation(action)
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
return ", ".join(action.option_strings) + " " + args_string
parser = argparse.ArgumentParser(
description=("Convert between mesh formats."),
# formatter_class=argparse.RawTextHelpFormatter,
formatter_class=CustomHelpFormatter,
)
parser.add_argument("infile", type=str, help="mesh file to be read from")
parser.add_argument(
"--input-format",
"-i",
type=str,
choices=sorted(list(reader_map.keys())),
help="input file format",
default=None,
)
parser.add_argument(
"--output-format",
"-o",
type=str,
choices=sorted(list(_writer_map.keys())),
help="output file format",
default=None,
)
parser.add_argument(
"--ascii",
"-a",
action="store_true",
help="write in ASCII format variant (where applicable, default: binary)",
)
parser.add_argument("outfile", type=str, help="mesh file to be written to")
parser.add_argument(
"--float-format",
"-f",
type=str,
help="float format used in output ASCII files (default: .16e)",
)
parser.add_argument(
"--prune",
"-p",
action="store_true",
help="remove lower order cells, remove orphaned nodes",
)
parser.add_argument(
"--prune-z-0",
"-z",
action="store_true",
help="remove third (z) dimension if all points are 0",
)
parser.add_argument(
"--sets-to-int-data",
"-s",
action="store_true",
help="if possible, convert sets to integer data (useful if the output type does not support sets)",
)
parser.add_argument(
"--int-data-to-sets",
"-d",
action="store_true",
help="if possible, convert integer data to sets (useful if the output type does not support integer data)",
)
parser.add_argument(
"--version",
"-v",
action="version",
version=_get_version_text(),
help="display version information",
)
return parser
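# Illustrative invocations (file names are placeholders, not taken from the
# source); the parser above supports calls along the lines of
#
#     meshio convert input.vtu output.xdmf
#     meshio convert -i gmsh --ascii --prune-z-0 input.msh output.vtk
#
# and convert() can equally be driven programmatically, e.g.
#
#     convert(["input.vtu", "output.vtk", "--ascii"])
#
# (the exact console-script name depends on how the package registers its
# entry points).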
|
the-stack_106_19180
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""tvOS test runner rule."""
load(
"@build_bazel_rules_apple//apple/testing:apple_test_rules.bzl",
"AppleTestRunnerInfo",
)
def _get_template_substitutions(ctx):
"""Returns the template substitutions for this runner."""
test_env = ctx.configuration.test_env
subs = {
"device_type": ctx.attr.device_type,
"os_version": ctx.attr.os_version,
"test_env": ",".join([k + "=" + v for (k, v) in test_env.items()]),
"testrunner_binary": ctx.executable._testrunner.short_path,
}
return {"%(" + k + ")s": subs[k] for k in subs}
def _get_test_environment(ctx):
"""Returns the test environment for this runner."""
test_environment = dict(ctx.configuration.test_env)
xcode_version = str(ctx.attr._xcode_config[apple_common.XcodeVersionConfig].xcode_version())
if xcode_version:
test_environment["XCODE_VERSION"] = xcode_version
return test_environment
def _tvos_test_runner_impl(ctx):
"""Implementation for the tvos_test_runner rule."""
ctx.actions.expand_template(
template = ctx.file._test_template,
output = ctx.outputs.test_runner_template,
substitutions = _get_template_substitutions(ctx),
)
return [
AppleTestRunnerInfo(
test_runner_template = ctx.outputs.test_runner_template,
execution_requirements = ctx.attr.execution_requirements,
test_environment = _get_test_environment(ctx),
),
DefaultInfo(
runfiles = ctx.runfiles(
files = [ctx.file._testrunner],
),
),
]
tvos_test_runner = rule(
_tvos_test_runner_impl,
attrs = {
"device_type": attr.string(
default = "",
doc = """
The device type of the tvOS simulator to run the test on. The supported types
correspond to the output of `xcrun simctl list devicetypes`, e.g. Apple TV,
Apple TV 4K. By default, it is the latest supported Apple TV type.
""",
),
"execution_requirements": attr.string_dict(
allow_empty = False,
default = {"requires-darwin": ""},
doc = """
Dictionary of strings to strings which specifies the execution requirements for
the runner. In most common cases, this should not be used.
""",
),
"os_version": attr.string(
default = "",
doc = """
The OS version of the tvOS simulator to run the test on. The supported OS
versions correspond to the output of `xcrun simctl list runtimes`, e.g. 11.2,
9.1. By default, it is the latest supported version for the device type.
""",
),
"_test_template": attr.label(
default = Label(
"@build_bazel_rules_apple//apple/testing/default_runner:tvos_test_runner.template.sh",
),
allow_single_file = True,
),
"_testrunner": attr.label(
default = Label(
"@xctestrunner//file",
),
allow_single_file = True,
executable = True,
cfg = "host",
doc = """
The test runner binary (xctestrunner) that this rule depends on to drive the
tests on the simulator.
""",
),
"_xcode_config": attr.label(
default = configuration_field(
fragment = "apple",
name = "xcode_config_label",
),
),
},
outputs = {
"test_runner_template": "%{name}.sh",
},
fragments = ["apple", "objc"],
doc = """
Rule that defines a test runner for tvOS tests.
The runner will create a new simulator according to the given arguments and
run the tests in it.
Outputs:
AppleTestRunnerInfo:
test_runner_template: Template file that contains the specific mechanism
with which the tests will be performed.
execution_requirements: Dictionary that represents the specific hardware
requirements for this test.
Runfiles:
files: The files needed during runtime for the test to be performed.
""",
)
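# Illustrative BUILD usage (target name and versions are placeholders, not
# taken from the source):
#
#     tvos_test_runner(
#         name = "tvos_simulator_runner",
#         device_type = "Apple TV 4K",
#         os_version = "12.0",
#     )
#
# A tvOS test target would then typically reference it through its `runner`
# attribute, e.g. runner = ":tvos_simulator_runner".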
|
the-stack_106_19182
|
import torch
import numpy as np
from . import MultiOutputKernel, Parameter, config
class IndependentMultiOutputKernel(MultiOutputKernel):
def __init__(self, *kernels, output_dims=None, name="IMO"):
if output_dims is None:
output_dims = len(kernels)
super(IndependentMultiOutputKernel, self).__init__(output_dims, name=name)
self.kernels = self._check_kernels(kernels, output_dims)
def __getitem__(self, key):
return self.kernels[key]
def Ksub(self, i, j, X1, X2=None):
# X has shape (data_points,input_dims)
if i == j:
return self.kernels[i](X1, X2)
else:
if X2 is None:
X2 = X1
return torch.zeros(X1.shape[0], X2.shape[0], device=config.device, dtype=config.dtype)
class MultiOutputSpectralKernel(MultiOutputKernel):
def __init__(self, output_dims, input_dims, active_dims=None, name="MOSM"):
super(MultiOutputSpectralKernel, self).__init__(output_dims, input_dims, active_dims, name)
# TODO: incorporate mixtures?
# TODO: allow different input_dims per channel
magnitude = torch.rand(output_dims)
mean = torch.rand(output_dims, input_dims)
variance = torch.rand(output_dims, input_dims)
delay = torch.zeros(output_dims, input_dims)
phase = torch.zeros(output_dims)
self.input_dims = input_dims
self.magnitude = Parameter(magnitude, lower=config.positive_minimum)
self.mean = Parameter(mean, lower=config.positive_minimum)
self.variance = Parameter(variance, lower=config.positive_minimum)
if 1 < output_dims:
self.delay = Parameter(delay)
self.phase = Parameter(phase)
self.twopi = np.power(2.0*np.pi,float(self.input_dims)/2.0)
def Ksub(self, i, j, X1, X2=None):
# X has shape (data_points,input_dims)
tau = self.distance(X1,X2) # NxMxD
if i == j:
variance = self.variance()[i]
alpha = self.magnitude()[i]**2 * self.twopi * variance.prod().sqrt() # scalar
exp = torch.exp(-0.5*torch.tensordot(tau**2, variance, dims=1)) # NxM
cos = torch.cos(2.0*np.pi * torch.tensordot(tau, self.mean()[i], dims=1)) # NxM
return alpha * exp * cos
else:
inv_variances = 1.0/(self.variance()[i] + self.variance()[j]) # D
diff_mean = self.mean()[i] - self.mean()[j] # D
magnitude = self.magnitude()[i]*self.magnitude()[j]*torch.exp(-np.pi**2 * diff_mean.dot(inv_variances*diff_mean)) # scalar
mean = inv_variances * (self.variance()[i]*self.mean()[j] + self.variance()[j]*self.mean()[i]) # D
variance = 2.0 * self.variance()[i] * inv_variances * self.variance()[j] # D
delay = self.delay()[i] - self.delay()[j] # D
phase = self.phase()[i] - self.phase()[j] # scalar
alpha = magnitude * self.twopi * variance.prod().sqrt() # scalar
exp = torch.exp(-0.5 * torch.tensordot((tau+delay)**2, variance, dims=1)) # NxM
cos = torch.cos(2.0*np.pi * torch.tensordot(tau+delay, mean, dims=1) + phase) # NxM
return alpha * exp * cos
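# Illustrative usage sketch (import path and tensor shapes are assumptions,
# not taken from the source):
#
#     k = MultiOutputSpectralKernel(output_dims=2, input_dims=1)
#     K01 = k.Ksub(0, 1, X1, X2)  # cross-covariance block of shape (N, M)
#
# where X1 and X2 are torch tensors of shape (data_points, input_dims); the
# MultiOutputKernel base class is expected to assemble the full Gram matrix
# from these per-channel Ksub blocks.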
class CrossSpectralKernel(MultiOutputKernel):
def __init__(self, output_dims, input_dims, Rq=1, active_dims=None, name="CSM"):
super(CrossSpectralKernel, self).__init__(output_dims, input_dims, active_dims, name)
amplitude = torch.rand(output_dims, Rq)
mean = torch.rand(input_dims)
variance = torch.rand(input_dims)
shift = torch.zeros(output_dims, Rq)
self.input_dims = input_dims
self.Rq = Rq
self.amplitude = Parameter(amplitude, lower=config.positive_minimum)
self.mean = Parameter(mean, lower=config.positive_minimum)
self.variance = Parameter(variance, lower=config.positive_minimum)
self.shift = Parameter(shift)
def Ksub(self, i, j, X1, X2=None):
# X has shape (data_points,input_dims)
tau = self.distance(X1,X2) # NxMxD
if i == j:
# put Rq into third dimension and sum at the end
amplitude = self.amplitude()[i].reshape(1,1,-1) # 1x1xRq
exp = torch.exp(-0.5 * torch.tensordot(tau**2, self.variance(), dims=1)).unsqueeze(2) # NxMx1
            # the cos commented out below is as written in the paper; instead we take phi out of the product with the mean
#cos = torch.cos(torch.tensordot(tau.unsqueeze(2), self.mean(), dims=1))
cos = torch.cos(2.0*np.pi * torch.tensordot(tau, self.mean(), dims=1).unsqueeze(2)) # NxMxRq
return torch.sum(amplitude * exp * cos, dim=2)
else:
shift = self.shift()[i] - self.shift()[j] # Rq
# put Rq into third dimension and sum at the end
amplitude = torch.sqrt(self.amplitude()[i]*self.amplitude()[j]).reshape(1,1,-1) # 1x1xRq
exp = torch.exp(-0.5 * torch.tensordot(tau**2, self.variance(), dims=1)).unsqueeze(2) # NxMx1
            # the cos commented out below is as written in the paper; instead we take phi out of the product with the mean
#cos = torch.cos(torch.tensordot(tau.unsqueeze(2) + shift.reshape(1,1,-1,1), self.mean(), dims=1))
cos = torch.cos(2.0*np.pi * (torch.tensordot(tau, self.mean(), dims=1).unsqueeze(2) + shift.reshape(1,1,-1))) # NxMxRq
return torch.sum(amplitude * exp * cos, dim=2)
class LinearModelOfCoregionalizationKernel(MultiOutputKernel):
def __init__(self, *kernels, output_dims, input_dims, Q=None, Rq=1, name="LMC"):
super(LinearModelOfCoregionalizationKernel, self).__init__(output_dims, input_dims, name=name)
if Q is None:
Q = len(kernels)
kernels = self._check_kernels(kernels, Q)
weight = torch.rand(output_dims, Q, Rq)
self.kernels = kernels
self.weight = Parameter(weight, lower=config.positive_minimum)
def __getitem__(self, key):
return self.kernels[key]
def Ksub(self, i, j, X1, X2=None):
# X has shape (data_points,input_dims)
weight = torch.sum(self.weight()[i] * self.weight()[j], dim=1) # Q
kernels = torch.stack([kernel(X1,X2) for kernel in self.kernels], dim=2) # NxMxQ
return torch.tensordot(kernels, weight, dims=1)
class GaussianConvolutionProcessKernel(MultiOutputKernel):
def __init__(self, output_dims, input_dims, active_dims=None, name="CONV"):
super(GaussianConvolutionProcessKernel, self).__init__(output_dims, input_dims, active_dims, name)
weight = torch.rand(output_dims)
variance = torch.rand(output_dims, input_dims)
base_variance = torch.rand(input_dims)
self.input_dims = input_dims
self.weight = Parameter(weight, lower=config.positive_minimum)
self.variance = Parameter(variance, lower=0.0)
self.base_variance = Parameter(base_variance, lower=config.positive_minimum)
def Ksub(self, i, j, X1, X2=None):
# X has shape (data_points,input_dims)
tau = self.squared_distance(X1,X2) # NxMxD
        # differences with the thesis from Parra are that it lacks a multiplication by 2*pi, lacks a minus in the exponential function, and doesn't write the variance matrices as inverted
if X2 is None:
variances = 2.0*self.variance()[i] + self.base_variance() # D
weight = self.weight()[i]**2 * torch.sqrt(self.base_variance().prod()/variances.prod()) # scalar
exp = torch.exp(-0.5 * torch.tensordot(tau, 1.0/variances, dims=1)) # NxM
return weight * exp
else:
variances = self.variance()[i] + self.variance()[j] + self.base_variance() # D
weight_variance = torch.sqrt(self.base_variance().prod()/variances.prod()) # scalar
weight = self.weight()[i] * self.weight()[j] * weight_variance # scalar
exp = torch.exp(-0.5 * torch.tensordot(tau, 1.0/variances, dims=1)) # NxM
return weight * exp
|
the-stack_106_19183
|
from typing import Optional, List
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from dispatch.exceptions import NotFoundError
from dispatch.project import service as project_service
from .models import (
SourceStatus,
SourceStatusCreate,
SourceStatusUpdate,
SourceStatusRead,
)
def get(*, db_session, source_status_id: int) -> Optional[SourceStatus]:
"""Gets a status by its id."""
return db_session.query(SourceStatus).filter(SourceStatus.id == source_status_id).one_or_none()
def get_by_name(*, db_session, project_id: int, name: str) -> Optional[SourceStatus]:
"""Gets a status by its name."""
return (
db_session.query(SourceStatus)
.filter(SourceStatus.name == name)
.filter(SourceStatus.project_id == project_id)
.one_or_none()
)
def get_by_name_or_raise(
*, db_session, project_id, source_status_in=SourceStatusRead
) -> SourceStatusRead:
"""Returns the status specified or raises ValidationError."""
status = get_by_name(db_session=db_session, project_id=project_id, name=source_status_in.name)
if not status:
raise ValidationError(
[
ErrorWrapper(
NotFoundError(
msg="SourceStatus not found.",
status=source_status_in.name,
),
loc="status",
)
],
model=SourceStatusRead,
)
return status
def get_all(*, db_session, project_id: int) -> List[Optional[SourceStatus]]:
"""Gets all sources."""
return db_session.query(SourceStatus).filter(SourceStatus.project_id == project_id)
def create(*, db_session, source_status_in: SourceStatusCreate) -> SourceStatus:
"""Creates a new status."""
project = project_service.get_by_name_or_raise(
db_session=db_session, project_in=source_status_in.project
)
source_status = SourceStatus(**source_status_in.dict(exclude={"project"}), project=project)
db_session.add(source_status)
db_session.commit()
return source_status
def get_or_create(*, db_session, source_status_in: SourceStatusCreate) -> SourceStatus:
"""Gets or creates a new status."""
# prefer the status id if available
if source_status_in.id:
q = db_session.query(SourceStatus).filter(SourceStatus.id == source_status_in.id)
else:
q = db_session.query(SourceStatus).filter_by(name=source_status_in.name)
instance = q.first()
if instance:
return instance
return create(
db_session=db_session,
source_status_in=source_status_in,
)
def update(
*,
db_session,
source_status: SourceStatus,
source_status_in: SourceStatusUpdate,
) -> SourceStatus:
"""Updates an existing status."""
source_status_data = source_status.dict()
update_data = source_status_in.dict(skip_defaults=True, exclude={})
for field in source_status_data:
if field in update_data:
setattr(source_status, field, update_data[field])
db_session.commit()
return source_status
def delete(*, db_session, source_status_id: int):
"""Deletes an existing status."""
source_status = (
db_session.query(SourceStatus).filter(SourceStatus.id == source_status_id).one_or_none()
)
db_session.delete(source_status)
db_session.commit()
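# Illustrative call pattern (field values are placeholders, not taken from
# the source): callers typically resolve or create a status along the lines
# of
#
#     status = get_or_create(
#         db_session=db_session,
#         source_status_in=SourceStatusCreate(name="Enabled", project=project_in),
#     )
#
# which falls back to create() only when neither the id nor the name matches
# an existing row.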
|
the-stack_106_19188
|
#!/usr/bin/env python
"""A builder implementation for windows clients."""
import ctypes
import io
import logging
import os
import re
import shutil
import subprocess
import sys
from typing import List
import zipfile
import win32process
from grr_response_client_builder import build
from grr_response_client_builder import build_helpers
from grr_response_core import config
from grr_response_core.lib import package
from grr_response_core.lib import utils
MODULE_PATTERNS = [
# Visual Studio runtime libs.
re.compile("msvcr.+.dll", re.I),
re.compile("msvcp.+.dll", re.I)
]
# We copy these files manually because pyinstaller destroys them to the point
# where they can't be signed. They don't ever seem to be loaded but they are
# part of the VC90 manifest.
FILES_FROM_VIRTUALENV = [
r"Lib\site-packages\pythonwin\mfc90.dll",
r"Lib\site-packages\pythonwin\mfc90u.dll",
# TODO(user): check if building/repacking works without lines below.
r"Lib\site-packages\pythonwin\mfc140.dll",
r"Lib\site-packages\pythonwin\mfcm140u.dll"
]
PROCESS_QUERY_INFORMATION = 0x400
PROCESS_VM_READ = 0x10
def _EnumMissingModules():
"""Enumerate all modules which match the patterns MODULE_PATTERNS.
  PyInstaller often fails to locate all DLLs which are required at
  runtime. We import all the client modules here; then we introspect all
  the modules loaded in our current running process, and all the ones
  matching MODULE_PATTERNS are copied into the client package.
Yields:
a source file for a linked dll.
"""
module_handle = ctypes.c_ulong()
count = ctypes.c_ulong()
process_handle = ctypes.windll.kernel32.OpenProcess(
PROCESS_QUERY_INFORMATION
| PROCESS_VM_READ, 0, os.getpid())
ctypes.windll.psapi.EnumProcessModules(process_handle,
ctypes.byref(module_handle),
ctypes.sizeof(module_handle),
ctypes.byref(count))
  # The size of a handle is pointer size (i.e. 64 bit on amd64 and 32 bit on
# i386).
if sys.maxsize > 2**32:
handle_type = ctypes.c_ulonglong
else:
handle_type = ctypes.c_ulong
module_list = (handle_type * (count.value // ctypes.sizeof(handle_type)))()
ctypes.windll.psapi.EnumProcessModulesEx(process_handle,
ctypes.byref(module_list),
ctypes.sizeof(module_list),
ctypes.byref(count), 2)
for x in module_list:
module_filename = win32process.GetModuleFileNameEx(process_handle, x)
for pattern in MODULE_PATTERNS:
if pattern.match(os.path.basename(module_filename)):
yield module_filename
for venv_file in FILES_FROM_VIRTUALENV:
path = os.path.join(sys.prefix, venv_file)
if os.path.exists(path):
yield path
def _MakeZip(input_dir, output_path):
"""Creates a ZIP archive of the files in the input directory.
Args:
input_dir: the name of the input directory.
output_path: path to the output ZIP archive without extension.
"""
logging.info("Generating zip template file at %s", output_path)
basename, _ = os.path.splitext(output_path)
# TODO(user):pytype: incorrect make_archive() definition in typeshed.
# pytype: disable=wrong-arg-types
shutil.make_archive(
basename, "zip", base_dir=".", root_dir=input_dir, verbose=True)
# pytype: enable=wrong-arg-types
def _MakeMsi(input_dir: str, output_path: str) -> None:
"""Packages the PyInstaller files as MSI."""
wxs_file = package.ResourcePath("grr-response-core",
"install_data/windows/grr.wxs")
fleetspeak_wxs_lib = os.path.join(
config.CONFIG["ClientBuilder.fleetspeak_install_dir"],
"fleetspeak_lib.wxs")
# Don't automatically harvest these files using heat.exe, since they
# are treated specially in grr.wxs.
exclude_files = [
"grr-client.exe",
"dbg_grr-client.exe",
"GRRservice.exe",
"dbg_GRRservice.exe",
"fleetspeak-client.exe",
"grr-client.exe.manifest",
]
def Run(args: List[str]):
logging.info("Running: %s.", args)
subprocess.check_call(args)
with utils.TempDirectory() as temp_dir:
for exclude_file in exclude_files:
shutil.move(
os.path.join(input_dir, exclude_file),
os.path.join(temp_dir, exclude_file))
Run([
os.path.join(config.CONFIG["ClientBuilder.wix_tools_path"], "bin",
"heat.exe"),
"dir",
input_dir,
"-sfrag",
"-srd",
"-cg",
"CompGrrAutoFiles",
"-ag",
"-var",
"var.InputDir",
"-dr",
"INSTALLDIR",
"-out",
os.path.join(temp_dir, "heat.wxs"),
])
for exclude_file in exclude_files:
shutil.move(
os.path.join(temp_dir, exclude_file),
os.path.join(input_dir, exclude_file))
for placeholder_file in [
"grr-config.yaml", "fleetspeak-client.config",
"fleetspeak-service-config.txt"
]:
with open(os.path.join(input_dir, placeholder_file), "w", newline="\n"):
pass
# Due to a limitation in the olefile library, at repacking time, the CAB
    # in the MSI file needs to be repacked to a CAB file of the same size.
    # To do so, we add 3 MB of random (incompressible) data into a padding
# file.
# At repacking time, the padding file will get truncated to make space for
# other files (config files, signed EXE and DLL files) to grow.
with open(os.path.join(input_dir, "padding-file.bin"), "wb") as f:
for _ in range(3):
f.write(os.urandom(1024 * 1024))
# To conditionally restart fleetspeak in a fleetspeak-enabled setup,
# a dummy file is needed.
with open(os.path.join(input_dir, "restart-dummy.txt"), "w"):
pass
# To conditionally delete the legacy nanny service,
# a dummy file is needed.
with open(os.path.join(input_dir, "remove-dummy.txt"), "w"):
pass
object_files = []
for source_file in (wxs_file, fleetspeak_wxs_lib,
os.path.join(temp_dir, "heat.wxs")):
object_file = os.path.join(temp_dir,
os.path.basename(source_file) + ".wxobj")
Run([
os.path.join(config.CONFIG["ClientBuilder.wix_tools_path"], "bin",
"candle.exe"),
source_file,
"-arch",
"x64",
"-ext",
"WixUtilExtension",
"-dFLEETSPEAK_EXECUTABLE=" +
os.path.join(input_dir, "fleetspeak-client.exe"),
"-dVERSION=" + config.CONFIG["Source.version_string"],
"-sw1150",
f"-dInputDir={input_dir}",
"-out",
object_file,
])
object_files.append(object_file)
Run([
os.path.join(config.CONFIG["ClientBuilder.wix_tools_path"], "bin",
"light.exe"),
] + object_files + [
"-ext",
"WixUtilExtension",
"-sw1076",
"-out",
os.path.join(temp_dir, "installer.msi"),
])
with zipfile.ZipFile(output_path, "w") as zip_output:
zip_output.write(os.path.join(temp_dir, "installer.msi"), "installer.msi")
zip_output.write(os.path.join(input_dir, "build.yaml"), "build.yaml")
class WindowsClientBuilder(build.ClientBuilder):
"""Builder class for the Windows client."""
BUILDER_CONTEXT = "Target:Windows"
def BuildNanny(self, dest_dir: str):
"""Use VS2010 to build the windows Nanny service."""
# When running under cygwin, the following environment variables are not set
# (since they contain invalid chars). Visual Studio requires these or it
# will fail.
os.environ["ProgramFiles(x86)"] = r"C:\Program Files (x86)"
with utils.TempDirectory() as temp_dir:
nanny_dir = os.path.join(temp_dir, "grr", "client", "grr_response_client",
"nanny")
nanny_src_dir = config.CONFIG.Get(
"ClientBuilder.nanny_source_dir", context=self.context)
logging.info("Copying Nanny build files from %s to %s", nanny_src_dir,
nanny_dir)
shutil.copytree(
config.CONFIG.Get(
"ClientBuilder.nanny_source_dir", context=self.context),
nanny_dir)
build_type = config.CONFIG.Get(
"ClientBuilder.build_type", context=self.context)
vs_arch = config.CONFIG.Get(
"ClientBuilder.vs_arch", default=None, context=self.context)
# We have to set up the Visual Studio environment first and then call
# msbuild.
env_script = config.CONFIG.Get(
"ClientBuilder.vs_env_script", default=None, context=self.context)
if vs_arch is None or env_script is None or not os.path.exists(
env_script) or config.CONFIG.Get(
"ClientBuilder.use_prebuilt_nanny", context=self.context):
# Visual Studio is not installed. We just use pre-built binaries in that
# case.
logging.warning(
"Visual Studio does not appear to be installed, "
"Falling back to prebuilt GRRNanny binaries."
"If you want to build it you must have VS 2012 installed.")
binaries_dir = config.CONFIG.Get(
"ClientBuilder.nanny_prebuilt_binaries", context=self.context)
shutil.copy(
os.path.join(binaries_dir, "GRRNanny_%s.exe" % vs_arch),
os.path.join(dest_dir, "GRRservice.exe"))
else:
# Lets build the nanny with the VS env script.
subprocess.check_call(
"cmd /c \"\"%s\" && msbuild /p:Configuration=%s;Platform=%s\"" %
(env_script, build_type, vs_arch),
cwd=nanny_dir)
# The templates always contain the same filenames - the repack step
# might rename them later.
shutil.copy(
os.path.join(nanny_dir, vs_arch, build_type, "GRRNanny.exe"),
os.path.join(dest_dir, "GRRservice.exe"))
def CopyBundledFleetspeak(self, output_dir):
src_dir = config.CONFIG.Get(
"ClientBuilder.fleetspeak_install_dir", context=self.context)
shutil.copy(os.path.join(src_dir, "fleetspeak-client.exe"), output_dir)
def _CreateOutputDir(self):
"""Windows templates also include the nanny."""
build_helpers.MakeBuildDirectory(context=self.context)
output_dir = build_helpers.BuildWithPyInstaller(context=self.context)
# Get any dll's that pyinstaller forgot:
for module in _EnumMissingModules():
logging.info("Copying additional dll %s.", module)
shutil.copy(module, output_dir)
self.BuildNanny(output_dir)
# Generate a prod and a debug version of nanny executable.
shutil.copy(
os.path.join(output_dir, "GRRservice.exe"),
os.path.join(output_dir, "dbg_GRRservice.exe"))
with io.open(os.path.join(output_dir, "GRRservice.exe"), "rb+") as fd:
build_helpers.SetPeSubsystem(fd, console=False)
with io.open(os.path.join(output_dir, "dbg_GRRservice.exe"), "rb+") as fd:
build_helpers.SetPeSubsystem(fd, console=True)
# Generate a prod and a debug version of client executable.
shutil.copy(
os.path.join(output_dir, "grr-client.exe"),
os.path.join(output_dir, "dbg_grr-client.exe"))
with io.open(os.path.join(output_dir, "grr-client.exe"), "rb+") as fd:
build_helpers.SetPeSubsystem(fd, console=False)
with io.open(os.path.join(output_dir, "dbg_grr-client.exe"), "rb+") as fd:
build_helpers.SetPeSubsystem(fd, console=True)
self.CopyBundledFleetspeak(output_dir)
return output_dir
def MakeExecutableTemplate(self, output_path):
output_dir = self._CreateOutputDir()
if config.CONFIG["ClientBuilder.build_msi"]:
_MakeMsi(output_dir, output_path)
else:
_MakeZip(output_dir, output_path)
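# Illustrative sketch of how this builder is driven (constructor arguments
# and the output path are assumptions, not taken from the source):
#
#     builder = WindowsClientBuilder(context=["Target:Windows"])
#     builder.MakeExecutableTemplate("grr-client-template.zip")
#
# which produces either a plain ZIP of the PyInstaller output or, when
# ClientBuilder.build_msi is set, a ZIP wrapping installer.msi and build.yaml.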
|
the-stack_106_19189
|
# Ordered list of items on the Custom Item Pool page and the Starting Inventory page
CUSTOMITEMS = [
"bow", "progressivebow", "boomerang", "redmerang", "hookshot",
"mushroom", "powder", "firerod", "icerod", "bombos",
"ether", "quake", "lamp", "hammer", "shovel",
"flute", "bugnet", "book", "bottle", "somaria",
"byrna", "cape", "mirror", "boots", "powerglove",
"titansmitt", "progressiveglove", "flippers", "pearl", "heartpiece",
"heartcontainer", "sancheart", "sword1", "sword2", "sword3",
"sword4", "progressivesword", "shield1", "shield2", "shield3",
"progressiveshield", "mail2", "mail3", "progressivemail", "halfmagic",
"quartermagic", "bombsplus5", "bombsplus10", "arrowsplus5", "arrowsplus10",
"arrow1", "arrow10", "bomb1", "bomb3", "bomb10",
"rupee1", "rupee5", "rupee20", "rupee50", "rupee100",
"rupee300", "blueclock", "greenclock", "redclock", "silversupgrade",
"generickeys", "triforcepieces", "triforcepiecesgoal", "triforce", "rupoor",
"rupoorcost"
]
# These can't be in the Starting Inventory page
CANTSTARTWITH = [
"triforcepiecesgoal", "triforce", "rupoor",
"rupoorcost"
]
# In the same order as CUSTOMITEMS, these are Pretty Labels for each option
CUSTOMITEMLABELS = [
"Bow", "Progressive Bow", "Blue Boomerang", "Red Boomerang", "Hookshot",
"Mushroom", "Magic Powder", "Fire Rod", "Ice Rod", "Bombos",
"Ether", "Quake", "Lamp", "Hammer", "Shovel",
"Ocarina", "Bug Catching Net", "Book of Mudora", "Bottle", "Cane of Somaria",
"Cane of Byrna", "Cape", "Magic Mirror", "Pegasus Boots", "Power Glove",
"Titans Mitts", "Progressive Glove", "Flippers", "Moon Pearl", "Piece of Heart",
"Boss Heart Container", "Sanctuary Heart Container", "Fighter Sword", "Master Sword", "Tempered Sword",
"Golden Sword", "Progressive Sword", "Blue Shield", "Red Shield", "Mirror Shield",
"Progressive Shield", "Blue Mail", "Red Mail", "Progressive Armor", "Magic Upgrade (1/2)",
"Magic Upgrade (1/4)", "Bomb Upgrade (+5)", "Bomb Upgrade (+10)", "Arrow Upgrade (+5)", "Arrow Upgrade (+10)",
"Single Arrow", "Arrows (10)", "Single Bomb", "Bombs (3)", "Bombs (10)",
"Rupee (1)", "Rupees (5)", "Rupees (20)", "Rupees (50)", "Rupees (100)",
"Rupees (300)", "Blue Clock", "Green Clock", "Red Clock", "Silver Arrows",
"Small Key (Universal)", "Triforce Piece", "Triforce Piece Goal", "Triforce", "Rupoor",
"Rupoor Cost"
]
# Settings on each page to save, keyed by the internal names defined in the widget definitions,
# and the YAML/JSON weight-file keys they eventually translate to
SETTINGSTOPROCESS = {
"randomizer": {
"item": {
"hints": "hints",
"retro": "retro",
"shopsanity": "shopsanity",
"pseudoboots": "pseudoboots",
"worldstate": "mode",
"logiclevel": "logic",
"goal": "goal",
"crystals_gt": "crystals_gt",
"crystals_ganon": "crystals_ganon",
"weapons": "swords",
"itempool": "difficulty",
"itemfunction": "item_functionality",
"timer": "timer",
"progressives": "progressive",
"accessibility": "accessibility",
"sortingalgo": "algorithm",
"beemizer": "beemizer"
},
"entrance": {
"openpyramid": "openpyramid",
"shuffleganon": "shuffleganon",
"shufflelinks": "shufflelinks",
"entranceshuffle": "shuffle"
},
"enemizer": {
"enemyshuffle": "shuffleenemies",
"bossshuffle": "shufflebosses",
"enemydamage": "enemy_damage",
"enemyhealth": "enemy_health"
},
"dungeon": {
"mapshuffle": "mapshuffle",
"compassshuffle": "compassshuffle",
"smallkeyshuffle": "keyshuffle",
"bigkeyshuffle": "bigkeyshuffle",
"keydropshuffle": "keydropshuffle",
"dungeondoorshuffle": "door_shuffle",
"dungeonintensity": "intensity",
"potshuffle": "shufflepots",
"experimental": "experimental",
"dungeon_counters": "dungeon_counters",
"mixed_travel": "mixed_travel",
"standardize_palettes": "standardize_palettes",
},
"gameoptions": {
"nobgm": "disablemusic",
"quickswap": "quickswap",
"heartcolor": "heartcolor",
"heartbeep": "heartbeep",
"menuspeed": "fastmenu",
"owpalettes": "ow_palettes",
"uwpalettes": "uw_palettes",
"reduce_flashing": "reduce_flashing"
},
"generation": {
"createspoiler": "create_spoiler",
"createrom": "create_rom",
"calcplaythrough": "calc_playthrough",
"usestartinventory": "usestartinventory",
"usecustompool": "custom",
"saveonexit": "saveonexit"
}
},
"bottom": {
"content": {
"names": "names",
"seed": "seed",
"generationcount": "count"
}
}
}
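# Illustrative lookup (not part of the original file): saving settings is a
# plain dictionary walk over the table above, e.g. the GUI widget
# "worldstate" on the randomizer/item page is written to the weights file
# under the key "mode":
#
#     SETTINGSTOPROCESS["randomizer"]["item"]["worldstate"]  # -> "mode"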
|
the-stack_106_19190
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import sys
from datetime import datetime, timedelta
from django.core.management import BaseCommand, CommandError
from corehq.blobs.migrate import MIGRATIONS
from corehq.blobs.util import set_max_connections
from corehq.util.decorators import change_log_level
from corehq.util.teeout import tee_output
from six.moves import range
DEFAULT_WORKER_POOL_SIZE = 10
DEFAULT_BOTOCORE_MAX_POOL_CONNECTIONS = 10
USAGE = """Usage: ./manage.py run_blob_migration [options] <slug>
Slugs:
{}
""".format('\n'.join(sorted(MIGRATIONS)))
class Command(BaseCommand):
"""
Example: ./manage.py run_blob_migration [options] saved_exports
"""
help = USAGE
def add_arguments(self, parser):
def add_argument(*args, **kw):
name = args[-1].lstrip("-").replace("-", "_")
self.option_names.add(name)
parser.add_argument(*args, **kw)
self.option_names = set()
add_argument(
'slug',
choices=sorted(MIGRATIONS),
help="Migration slug: {}".format(', '.join(sorted(MIGRATIONS))),
)
add_argument(
'--log-dir',
help="Migration log directory.",
)
add_argument(
'--reset',
action="store_true",
default=False,
help="Discard any existing migration state.",
)
add_argument(
'--chunk-size',
type=int,
default=100,
help="Maximum number of records to read from couch at once.",
)
add_argument(
'--num-workers',
type=int,
default=DEFAULT_WORKER_POOL_SIZE,
help=(
"Worker pool size for parallel processing. This option is "
"ignored by migration types that do not support it."
),
)
add_argument(
'--date-range',
help=(
"Creation date range of blobs to be migrated specified as one "
"or two dates in YYYYMMDD format. If only one date is "
"specified, it will be used as the end date, leaving the "
"start date unbounded. Some migrations may not support this"
"parameter. Example value: 20180109-20190109"
),
)
add_argument(
'--process_day_by_day',
action='store_true',
default=False,
help=(
"Run migration for each day in the given date-range separately "
"to allow cancelling and resuming on any day. Only applicable with date-range option"
),
)
@change_log_level('boto3', logging.WARNING)
@change_log_level('botocore', logging.WARNING)
def handle(self, slug, log_dir=None, **options):
try:
migrator = MIGRATIONS[slug]
except KeyError:
raise CommandError(USAGE)
# drop options not added by this command
for name in list(options):
if name not in self.option_names:
options.pop(name)
if not migrator.has_worker_pool:
num_workers = options.pop("num_workers")
if num_workers != DEFAULT_WORKER_POOL_SIZE:
print("--num-workers={} ignored because this migration "
"does not use a worker pool".format(num_workers))
elif options["num_workers"] > DEFAULT_BOTOCORE_MAX_POOL_CONNECTIONS:
set_max_connections(options["num_workers"])
if "date_range" in options:
rng = options["date_range"]
if rng is None:
options.pop("date_range")
else:
if "-" not in rng:
rng = (None, get_date(rng))
else:
rng = rng.split("-")
if len(rng) != 2:
raise CommandError("bad date range: {}".format(rng))
rng = tuple(get_date(v) for v in rng)
# date_range is a tuple containing two date values
# a value of None means that side of the range is unbounded
options["date_range"] = rng
if log_dir is None:
summary_file = log_file = None
else:
now = datetime.utcnow().strftime("%Y%m%dT%H%M%SZ")
summary_file = os.path.join(log_dir,
"{}-blob-migration-{}-summary.txt".format(slug, now))
log_file = os.path.join(log_dir,
"{}-blob-migration-{}.txt".format(slug, now))
assert not os.path.exists(summary_file), summary_file
assert not os.path.exists(log_file), log_file
def _migrate():
with tee_output(summary_file):
try:
total, skips = migrator.migrate(log_file, **options)
if skips:
sys.exit(skips)
except KeyboardInterrupt:
print("stopped by operator")
if options.get('date_range'):
print("while processing date range {}".format(options['date_range']))
sys.exit(1)
process_day_by_day = options.pop('process_day_by_day')
if 'date_range' in options and process_day_by_day:
start, end = options.pop('date_range')
num_days = (end - start).days
for day in range(num_days + 1):
date = start + timedelta(days=day)
options['date_range'] = (date, date)
print("Migrating for date {} ".format(date))
_migrate()
print("Finished migration for date {} ".format(date))
else:
_migrate()
def get_date(value):
if not value:
return None
try:
return datetime.strptime(value, "%Y%m%d").date()
except ValueError:
raise CommandError("bad date value: {}".format(value))
|
the-stack_106_19192
|
"""
Text in plots
"""
#*****************************************************************************
# Copyright (C) 2006 Alex Clemesha <[email protected]>,
# William Stein <[email protected]>,
# 2008 Mike Hansen <[email protected]>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.plot.primitive import GraphicPrimitive
from sage.misc.decorators import options, rename_keyword
from sage.plot.colors import to_mpl_color
class Text(GraphicPrimitive):
"""
Base class for Text graphics primitive.
TESTS:
We test creating some text::
sage: text("I like Fibonacci",(3,5))
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(text("I like Fibonacci",(3,5)))
"""
def __init__(self, string, point, options):
"""
Initializes base class Text.
EXAMPLES::
sage: T = text("I like Fibonacci", (3,5))
sage: t = T[0]
sage: t.string
'I like Fibonacci'
sage: t.x
3.0
sage: t.options()['fontsize']
10
"""
self.string = string
self.x = float(point[0])
self.y = float(point[1])
GraphicPrimitive.__init__(self, options)
def get_minmax_data(self):
"""
Returns a dictionary with the bounding box data. Notice
that, for text, the box is just the location itself.
EXAMPLES::
sage: T = text("Where am I?",(1,1))
sage: t=T[0]
sage: t.get_minmax_data()['ymin']
1.0
sage: t.get_minmax_data()['ymax']
1.0
"""
from sage.plot.plot import minmax_data
return minmax_data([self.x], [self.y], dict=True)
def _repr_(self):
"""
String representation of Text primitive.
EXAMPLES::
sage: T = text("I like cool constants", (pi,e))
sage: t=T[0];t
Text 'I like cool constants' at the point (3.1415926535...,2.7182818284...)
"""
return "Text '%s' at the point (%s,%s)" % (self.string, self.x, self.y)
def _allowed_options(self):
"""
Return the allowed options for the Text class.
EXAMPLES::
sage: T = text("ABC",(1,1),zorder=3)
sage: T[0]._allowed_options()['fontsize']
"How big the text is. Either the size in points or a relative size, e.g. 'smaller', 'x-large', etc"
sage: T[0]._allowed_options()['zorder']
'The layer level in which to draw'
sage: T[0]._allowed_options()['rotation']
'How to rotate the text: angle in degrees, vertical, horizontal'
"""
return {'fontsize': 'How big the text is. Either the size in points or a relative size, e.g. \'smaller\', \'x-large\', etc',
'fontstyle': 'A string either \'normal\', \'italic\' or \'oblique\'',
                'fontweight': 'A numeric value in the range 0-1000 or a string '
                              '(one of \'ultralight\', \'light\', \'normal\', \'regular\', '
                              '\'book\', \'medium\', \'roman\', \'semibold\', \'demibold\', '
                              '\'demi\', \'bold\', \'heavy\', \'extra bold\', \'black\')',
'rgbcolor': 'The color as an RGB tuple',
'background_color': 'The background color',
'bounding_box': 'A dictionary specifying a bounding box',
'hue': 'The color given as a hue',
'alpha': 'A float (0.0 transparent through 1.0 opaque)',
'axis_coords': 'If True use axis coordinates: (0,0) lower left and (1,1) upper right',
'rotation': 'How to rotate the text: angle in degrees, vertical, horizontal',
'vertical_alignment': 'How to align vertically: top, center, bottom',
'horizontal_alignment': 'How to align horizontally: left, center, right',
'zorder': 'The layer level in which to draw',
'clip': 'Whether to clip or not'}
def _plot3d_options(self, options=None):
"""
Translate 2D plot options into 3D plot options.
EXAMPLES::
sage: T = text("ABC",(1,1))
sage: t = T[0]
sage: t.options()['rgbcolor']
(0.0, 0.0, 1.0)
sage: s=t.plot3d()
sage: s.jmol_repr(s.testing_render_params())[0][1]
'color atom [0,0,255]'
"""
if options is None:
options = dict(self.options())
options_3d = {}
for s in ['fontfamily', 'fontsize', 'fontstyle', 'fontweight']:
if s in options:
options_3d[s] = options.pop(s)
# TODO: figure out how to implement rather than ignore
for s in ['axis_coords', 'clip', 'horizontal_alignment',
'rotation', 'vertical_alignment']:
if s in options:
del options[s]
options_3d.update(GraphicPrimitive._plot3d_options(self, options))
return options_3d
def plot3d(self, **kwds):
"""
Plots 2D text in 3D.
EXAMPLES::
sage: T = text("ABC",(1,1))
sage: t = T[0]
sage: s=t.plot3d()
sage: s.jmol_repr(s.testing_render_params())[0][2]
'label "ABC"'
sage: s._trans
(1.0, 1.0, 0)
"""
from sage.plot.plot3d.shapes2 import text3d
options = self._plot3d_options()
options.update(kwds)
return text3d(self.string, (self.x, self.y, 0), **options)
def _render_on_subplot(self, subplot):
"""
TESTS::
sage: t1 = text("Hello",(1,1), vertical_alignment="top", fontsize=30, rgbcolor='black')
sage: t2 = text("World", (1,1), horizontal_alignment="left", fontsize=20, zorder=-1)
sage: t1 + t2 # render the sum
Graphics object consisting of 2 graphics primitives
"""
options = self.options()
opts = {}
opts['color'] = options['rgbcolor']
opts['verticalalignment'] = options['vertical_alignment']
opts['horizontalalignment'] = options['horizontal_alignment']
if 'background_color' in options:
opts['backgroundcolor'] = options['background_color']
if 'fontweight' in options:
opts['fontweight'] = options['fontweight']
if 'alpha' in options:
opts['alpha'] = options['alpha']
if 'fontstyle' in options:
opts['fontstyle'] = options['fontstyle']
if 'bounding_box' in options:
opts['bbox'] = options['bounding_box']
if 'zorder' in options:
opts['zorder'] = options['zorder']
if options['axis_coords']:
opts['transform'] = subplot.transAxes
if 'fontsize' in options:
val = options['fontsize']
if isinstance(val, str):
opts['fontsize'] = val
else:
opts['fontsize'] = int(val)
if 'rotation' in options:
val = options['rotation']
if isinstance(val, str):
opts['rotation'] = options['rotation']
else:
opts['rotation'] = float(options['rotation'])
p = subplot.text(self.x, self.y, self.string, clip_on=options['clip'], **opts)
if not options['clip']:
self._bbox_extra_artists = [p]
@rename_keyword(color='rgbcolor')
@options(fontsize=10, rgbcolor=(0,0,1), horizontal_alignment='center',
vertical_alignment='center', axis_coords=False, clip=False)
def text(string, xy, **options):
r"""
Returns a 2D text graphics object at the point `(x,y)`.
Type ``text.options`` for a dictionary of options for 2D text.
2D OPTIONS:
- ``fontsize`` - How big the text is. Either an integer that
specifies the size in points or a string which specifies a size (one of
'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large')
- ``fontstyle`` - A string either 'normal', 'italic' or 'oblique'
- ``fontweight`` - A numeric value in the range 0-1000 or a string (one of
      'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman',
'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black')
- ``rgbcolor`` - The color as an RGB tuple
- ``hue`` - The color given as a hue
- ``alpha`` - A float (0.0 transparent through 1.0 opaque)
- ``background_color`` - The background color
- ``rotation`` - How to rotate the text: angle in degrees, vertical, horizontal
- ``vertical_alignment`` - How to align vertically: top, center, bottom
- ``horizontal_alignment`` - How to align horizontally: left, center, right
- ``zorder`` - The layer level in which to draw
- ``clip`` - (default: False) Whether to clip or not
- ``axis_coords`` - (default: False) If True, use axis coordinates, so that
(0,0) is the lower left and (1,1) upper right, regardless of the x and y
range of plotted values.
- ``bounding_box`` - A dictionary specifying a bounding box. Currently the text location.
EXAMPLES::
sage: text("Sage graphics are really neat because they use matplotlib!", (2,12))
Graphics object consisting of 1 graphics primitive
.. PLOT::
t = "Sage graphics are really neat because they use matplotlib!"
sphinx_plot(text(t,(2,12)))
Larger font, bold, colored red and transparent text::
sage: text("I had a dream!", (2,12), alpha=0.3, fontsize='large', fontweight='bold', color='red')
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(text("I had a dream!", (2,12), alpha=0.3, fontsize='large', fontweight='bold', color='red'))
By setting ``horizontal_alignment`` to 'left' the text is guaranteed to be
in the lower left no matter what::
sage: text("I got a horse and he lives in a tree", (0,0), axis_coords=True, horizontal_alignment='left')
Graphics object consisting of 1 graphics primitive
.. PLOT::
t = "I got a horse and he lives in a tree"
sphinx_plot(text(t, (0,0), axis_coords=True, horizontal_alignment='left'))
Various rotations::
sage: text("noitator", (0,0), rotation=45.0, horizontal_alignment='left', vertical_alignment='bottom')
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(text("noitator", (0,0), rotation=45.0, horizontal_alignment='left', vertical_alignment='bottom'))
::
sage: text("Sage is really neat!!",(0,0), rotation="vertical")
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(text("Sage is really neat!!",(0,0), rotation="vertical"))
You can also align text differently::
sage: t1 = text("Hello",(1,1), vertical_alignment="top")
sage: t2 = text("World", (1,0.5), horizontal_alignment="left")
sage: t1 + t2 # render the sum
Graphics object consisting of 2 graphics primitives
.. PLOT::
t1 = text("Hello",(1,1), vertical_alignment="top")
t2 = text("World", (1,0.5), horizontal_alignment="left")
sphinx_plot(t1 + t2)
You can save text as part of PDF output::
sage: text("sage", (0,0), rgbcolor=(0,0,0)).save(os.path.join(SAGE_TMP, 'a.pdf'))
Some examples of bounding box::
sage: bbox = {'boxstyle':"rarrow,pad=0.3", 'fc':"cyan", 'ec':"b", 'lw':2}
sage: text("I feel good", (1,2), bounding_box=bbox)
Graphics object consisting of 1 graphics primitive
.. PLOT::
bbox = {'boxstyle':"rarrow,pad=0.3", 'fc':"cyan", 'ec':"b", 'lw':2}
sphinx_plot(text("I feel good", (1,2), bounding_box=bbox))
::
sage: text("So good", (0,0), bounding_box={'boxstyle':'round', 'fc':'w'})
Graphics object consisting of 1 graphics primitive
.. PLOT::
bbox = {'boxstyle':'round', 'fc':'w'}
sphinx_plot(text("So good", (0,0), bounding_box=bbox))
The possible options of the bounding box are 'boxstyle' (one of 'larrow',
'rarrow', 'round', 'round4', 'roundtooth', 'sawtooth', 'square'), 'fc' or
'facecolor', 'ec' or 'edgecolor', 'ha' or 'horizontalalignment', 'va' or
'verticalalignment', 'lw' or 'linewidth'.
A text with a background color::
sage: text("So good", (-2,2), background_color='red')
Graphics object consisting of 1 graphics primitive
.. PLOT::
sphinx_plot(text("So good", (-2,2), background_color='red'))
Text must be 2D (use the text3d command for 3D text)::
sage: t = text("hi",(1,2,3))
Traceback (most recent call last):
...
ValueError: use text3d instead for text in 3d
sage: t = text3d("hi",(1,2,3))
Extra options will get passed on to show(), as long as they are valid::
sage: text("MATH IS AWESOME", (0, 0), fontsize=40, axes=False)
Graphics object consisting of 1 graphics primitive
sage: text("MATH IS AWESOME", (0, 0), fontsize=40).show(axes=False) # These are equivalent
"""
try:
x, y = xy
except ValueError:
if isinstance(xy, (list, tuple)) and len(xy) == 3:
raise ValueError("use text3d instead for text in 3d")
raise
from sage.plot.all import Graphics
options['rgbcolor'] = to_mpl_color(options['rgbcolor'])
point = (float(x), float(y))
g = Graphics()
g._set_extra_kwds(Graphics._extract_kwds_for_show(options, ignore='fontsize'))
g.add_primitive(Text(string, point, options))
return g
|
the-stack_106_19194
|
import click
from setup.executor import Executor
from setup.requirement_exception import RequirementException
from builder.builder import Builder
from uploader.sftp_uploader import SFTPUploader
@click.group()
def cli():
pass
@click.command()
def check():
print("Checking requisites...")
try:
Executor.requisites_check()
except RequirementException as re:
print(str(re))
@click.command()
def serve():
Executor.run_command(['php', '-S', 'localhost:8000', '-t', 'site/'])
@click.command()
def build():
b = Builder()
b.build()
@click.command()
def upload():
u = SFTPUploader()
u.upload()
@click.command()
def deploy():
b = Builder()
b.build()
u = SFTPUploader()
u.upload()
if __name__ == '__main__':
cli.add_command(check)
cli.add_command(serve)
cli.add_command(build)
cli.add_command(upload)
cli.add_command(deploy)
cli()
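# Hedged usage note (assumes this module is saved as e.g. manage_site.py and that
# the setup/builder/uploader packages are importable). Typical invocations:
#
#     python manage_site.py check    # verify required tools are installed
#     python manage_site.py serve    # serve site/ on localhost:8000 via PHP
#     python manage_site.py build    # build the static site
#     python manage_site.py deploy   # build and upload over SFTP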
|
the-stack_106_19196
|
import os
import json
import logging
from seldon_core.utils import (
extract_request_parts,
construct_response,
json_to_seldon_message,
seldon_message_to_json,
construct_response_json,
extract_request_parts_json,
extract_feedback_request_parts,
)
from seldon_core.user_model import (
INCLUDE_METRICS_IN_CLIENT_RESPONSE,
client_predict,
client_aggregate,
client_route,
client_custom_metrics,
client_transform_output,
client_transform_input,
client_send_feedback,
client_health_status,
SeldonNotImplementedError,
)
from seldon_core.flask_utils import SeldonMicroserviceException
from seldon_core.metrics import SeldonMetrics
from seldon_core.metadata import validate_model_metadata, SeldonInvalidMetadataError
from google.protobuf import json_format
from seldon_core.proto import prediction_pb2
from typing import Any, Union, List, Dict
import numpy as np
logger = logging.getLogger(__name__)
def handle_raw_custom_metrics(
msg: Union[prediction_pb2.SeldonMessage, Dict],
seldon_metrics: SeldonMetrics,
is_proto: bool,
):
"""
Update SeldonMetrics object with custom metrics from raw methods.
    Unless the INCLUDE_METRICS_IN_CLIENT_RESPONSE environment variable is set to
    "true", the custom metrics are stripped from msg (they are still recorded
    in SeldonMetrics).
"""
if is_proto:
metrics = seldon_message_to_json(msg.meta).get("metrics", [])
if metrics and not INCLUDE_METRICS_IN_CLIENT_RESPONSE:
del msg.meta.metrics[:]
else:
metrics = msg.get("meta", {}).get("metrics", [])
if metrics and not INCLUDE_METRICS_IN_CLIENT_RESPONSE:
del msg["meta"]["metrics"]
seldon_metrics.update(metrics)
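# Hedged illustration (not part of seldon_core itself): a user model's *_raw method
# can attach custom metrics under msg["meta"]["metrics"]; handle_raw_custom_metrics
# above then records them via SeldonMetrics and, unless the
# INCLUDE_METRICS_IN_CLIENT_RESPONSE environment variable is set, strips them from
# the response returned to the client. The function below is a sketch only.
def _example_predict_raw(request: Dict) -> Dict:
    features = request.get("data", {}).get("ndarray", [])
    return {
        "data": {"ndarray": features},
        "meta": {"metrics": [{"type": "COUNTER", "key": "example_requests", "value": 1}]},
    }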
def predict(
user_model: Any,
request: Union[prediction_pb2.SeldonMessage, List, Dict],
seldon_metrics: SeldonMetrics,
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Call the user model to get a prediction and package the response
Parameters
----------
user_model
User defined class instance
request
The incoming request
Returns
-------
The prediction
"""
is_proto = isinstance(request, prediction_pb2.SeldonMessage)
if hasattr(user_model, "predict_rest") and not is_proto:
logger.warning("predict_rest is deprecated. Please use predict_raw")
return user_model.predict_rest(request)
elif hasattr(user_model, "predict_grpc") and is_proto:
logger.warning("predict_grpc is deprecated. Please use predict_raw")
return user_model.predict_grpc(request)
else:
if hasattr(user_model, "predict_raw"):
try:
response = user_model.predict_raw(request)
handle_raw_custom_metrics(response, seldon_metrics, is_proto)
return response
except SeldonNotImplementedError:
pass
if is_proto:
(features, meta, datadef, data_type) = extract_request_parts(request)
client_response = client_predict(
user_model, features, datadef.names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response(
user_model, False, request, client_response, meta, metrics
)
else:
(features, meta, datadef, data_type) = extract_request_parts_json(request)
class_names = datadef["names"] if datadef and "names" in datadef else []
client_response = client_predict(
user_model, features, class_names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model, False, request, client_response, meta, metrics
)
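# Hedged sketch (illustrative only): the simplest user model that the predict()
# dispatcher above can serve implements a plain predict(features, names, meta=None)
# method; none of the predict_rest/predict_grpc/predict_raw hooks are required.
class _ExampleIdentityModel:
    def predict(self, features, names=None, meta=None):
        # Echo the incoming features back unchanged.
        return features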
def send_feedback(
user_model: Any, request: prediction_pb2.Feedback, predictive_unit_id: str
) -> prediction_pb2.SeldonMessage:
"""
Parameters
----------
user_model
A Seldon user model
request
        SeldonMessage proto
predictive_unit_id
The ID of the enclosing container predictive unit. Will be taken from environment.
Returns
-------
"""
if hasattr(user_model, "send_feedback_rest"):
logger.warning("send_feedback_rest is deprecated. Please use send_feedback_raw")
request_json = json_format.MessageToJson(request)
response_json = user_model.send_feedback_rest(request_json)
return json_to_seldon_message(response_json)
elif hasattr(user_model, "send_feedback_grpc"):
logger.warning("send_feedback_grpc is deprecated. Please use send_feedback_raw")
response_json = user_model.send_feedback_grpc(request)
return json_to_seldon_message(response_json)
else:
if hasattr(user_model, "send_feedback_raw"):
try:
return user_model.send_feedback_raw(request)
except SeldonNotImplementedError:
pass
(datadef_request, features, truth, reward) = extract_feedback_request_parts(
request
)
routing = request.response.meta.routing.get(predictive_unit_id)
client_response = client_send_feedback(
user_model, features, datadef_request.names, reward, truth, routing
)
if client_response is None:
client_response = np.array([])
else:
client_response = np.array(client_response)
return construct_response(user_model, False, request.request, client_response)
def transform_input(
user_model: Any,
request: Union[prediction_pb2.SeldonMessage, List, Dict],
seldon_metrics: SeldonMetrics,
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Parameters
----------
user_model
User defined class to handle transform input
request
The incoming request
Returns
-------
The transformed request
"""
is_proto = isinstance(request, prediction_pb2.SeldonMessage)
if hasattr(user_model, "transform_input_rest"):
logger.warning(
"transform_input_rest is deprecated. Please use transform_input_raw"
)
return user_model.transform_input_rest(request)
elif hasattr(user_model, "transform_input_grpc"):
logger.warning(
"transform_input_grpc is deprecated. Please use transform_input_raw"
)
return user_model.transform_input_grpc(request)
else:
if hasattr(user_model, "transform_input_raw"):
try:
response = user_model.transform_input_raw(request)
handle_raw_custom_metrics(response, seldon_metrics, is_proto)
return response
except SeldonNotImplementedError:
pass
if is_proto:
(features, meta, datadef, data_type) = extract_request_parts(request)
client_response = client_transform_input(
user_model, features, datadef.names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response(
user_model, False, request, client_response, meta, metrics
)
else:
(features, meta, datadef, data_type) = extract_request_parts_json(request)
class_names = datadef["names"] if datadef and "names" in datadef else []
client_response = client_transform_input(
user_model, features, class_names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model, False, request, client_response, meta, metrics
)
def transform_output(
user_model: Any,
request: Union[prediction_pb2.SeldonMessage, List, Dict],
seldon_metrics: SeldonMetrics,
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Parameters
----------
user_model
        User defined class to handle transform output
request
The incoming request
Returns
-------
    The transformed output
"""
is_proto = isinstance(request, prediction_pb2.SeldonMessage)
if hasattr(user_model, "transform_output_rest"):
logger.warning(
"transform_input_rest is deprecated. Please use transform_input_raw"
)
return user_model.transform_output_rest(request)
elif hasattr(user_model, "transform_output_grpc"):
logger.warning(
"transform_input_grpc is deprecated. Please use transform_input_raw"
)
return user_model.transform_output_grpc(request)
else:
if hasattr(user_model, "transform_output_raw"):
try:
response = user_model.transform_output_raw(request)
handle_raw_custom_metrics(response, seldon_metrics, is_proto)
return response
except SeldonNotImplementedError:
pass
if is_proto:
(features, meta, datadef, data_type) = extract_request_parts(request)
client_response = client_transform_output(
user_model, features, datadef.names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response(
user_model, False, request, client_response, meta, metrics
)
else:
(features, meta, datadef, data_type) = extract_request_parts_json(request)
class_names = datadef["names"] if datadef and "names" in datadef else []
client_response = client_transform_output(
user_model, features, class_names, meta=meta
)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model, False, request, client_response, meta, metrics
)
def route(
user_model: Any,
request: Union[prediction_pb2.SeldonMessage, List, Dict],
seldon_metrics: SeldonMetrics,
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Parameters
----------
user_model
A Seldon user model
request
        A SeldonMessage proto
seldon_metrics
A SeldonMetrics instance
Returns
-------
"""
is_proto = isinstance(request, prediction_pb2.SeldonMessage)
if hasattr(user_model, "route_rest"):
logger.warning("route_rest is deprecated. Please use route_raw")
return user_model.route_rest(request)
elif hasattr(user_model, "route_grpc"):
logger.warning("route_grpc is deprecated. Please use route_raw")
return user_model.route_grpc(request)
else:
if hasattr(user_model, "route_raw"):
try:
response = user_model.route_raw(request)
handle_raw_custom_metrics(response, seldon_metrics, is_proto)
return response
except SeldonNotImplementedError:
pass
if is_proto:
(features, meta, datadef, data_type) = extract_request_parts(request)
client_response = client_route(
user_model, features, datadef.names, meta=meta
)
if not isinstance(client_response, int):
raise SeldonMicroserviceException(
"Routing response must be int but got " + str(client_response)
)
client_response_arr = np.array([[client_response]])
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response(
user_model, False, request, client_response_arr, None, metrics
)
else:
(features, meta, datadef, data_type) = extract_request_parts_json(request)
class_names = datadef["names"] if datadef and "names" in datadef else []
client_response = client_route(user_model, features, class_names, meta=meta)
if not isinstance(client_response, int):
raise SeldonMicroserviceException(
"Routing response must be int but got " + str(client_response)
)
client_response_arr = np.array([[client_response]])
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model, False, request, client_response_arr, None, metrics
)
def aggregate(
user_model: Any,
request: Union[prediction_pb2.SeldonMessageList, List, Dict],
seldon_metrics: SeldonMetrics,
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Aggregate a list of payloads
Parameters
----------
user_model
A Seldon user model
request
        SeldonMessageList proto
seldon_metrics
A SeldonMetrics instance
Returns
-------
Aggregated SeldonMessage proto
"""
def merge_meta(meta_list):
tags = {}
for meta in meta_list:
if meta:
tags.update(meta.get("tags", {}))
return {"tags": tags}
def merge_metrics(meta_list, custom_metrics):
metrics = []
for meta in meta_list:
if meta:
metrics.extend(meta.get("metrics", []))
metrics.extend(custom_metrics)
return metrics
is_proto = isinstance(request, prediction_pb2.SeldonMessageList)
if hasattr(user_model, "aggregate_rest"):
logger.warning("aggregate_rest is deprecated. Please use aggregate_raw")
return user_model.aggregate_rest(request)
elif hasattr(user_model, "aggregate_grpc"):
logger.warning("aggregate_grpc is deprecated. Please use aggregate_raw")
return user_model.aggregate_grpc(request)
else:
if hasattr(user_model, "aggregate_raw"):
try:
response = user_model.aggregate_raw(request)
handle_raw_custom_metrics(response, seldon_metrics, is_proto)
return response
except SeldonNotImplementedError:
pass
if is_proto:
features_list = []
names_list = []
meta_list = []
for msg in request.seldonMessages:
(features, meta, datadef, data_type) = extract_request_parts(msg)
features_list.append(features)
names_list.append(datadef.names)
meta_list.append(meta)
client_response = client_aggregate(user_model, features_list, names_list)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response(
user_model,
False,
request.seldonMessages[0],
client_response,
merge_meta(meta_list),
merge_metrics(meta_list, metrics),
)
else:
features_list = []
names_list = []
if isinstance(request, list):
msgs = request
elif "seldonMessages" in request and isinstance(
request["seldonMessages"], list
):
msgs = request["seldonMessages"]
else:
raise SeldonMicroserviceException(
f"Invalid request data type: {request}"
)
meta_list = []
for msg in msgs:
(features, meta, datadef, data_type) = extract_request_parts_json(msg)
class_names = datadef["names"] if datadef and "names" in datadef else []
features_list.append(features)
names_list.append(class_names)
meta_list.append(meta)
client_response = client_aggregate(user_model, features_list, names_list)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model,
False,
msgs[0],
client_response,
merge_meta(meta_list),
merge_metrics(meta_list, metrics),
)
def health_status(
user_model: Any, seldon_metrics: SeldonMetrics
) -> Union[prediction_pb2.SeldonMessage, List, Dict]:
"""
Call the user model to check the health of the model
Parameters
----------
user_model
User defined class instance
seldon_metrics
A SeldonMetrics instance
Returns
-------
Health check output
"""
if hasattr(user_model, "health_status_raw"):
try:
return user_model.health_status_raw()
except SeldonNotImplementedError:
pass
client_response = client_health_status(user_model)
metrics = client_custom_metrics(user_model, seldon_metrics)
return construct_response_json(
user_model, False, {}, client_response, None, metrics
)
def init_metadata(user_model: Any) -> Dict:
"""
Call the user model to get the model init_metadata
Parameters
----------
user_model
User defined class instance
Returns
-------
Validated model metadata
"""
# meta_user: load metadata defined in the user_model instance
if hasattr(user_model, "init_metadata"):
try:
meta_user = user_model.init_metadata()
except SeldonNotImplementedError:
meta_user = {}
else:
meta_user = {}
if not isinstance(meta_user, dict):
logger.error("init_metadata must return dict")
meta_user = {}
# meta_env: load metadata from environmental variable
try:
meta_env = json.loads(os.environ.get("MODEL_METADATA", "{}"))
except json.JSONDecodeError:
meta_env = {}
meta = {**meta_user, **meta_env}
try:
return validate_model_metadata(meta)
except SeldonInvalidMetadataError:
logger.error(f"Failed to validate metadata {meta}")
return None
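# Hedged example (values are illustrative): metadata supplied through the
# MODEL_METADATA environment variable is merged over the model's init_metadata()
# output before validation, so environment-provided keys win on conflicts, e.g.
#
#     export MODEL_METADATA='{"name": "my-model", "versions": ["v0.1"]}'
def _example_merge_metadata(meta_user: Dict, meta_env: Dict) -> Dict:
    # Later mappings take precedence in a dict merge.
    return {**meta_user, **meta_env}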
|
the-stack_106_19198
|
#!/usr/bin/env pythonw
# /********************************************************************
# Filename: edit_corners.py
# Author: AHN
# Creation Date: Mar 12, 2018
# **********************************************************************/
#
# Editor to define four corners in a Goban picture and save the intersections
# to an sgf in the GC[] tag.
#
from __future__ import division, print_function
from pdb import set_trace as BP
import os,sys,re,json,copy,shutil
from io import StringIO
import numpy as np
from numpy.random import random
import argparse
import matplotlib as mpl
import matplotlib.patches as patches
# mpl.use('Agg') # This makes matplotlib work without a display
from matplotlib import pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons
import cv2
# Where am I
SCRIPTPATH = os.path.dirname(os.path.realpath(__file__))
SELECTED_CORNER = 'TL'
SELECTED_BEW = 'Black'
CORNER_COORDS = { 'TL': [0.0, 0.0], 'TR': [0.0, 0.0], 'BR': [0.0, 0.0], 'BL': [0.0, 0.0] }
AX_IMAGE = None
AX_STATUS = None
FIG = None
IMG = None
BOARDSZ = 19
NEW_INTERSECTIONS=[]
INTERSECTIONS=[]
DIAGRAM = []
FNAMES = []
#---------------------------
def usage(printmsg=False):
name = os.path.basename(__file__)
msg = '''
Name: %s
Synopsis: %s --run
Description:
Editor to define four corners in a Goban picture and save the intersections
to an sgf in the GC[] tag.
Goes through all png and jpg files in the current folder.
Example:
%s --run
''' % (name,name,name)
if printmsg:
print(msg)
exit(1)
else:
return msg
#-----------
def main():
global AX_IMAGE, AX_STATUS
global FIG
global FNAMES,FNUM
if len(sys.argv) == 1:
usage(True)
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument( '--run', required=True, action='store_true')
args = parser.parse_args()
FNAMES = os.listdir('.')
FNAMES = [f for f in FNAMES if (f.endswith('.png') or f.endswith('.jpg')) and not f.startswith('.') ]
FNAMES = sorted(FNAMES)
FIG = plt.figure( figsize=(9,8))
AX_IMAGE = FIG.add_axes( [0.07, 0.06, 0.5, 0.9] )
cid = FIG.canvas.mpl_connect('button_press_event', onclick)
# Show button computes and shows intersections
ax_show = FIG.add_axes( [0.70, 0.28, 0.1, 0.05] )
btn_show = Button( ax_show, 'Show')
btn_show.on_clicked( cb_btn_show)
# Clear button sets all intersections to clear
ax_clear = FIG.add_axes( [0.70, 0.22, 0.1, 0.05] )
btn_clear = Button( ax_clear, 'Clear')
btn_clear.on_clicked( cb_btn_clear)
# Save button
ax_save = FIG.add_axes( [0.70, 0.16, 0.1, 0.05] )
btn_save = Button( ax_save, 'Save')
btn_save.on_clicked( cb_btn_save)
# Next button
ax_next = FIG.add_axes( [0.70, 0.09, 0.1, 0.05] )
btn_next = Button( ax_next, 'Next')
btn_next.on_clicked( cb_btn_next)
# Prev button
ax_prev = FIG.add_axes( [0.70, 0.03, 0.1, 0.05] )
btn_prev = Button( ax_prev, 'Prev')
btn_prev.on_clicked( cb_btn_prev)
# Radiobutton for corner and Black Empty White
ax_radio = FIG.add_axes( [0.70, 0.5, 0.2, 0.2] )
rbtn_corner_bew = RadioButtons( ax_radio, ('TL', 'TR', 'BR', 'BL', 'Black', 'Empty', 'White' ))
rbtn_corner_bew.on_clicked( cb_rbtn_corner_bew)
# Status Message
AX_STATUS = FIG.add_axes( [0.07, 0.02, 0.5, 0.05] )
AX_STATUS.axis('off')
#show_text( AX_STATUS, 'Status', 'red')
FNUM=-1
cb_btn_next()
plt.show()
# Show next file
#-----------------------------
def cb_btn_next( event=None):
global FNUM
FNUM += 1
FNUM %= len(FNAMES)
show_next_prev()
# Show prev file
#-----------------------------
def cb_btn_prev( event=None):
global FNUM
FNUM -= 1
FNUM %= len(FNAMES)
show_next_prev()
# Display current file for the first time
#------------------------------------------
def show_next_prev():
global IMG
global INTERSECTIONS
global DIAGRAM
global SGF_FILE
global CORNER_COORDS
fname = FNAMES[FNUM]
show_text( AX_STATUS, '%d/%d %s' % (FNUM+1, len(FNAMES), fname))
# Load image
IMG = cv2.imread( fname)
IMG = cv2.cvtColor( IMG, cv2.COLOR_BGR2RGB)
AX_IMAGE.cla()
AX_IMAGE.imshow( IMG, origin='upper')
# Sgf
SGF_FILE = os.path.splitext( fname)[0] + '.sgf'
INTERSECTIONS, DIAGRAM = get_isec_coords( SGF_FILE)
CORNER_COORDS = { 'TL': [0.0, 0.0], 'TR': [0.0, 0.0], 'BR': [0.0, 0.0], 'BL': [0.0, 0.0] }
draw_intersections( INTERSECTIONS, 5, 'g')
#===========
#=== Sgf ===
#===========
# Read an sgf file and linearize it into a list
# ['b','w','e',...]
#-------------------------------------------------
def linearize_sgf( sgf):
boardsz = int( get_sgf_tag( 'SZ', sgf))
if not 'KifuCam' in sgf:
# The AW[ab][ce]... case
match = re.search( 'AW(\[[a-s][a-s]\])*', sgf)
whites = match.group(0)
whites = re.sub( 'AW', '', whites)
whites = re.sub( '\[', 'AW[', whites)
whites = re.findall( 'AW' + '\[[^\]]*', whites)
match = re.search ( 'AB(\[[a-s][a-s]\])*', sgf)
blacks = match.group(0)
blacks = re.sub( 'AB', '', blacks)
blacks = re.sub( '\[', 'AB[', blacks)
blacks = re.findall( 'AB' + '\[[^\]]*', blacks)
else:
# The AW[ab]AW[ce]... case
whites = re.findall( 'AW' + '\[[^\]]*', sgf)
blacks = re.findall( 'AB' + '\[[^\]]*', sgf)
res = ['EMPTY'] * boardsz * boardsz
for w in whites:
pos = w.split( '[')[1]
col = ord( pos[0]) - ord( 'a')
row = ord( pos[1]) - ord( 'a')
idx = col + row * boardsz
res[idx] = 'WHITE'
for b in blacks:
pos = b.split( '[')[1]
col = ord( pos[0]) - ord( 'a')
row = ord( pos[1]) - ord( 'a')
idx = col + row * boardsz
res[idx] = 'BLACK'
return res
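# Hedged example (tiny 2x2 board, purely illustrative): linearize_sgf() maps an sgf
# position to a row-major list of 'BLACK'/'WHITE'/'EMPTY' strings.
#-------------------------------------------------
def _example_linearize():
    # Black stone at the top-left, white stone at the bottom-right;
    # returns ['BLACK', 'EMPTY', 'EMPTY', 'WHITE'].
    return linearize_sgf( '(;SZ[2]AB[aa]AW[bb])')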
# e.g for board size, call get_sgf_tag( sgf, "SZ")
#---------------------------------------------------
def get_sgf_tag( tag, sgf):
m = re.search( tag + '\[[^\]]*', sgf)
if not m: return ''
mstr = m.group(0)
res = mstr.split( '[')[1]
res = res.split( ']')[0]
return res
# Get list of intersection coords from sgf GC tag
#--------------------------------------------------
def get_isec_coords( sgffile):
try:
with open( sgffile) as f: sgf = f.read()
except:
sgf = '(;GM[1]GN[]FF[4]CA[UTF-8]AP[KifuCam]RU[Chinese]PB[Black]PW[White]BS[0]WS[0]SZ[19] )'
sgf = sgf.replace( '\\','')
boardsz = int( get_sgf_tag( 'SZ', sgf))
diagram = linearize_sgf( sgf)
    if 'intersections:' not in sgf and 'intersections\\:' not in sgf:
print('no intersections in ' + sgffile)
intersections = [[0.0, 0.0]] * boardsz * boardsz
else:
intersections = get_sgf_tag( 'GC', sgf)
intersections = re.sub( '\(','[',intersections)
intersections = re.sub( '\)',']',intersections)
intersections = re.sub( 'intersections','"intersections"',intersections)
intersections = re.sub( '#.*','',intersections)
intersections = '{' + intersections + '}'
intersections = json.loads( intersections)
intersections = intersections[ 'intersections']
return (intersections, diagram)
# Read sgf, replace intersections, write back
#-----------------------------------------------
def isecs2sgf( sgffile, intersections):
try:
with open( sgffile) as f: sgf = f.read()
except:
sgf = '(;GM[1]GN[]FF[4]CA[UTF-8]AP[KifuCam]RU[Chinese]PB[Black]PW[White]BS[0]WS[0]SZ[19] )'
sgf = sgf.replace( '\\','')
gc = get_sgf_tag( 'GC', sgf)
phi = 0
if 'phi:' in gc:
phi = re.sub( r'.*#phi:([^#]*)#.*',r'\1',gc)
theta = 0
if 'theta:' in gc:
theta = re.sub( r'.*#theta:([^#]*)#.*',r'\1',gc)
tstr = json.dumps( intersections)
tstr = re.sub( '\[','(',tstr)
tstr = re.sub( '\]',')',tstr)
tstr = 'GC[intersections:' + tstr + '#' + 'phi:%.2f#' % float(phi) + 'theta:%.2f#' % float(theta) + ']'
res = sgf
res = re.sub( '(GC\[[^\]]*\])', '', res)
res = re.sub( '(SZ\[[^\]]*\])', r'\1' + tstr, res)
res = re.sub( r'\s*','', res)
open( sgffile, 'w').write( res)
# Replace anything after the GC tag with the new position
#----------------------------------------------------------
def overwrite_sgf( sgffile, diagram):
sgf = open( sgffile).read()
boardsz = int( get_sgf_tag( 'SZ', sgf))
# Cut off after GC tag
sgf = re.sub( '(.*GC\[[^\]]*\]).*', r'\1', sgf)
moves = ''
for i,bew in enumerate(diagram):
row = i // boardsz
col = i % boardsz
ccol = chr( ord('a') + col)
crow = chr( ord('a') + row)
if bew == 'WHITE': tag = 'AW'
elif bew == 'BLACK': tag = 'AB'
else: continue
moves += tag + "[" + ccol + crow + "]"
sgf += moves + ')'
open( sgffile, 'w').write( sgf)
#======================
#=== Click Handlers ===
#======================
#----------------------
def onclick( event):
global SELECTED_CORNER
if not event.xdata: return
if event.xdata < 1: return # The click was on a button, not the image
if SELECTED_CORNER:
handle_corner_click( event)
else:
handle_bew_click( event)
# Image click in corner mode
#--------------------------------
def handle_corner_click( event):
global SELECTED_CORNER
global CORNER_COORDS
global AX_IMAGE
global FIG
CORNER_COORDS[SELECTED_CORNER][0] = event.xdata
CORNER_COORDS[SELECTED_CORNER][1] = event.ydata
s = 15
r = s / 2.0
col = 'r'
if SELECTED_CORNER == 'TR': col = 'm'
elif SELECTED_CORNER == 'BR': col = 'b'
elif SELECTED_CORNER == 'BL': col = 'c'
ell = patches.Ellipse( (event.xdata, event.ydata), s, s, edgecolor=col, facecolor='none')
#rect = patches.Rectangle((event.xdata, event.ydata), s, s, linewidth=1, edgecolor='r', facecolor='none')
#rect = patches.Rectangle((100, 100), s, s, linewidth=1, edgecolor='r', facecolor='none')
AX_IMAGE.add_patch( ell)
FIG.canvas.draw()
#plt.show()
print( '%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
('double' if event.dblclick else 'single', event.button,
event.x, event.y, event.xdata, event.ydata))
# Find closest intersection to x,y
#-------------------------------------
def closest_isec( x, y, isecs):
mind = 1E9
minidx = -1
for idx, isec in enumerate(isecs):
d = (x-isec[0])*(x-isec[0]) + (y-isec[1])*(y-isec[1])
if d < mind:
mind = d
minidx = idx
return (isecs[minidx][0], isecs[minidx][1], minidx)
# Image click in Black White Empty mode
#----------------------------------------
def handle_bew_click( event):
# Find closest intersection
x,y,idx = closest_isec( event.xdata, event.ydata, NEW_INTERSECTIONS)
# Draw color on intersection
if SELECTED_BEW == 'Black':
col = 'g'
DIAGRAM[idx] = 'BLACK'
elif SELECTED_BEW == 'Empty':
col = 'b'
DIAGRAM[idx] = 'EMPTY'
elif SELECTED_BEW == 'White':
col = 'r'
DIAGRAM[idx] = 'WHITE'
s = 4
ell = patches.Ellipse( (x, y), s, s, edgecolor=col, facecolor=col)
AX_IMAGE.add_patch( ell)
FIG.canvas.draw()
# Mark Black, White, Empty on the screen
#--------------------------------------------
def paint_diagram( diagram, intersections):
for idx,bew in enumerate(diagram):
isec = intersections[idx]
if bew == 'BLACK':
col = 'g'
elif bew == 'EMPTY':
col = 'b'
elif bew == 'WHITE':
col = 'r'
s = 4
ell = patches.Ellipse( (isec[0], isec[1]), s, s, edgecolor=col, facecolor=col)
AX_IMAGE.add_patch( ell)
FIG.canvas.draw()
# #----------------------------
# def cb_btn_reset( event):
# CORNER_COORDS = { 'TL': [0.0, 0.0], 'TR': [0.0, 0.0], 'BR': [0.0, 0.0], 'BL': [0.0, 0.0] }
# AX_IMAGE.cla()
# AX_IMAGE.imshow( IMG, origin='upper')
# FIG.canvas.draw()
# Write the sgf back, with the intersections swapped out
#----------------------------------------------------------
def cb_btn_save( event):
isecs2sgf( SGF_FILE, np.round(NEW_INTERSECTIONS).tolist())
overwrite_sgf( SGF_FILE, DIAGRAM)
print( 'saved')
sys.stdout.flush()
# Set all intersections to empty
#----------------------------------------------------------
def cb_btn_clear( event):
for idx,d in enumerate(DIAGRAM):
DIAGRAM[idx] = 'EMPTY'
cb_btn_show( event)
# Compute and show intersections from corners
#------------------------------------------------
def cb_btn_show( event):
global NEW_INTERSECTIONS
if CORNER_COORDS['TL'][0] == 0.0: # didn't mark corners
CORNER_COORDS['TL'] = INTERSECTIONS[0]
CORNER_COORDS['TR'] = INTERSECTIONS[BOARDSZ-1]
CORNER_COORDS['BR'] = INTERSECTIONS[BOARDSZ*BOARDSZ-1]
CORNER_COORDS['BL'] = INTERSECTIONS[BOARDSZ*BOARDSZ-BOARDSZ]
tl = CORNER_COORDS['TL']
tr = CORNER_COORDS['TR']
br = CORNER_COORDS['BR']
bl = CORNER_COORDS['BL']
src_quad = np.array( [tl,tr,br,bl]).astype('float32')
width = IMG.shape[1]
height = IMG.shape[0]
marg = width / 20.0
# Transform corners to be a square
s = width-2*marg
target_square = np.array( [[marg,marg], [marg+s,marg], [marg+s,marg+s], [marg,marg+s]]).astype('float32')
# Compute the grid
intersections_zoomed = []
ss = s / (BOARDSZ-1.0)
for r in range(BOARDSZ):
for c in range(BOARDSZ):
x = marg + c*ss
y = marg + r*ss
intersections_zoomed.append([x,y])
intersections_zoomed = np.array( intersections_zoomed).astype('float32')
# Needs extra dimension
extra = intersections_zoomed.reshape( 1,len(intersections_zoomed), 2)
# Transform back
M = cv2.getPerspectiveTransform( target_square, src_quad)
intersections = cv2.perspectiveTransform( extra, M)
intersections = intersections.reshape( len(intersections_zoomed), 2)
NEW_INTERSECTIONS = intersections.tolist()
# Show
AX_IMAGE.cla()
AX_IMAGE.imshow( IMG, origin='upper')
#draw_intersections( intersections, 5, 'r')
paint_diagram( DIAGRAM, NEW_INTERSECTIONS)
# Draw circles on intersections
#-----------------------------------------------
def draw_intersections( intersections, r, col):
for isec in intersections:
ell = patches.Ellipse( isec, r, r, edgecolor=col, facecolor='none')
AX_IMAGE.add_patch( ell)
FIG.canvas.draw()
# Choose corner
#--------------------------------
def cb_rbtn_corner_bew( label):
global SELECTED_CORNER
global SELECTED_BEW
if label in ('Black', 'Empty', 'White'):
SELECTED_CORNER = ''
SELECTED_BEW = label
print( 'rbtn_bew %s' % label)
else:
SELECTED_BEW = ''
SELECTED_CORNER = label
print( 'rbtn_corner %s' % label)
#-------------------------------------------------
def show_text( ax, txt, color='black', size=10):
ax.cla()
ax.axis( 'off')
ax.text( 0,0, txt,
verticalalignment='bottom', horizontalalignment='left',
transform = ax.transAxes,
fontname = 'monospace', style = 'normal',
color=color, fontsize=size)
#FIG.canvas.draw()
plt.pause( 0.0001)
if __name__ == '__main__':
main()
|
the-stack_106_19199
|
"""Simulates a DB being available before we ran a command """
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
# Replaces the behavior of time.sleep and returns True
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
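# Hedged sketch (not the project's actual implementation): a wait_for_db command
# compatible with the tests above could live in core/management/commands/wait_for_db.py
# and look roughly like this:
#
#     import time
#     from django.core.management.base import BaseCommand
#     from django.db import connections
#     from django.db.utils import OperationalError
#
#     class Command(BaseCommand):
#         """Pause execution until the default database is available."""
#
#         def handle(self, *args, **options):
#             self.stdout.write('Waiting for database...')
#             db_conn = None
#             while not db_conn:
#                 try:
#                     db_conn = connections['default']
#                 except OperationalError:
#                     self.stdout.write('Database unavailable, waiting 1 second...')
#                     time.sleep(1)
#             self.stdout.write(self.style.SUCCESS('Database available!'))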
|
the-stack_106_19200
|
"""A Python MapSequence Object"""
#pylint: disable=W0401,W0614,W0201,W0212,W0404
from copy import deepcopy
import numpy as np
import matplotlib.animation
import numpy.ma as ma
import astropy.units as u
from sunpy.map import GenericMap
from sunpy.visualization.animator.mapsequenceanimator import MapSequenceAnimator
from sunpy.visualization import wcsaxes_compat
from sunpy.visualization import axis_labels_from_ctype
from sunpy.util import expand_list
__all__ = ['MapSequence']
class MapSequence(object):
"""
MapSequence
A series of Maps in a single object.
Parameters
----------
args : `list`
A list of Map instances
sortby : `datetime.datetime`
Method by which the MapSequence should be sorted along the z-axis.
derotate : `bool`
        Apply a derotation to the data. Defaults to False.
To coalign a mapsequence so that solar features remain on the same pixels,
please see the "Coalignment of MapSequences" note below.
Attributes
----------
maps : `list`
This attribute holds the list of Map instances obtained from parameter args.
Examples
--------
>>> import sunpy.map
>>> mapsequence = sunpy.map.Map('images/*.fits', sequence=True) # doctest: +SKIP
MapSequences can be co-aligned using the routines in sunpy.image.coalignment.
"""
#pylint: disable=W0613,E1101
def __init__(self, *args, **kwargs):
"""Creates a new Map instance"""
# Hack to get around Python 2.x not backporting PEP 3102.
sortby = kwargs.pop('sortby', 'date')
derotate = kwargs.pop('derotate', False)
self.maps = expand_list(args)
for m in self.maps:
if not isinstance(m, GenericMap):
raise ValueError(
'MapSequence expects pre-constructed map objects.')
# Optionally sort data
if sortby is not None:
            if sortby == 'date':
self.maps.sort(key=self._sort_by_date())
else:
raise ValueError("Only sort by date is supported")
if derotate:
self._derotate()
def __getitem__(self, key):
"""Overriding indexing operation. If the key results in a single map,
then a map object is returned. This allows functions like enumerate to
work. Otherwise, a mapsequence is returned."""
if isinstance(self.maps[key], GenericMap):
return self.maps[key]
else:
return MapSequence(self.maps[key])
def __len__(self):
"""Return the number of maps in a mapsequence."""
return len(self.maps)
# Sorting methods
@classmethod
def _sort_by_date(cls):
return lambda m: m.date # maps.sort(key=attrgetter('date'))
def _derotate(self):
"""Derotates the layers in the MapSequence"""
pass
def plot(self, axes=None, resample=None, annotate=True,
interval=200, plot_function=None, **kwargs):
"""
        An animation plotting routine that animates each element in the
MapSequence
Parameters
----------
axes: mpl axes
axes to plot the animation on, if none uses current axes
resample: list or False
Draws the map at a lower resolution to increase the speed of
animation. Specify a list as a fraction i.e. [0.25, 0.25] to
plot at 1/4 resolution.
[Note: this will only work where the map arrays are the same size]
annotate: bool
Annotate the figure with scale and titles
interval: int
Animation interval in ms
plot_function : function
A function to be called as each map is plotted. Any variables
returned from the function will have their ``remove()`` method called
at the start of the next frame so that they are removed from the plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import matplotlib.animation as animation
>>> from sunpy.map import Map
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.plot(colorbar=True) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
Plot the map at 1/2 original resolution
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.plot(resample=[0.5, 0.5], colorbar=True) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
Save an animation of the MapSequence
>>> sequence = Map(res, sequence=True) # doctest: +SKIP
>>> ani = sequence.plot() # doctest: +SKIP
>>> Writer = animation.writers['ffmpeg'] # doctest: +SKIP
>>> writer = Writer(fps=10, metadata=dict(artist='SunPy'), bitrate=1800) # doctest: +SKIP
>>> ani.save('mapsequence_animation.mp4', writer=writer) # doctest: +SKIP
Save an animation with the limb at each time step
>>> def myplot(fig, ax, sunpy_map):
... p = sunpy_map.draw_limb()
... return p
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.peek(plot_function=myplot) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
if not axes:
axes = wcsaxes_compat.gca_wcs(self.maps[0].wcs)
fig = axes.get_figure()
if not plot_function:
plot_function = lambda fig, ax, smap: []
removes = []
# Normal plot
def annotate_frame(i):
axes.set_title("{s.name}".format(s=self[i]))
axes.set_xlabel(axis_labels_from_ctype(self[i].coordinate_system[0],
self[i].spatial_units[0]))
axes.set_ylabel(axis_labels_from_ctype(self[i].coordinate_system[1],
self[i].spatial_units[1]))
if resample:
if self.all_maps_same_shape():
resample = u.Quantity(self.maps[0].dimensions) * np.array(resample)
ani_data = [amap.resample(resample) for amap in self.maps]
else:
raise ValueError('Maps in mapsequence do not all have the same shape.')
else:
ani_data = self.maps
im = ani_data[0].plot(axes=axes, **kwargs)
def updatefig(i, im, annotate, ani_data, removes):
while removes:
removes.pop(0).remove()
im.set_array(ani_data[i].data)
im.set_cmap(ani_data[i].plot_settings['cmap'])
norm = deepcopy(ani_data[i].plot_settings['norm'])
# The following explicit call is for bugged versions of Astropy's
# ImageNormalize
norm.autoscale_None(ani_data[i].data)
im.set_norm(norm)
if wcsaxes_compat.is_wcsaxes(axes):
im.axes.reset_wcs(ani_data[i].wcs)
wcsaxes_compat.default_wcs_grid(axes)
else:
bl = ani_data[i]._get_lon_lat(ani_data[i].bottom_left_coord)
tr = ani_data[i]._get_lon_lat(ani_data[i].top_right_coord)
x_range = list(u.Quantity([bl[0], tr[0]]).to(ani_data[i].spatial_units[0]).value)
y_range = list(u.Quantity([bl[1], tr[1]]).to(ani_data[i].spatial_units[1]).value)
                im.set_extent(np.concatenate((x_range, y_range)))
if annotate:
annotate_frame(i)
removes += list(plot_function(fig, axes, ani_data[i]))
ani = matplotlib.animation.FuncAnimation(fig, updatefig,
frames=list(range(0, len(ani_data))),
fargs=[im, annotate, ani_data, removes],
interval=interval,
blit=False)
return ani
def peek(self, resample=None, **kwargs):
"""
        An animation plotting routine that animates each element in the
MapSequence
Parameters
----------
fig: mpl.figure
Figure to use to create the explorer
resample: list or False
Draws the map at a lower resolution to increase the speed of
animation. Specify a list as a fraction i.e. [0.25, 0.25] to
plot at 1/4 resolution.
[Note: this will only work where the map arrays are the same size]
annotate: bool
Annotate the figure with scale and titles
interval: int
Animation interval in ms
colorbar: bool
Plot colorbar
plot_function : function
A function to call to overplot extra items on the map plot.
For more information see `sunpy.visualization.MapSequenceAnimator`.
Returns
-------
mapsequenceanim : `sunpy.visualization.MapSequenceAnimator`
A mapsequence animator instance.
See Also
--------
sunpy.visualization.mapsequenceanimator.MapSequenceAnimator
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sunpy.map import Map
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.peek(colorbar=True) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
Plot the map at 1/2 original resolution
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.peek(resample=[0.5, 0.5], colorbar=True) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
Plot the map with the limb at each time step
>>> def myplot(fig, ax, sunpy_map):
... p = sunpy_map.draw_limb()
... return p
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.peek(plot_function=myplot) # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
Decide you want an animation:
>>> sequence = Map(files, sequence=True) # doctest: +SKIP
>>> ani = sequence.peek(resample=[0.5, 0.5], colorbar=True) # doctest: +SKIP
>>> mplani = ani.get_animation() # doctest: +SKIP
"""
if resample:
if self.all_maps_same_shape():
plot_sequence = MapSequence()
resample = u.Quantity(self.maps[0].dimensions) * np.array(resample)
for amap in self.maps:
plot_sequence.maps.append(amap.resample(resample))
else:
raise ValueError('Maps in mapsequence do not all have the same shape.')
else:
plot_sequence = self
return MapSequenceAnimator(plot_sequence, **kwargs)
def all_maps_same_shape(self):
"""
        Tests if all the maps have the same number of pixels in the x and y
directions.
"""
return np.all([m.data.shape == self.maps[0].data.shape for m in self.maps])
def at_least_one_map_has_mask(self):
"""
Tests if at least one map has a mask.
"""
return np.any([m.mask is not None for m in self.maps])
def as_array(self):
"""
If all the map shapes are the same, their image data is rendered
into the appropriate numpy object. If none of the maps have masks,
then the data is returned as a (ny, nx, nt) ndarray. If all the maps
have masks, then the data is returned as a (ny, nx, nt) masked array
with all the masks copied from each map. If only some of the maps
        have masks, then the data is returned as a (ny, nx, nt) masked array,
        with masks copied from maps as appropriate; maps that do not have a
mask are supplied with a mask that is full of False entries.
If all the map shapes are not the same, a ValueError is thrown.
"""
if self.all_maps_same_shape():
data = np.swapaxes(np.swapaxes(np.asarray([m.data for m in self.maps]), 0, 1).copy(), 1, 2).copy()
if self.at_least_one_map_has_mask():
mask_sequence = np.zeros_like(data, dtype=bool)
for im, m in enumerate(self.maps):
if m.mask is not None:
mask_sequence[:, :, im] = m.mask
return ma.masked_array(data, mask=mask_sequence)
else:
return data
else:
raise ValueError('Not all maps have the same shape.')
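    # Hedged usage sketch (illustrative): for a sequence of N maps sharing an
    # (ny, nx) pixel grid, as_array() stacks the image data along the last axis.
    #
    #     cube = sequence.as_array()   # cube.shape == (ny, nx, N)
    #     frame0 = cube[..., 0]        # pixel data of the first map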
def all_meta(self):
"""
Return all the meta objects as a list.
"""
return [m.meta for m in self.maps]
|
the-stack_106_19201
|
from xv_leak_tools.test_components.cleanup.cleanup_vpns import CleanupVPNs
class MacOSCleanup(CleanupVPNs):
# You can add more applications, processes etc. here or you can override this class
# and the vpn_application component to avoid editing this one.
VPN_PROCESS_NAMES = [
'openvpn',
'racoon',
'pppd',
]
VPN_APPLICATIONS = [
'/Applications/ExpressVPN.app/Contents/MacOS/ExpressVPN',
]
UNKILLABLE_APPLICATIONS = []
def __init__(self, device, config):
super().__init__(
device, config,
MacOSCleanup.VPN_PROCESS_NAMES, MacOSCleanup.VPN_APPLICATIONS,
MacOSCleanup.UNKILLABLE_APPLICATIONS)
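# Hedged sketch (process and application names below are hypothetical): to target
# additional VPN clients without editing this file, a setup can subclass the
# component and pass its own lists straight to CleanupVPNs.
class ExampleCustomCleanup(MacOSCleanup):
    def __init__(self, device, config):
        CleanupVPNs.__init__(
            self, device, config,
            MacOSCleanup.VPN_PROCESS_NAMES + ['wireguard-go'],
            MacOSCleanup.VPN_APPLICATIONS + [
                '/Applications/MyVPN.app/Contents/MacOS/MyVPN',
            ],
            MacOSCleanup.UNKILLABLE_APPLICATIONS)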
|
the-stack_106_19203
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
The IP allocator maintains the life cycle of assigned IP addresses.
The IP allocator accepts IP blocks (range of IP addresses), and supports
allocating and releasing IP addresses from the assigned IP blocks. Note
that an IP address is not immediately made available for allocation right
after release: it is "reserved" for the same client for a certain period of
time to ensure that 1) an observer, e.g. pipelined, that caches IP states has
enough time to pull the updated IP states; 2) IP packets intended for the
old client will not be unintentionally routed to a new client until the old
TCP connection expires.
To support this semantic, an IP address can have the following states
during it's life cycle in the IP allocator:
FREE: IP is available for allocation
ALLOCATED: IP is allocated for a client.
RELEASED: IP is released, but still reserved for the client
REAPED: IPs are periodically reaped from the RELEASED state to the
REAPED state, and at the same time a timer is set. All REAPED state
IPs are freed once the time goes off. The purpose of this state is
to age IPs for a certain period of time before freeing.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import logging
import threading
from collections import defaultdict
from ipaddress import ip_address, ip_network
from typing import List, Optional, Set, Tuple
import redis
from copy import deepcopy
from magma.mobilityd import mobility_store as store
from magma.mobilityd.ip_descriptor import IPDesc, IPState
from magma.mobilityd.metrics import (IP_ALLOCATED_TOTAL, IP_RELEASED_TOTAL)
from random import choice
DEFAULT_IP_RECYCLE_INTERVAL = 15
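# Hedged illustration (purely descriptive; the allocator does not use this mapping):
# the life cycle from the module docstring, summarised as allowed state transitions.
_EXAMPLE_IP_LIFECYCLE = {
    'FREE': 'ALLOCATED',       # an IP is handed out to a subscriber
    'ALLOCATED': 'RELEASED',   # the subscriber releases the IP
    'RELEASED': 'REAPED',      # the recycle timer stages released IPs
    'REAPED': 'FREE',          # the timer expires; IPs become allocatable again
}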
class IPAllocator:
""" A thread-safe IP allocator: all mutating functions are protected by a
re-entrant lock.
The IPAllocator maintains IP life cycle, as well as the mapping between
SubscriberID (SID) and IPs. For now, only one-to-one mapping is supported
between SID and IP.
IP Recycling:
An IP address is periodically recycled for allocation after releasing.
Constraints:
1. Define a maturity time T, in seconds. Any IPs that were released
within the past T seconds cannot be freed.
2. All released IPs must eventually be freed.
To achieve these constraints, whenever we release an IP address we try
to set a recycle timer if it's not already set. When setting a timer,
we move all IPs in the RELEASED state to the REAPED state. Those IPs
will be freed once the timer goes off, at which time those IPs are
guaranteed to be "matured". Concurrently, newly released IPs are added
to the RELEASED state.
The time between when an IP is released and when it is freed is
guaranteed to be between T and 2T seconds. The worst-case occurs when
the release happens right after the previous timer has been initiated.
This leads to an additional T seconds wait for the existing timer to
time out and trigger a callback which initiate a new timer, on top of
the T seconds of timeout for the next timer.
Persistence to Redis:
The IP allocator now by default persists its state to a redis-server
process denoted in mobilityd.yml. The allocator's persisted properties
and associated structure:
- self._assigned_ip_blocks: {ip_block}
- self._ip_states: {state=>{ip=>ip_desc}}
- self._sid_ips_map: {SID=>[IPDesc]}
The utilized redis_containers store a cache of state in local memory,
so reads are the same speed as without persistence. For writes, state
is serialized with provided serializers and written through to the
Redis process. Used as expected, writes will be small (a few bytes).
Redis's performance can be measured with the redis-benchmark tool,
but we expect almost 100% of writes to take less than 1 millisecond.
"""
def __init__(self,
*,
recycling_interval: int = DEFAULT_IP_RECYCLE_INTERVAL,
persist_to_redis: bool = True,
redis_port: int = 6379):
""" Initializes a new IP allocator
Args:
recycling_interval (number): minimum time, in seconds, before a
released IP is recycled and freed into the pool of available
IPs.
                Default: DEFAULT_IP_RECYCLE_INTERVAL (15 seconds).
persist_to_redis (bool): store all state in local process if falsy,
else write state to Redis service
"""
logging.debug('Persist to Redis: %s', persist_to_redis)
self._lock = threading.RLock() # re-entrant locks
self._recycle_timer = None # reference to recycle timer
self._recycling_interval_seconds = recycling_interval
if not persist_to_redis:
self._assigned_ip_blocks = set() # {ip_block}
self._ip_states = defaultdict(dict) # {state=>{ip=>ip_desc}}
self._sid_ips_map = defaultdict(IPDesc) # {SID=>IPDesc}
else:
if not redis_port:
raise ValueError(
'Must specify a redis_port in mobilityd config.')
client = redis.Redis(host='localhost', port=redis_port)
self._assigned_ip_blocks = store.AssignedIpBlocksSet(client)
self._ip_states = store.defaultdict_key(
lambda key: store.ip_states(client, key))
self._sid_ips_map = store.IPDescDict(client)
def add_ip_block(self, ipblock: ip_network):
""" Add a block of IP addresses to the free IP list
IP blocks should not overlap.
Args:
ipblock (ipaddress.ip_network): ip network to add
e.g. ipaddress.ip_network("10.0.0.0/24")
Raises:
OverlappedIPBlocksError: if the given IP block overlaps with
existing ones
"""
for blk in self._assigned_ip_blocks:
if ipblock.overlaps(blk):
logging.error("Overlapped IP block: %s", ipblock)
raise OverlappedIPBlocksError(ipblock)
with self._lock:
self._assigned_ip_blocks.add(ipblock)
# TODO(oramadan) t23793559 HACK reserve the GW address for
# gtp_br0 iface and test VM
num_reserved_addresses = 11
for ip in ipblock.hosts():
state = IPState.RESERVED if num_reserved_addresses > 0 \
else IPState.FREE
ip_desc = IPDesc(ip=ip, state=state,
ip_block=ipblock, sid=None)
self._add_ip_to_state(ip, ip_desc, state)
if num_reserved_addresses > 0:
num_reserved_addresses -= 1
def remove_ip_blocks(self, *ipblocks: List[ip_network],
force: bool = False) -> List[ip_network]:
""" Makes the indicated block(s) unavailable for allocation
If force is False, blocks that have any addresses currently allocated
will not be removed. Otherwise, if force is True, the indicated blocks
will be removed regardless of whether any addresses have been allocated
and any allocated addresses will no longer be served.
Removing a block entails removing the IP addresses within that block
from the internal state machine.
Args:
ipblocks (ipaddress.ip_network): variable number of objects of type
ipaddress.ip_network, representing the blocks that are intended
to be removed. The blocks should have been explicitly added and
not yet removed. Any blocks that are not active in the IP
allocator will be ignored with a warning.
force (bool): whether to forcibly remove the blocks indicated. If
False, will only remove a block if no addresses from within the
block have been allocated. If True, will remove all blocks
regardless of whether any addresses have been allocated from
them.
Returns a set of the blocks that have been successfully removed.
"""
with self._lock:
remove_blocks = set(ipblocks) & self._assigned_ip_blocks
extraneous_blocks = set(ipblocks) ^ remove_blocks
# check unknown ip blocks
if extraneous_blocks:
logging.warning("Cannot remove unknown IP block(s): %s",
extraneous_blocks)
del extraneous_blocks
# "soft" removal does not remove blocks have IPs allocated
if not force:
allocated_ip_block_set = self._get_allocated_ip_block_set()
remove_blocks -= allocated_ip_block_set
del allocated_ip_block_set
# Remove the associated IP addresses
remove_ips = \
(ip for block in remove_blocks for ip in block.hosts())
for ip in remove_ips:
for state in (IPState.FREE, IPState.RELEASED, IPState.REAPED):
self._remove_ip_from_state(ip, state)
if force:
self._remove_ip_from_state(ip, IPState.ALLOCATED)
else:
assert not self._test_ip_state(ip, IPState.ALLOCATED), \
"Unexpected ALLOCATED IP %s from a soft IP block " \
"removal "
# Clean up SID maps
for sid in list(self._sid_ips_map):
self._sid_ips_map.pop(sid)
# Remove the IP blocks
self._assigned_ip_blocks -= remove_blocks
# Can't use generators here
remove_sids = tuple(sid for sid in self._sid_ips_map
if not self._sid_ips_map[sid])
for sid in remove_sids:
self._sid_ips_map.pop(sid)
for block in remove_blocks:
logging.info('Removed IP block %s from IPv4 address pool', block)
return remove_blocks
def list_added_ip_blocks(self) -> List[ip_network]:
""" List IP blocks added to the IP allocator
Return:
copy of the list of assigned IP blocks
"""
with self._lock:
ip_blocks = list(deepcopy(self._assigned_ip_blocks))
return ip_blocks
def list_allocated_ips(self, ipblock: ip_network) -> List[ip_address]:
""" List IP addresses allocated from a given IP block
Args:
ipblock (ipaddress.ip_network): ip network to add
e.g. ipaddress.ip_network("10.0.0.0/24")
Return:
list of IP addresses (ipaddress.ip_address)
Raises:
IPBlockNotFoundError: if the given IP block is not found in the
internal list
"""
if ipblock not in self._assigned_ip_blocks:
logging.error("Listing an unknown IP block: %s", ipblock)
raise IPBlockNotFoundError(ipblock)
with self._lock:
res = [ip for ip in ipblock \
if self._test_ip_state(ip, IPState.ALLOCATED)]
return res
def alloc_ip_address(self, sid: str, renew: bool = False) -> ip_address:
""" Allocate an IP address from the free list
Assumption: one-to-one mappings between SID and IP.
Args:
sid (string): universal subscriber id
Returns:
ipaddress.ip_address: IP address allocated
Raises:
NoAvailableIPError: if run out of available IP addresses
DuplicatedIPAllocationError: if an IP has been allocated to a UE
with the same IMSI
"""
have_old_ip = False
with self._lock:
# if an IP is reserved for the UE, this IP could be in the state of
# ALLOCATED, RELEASED or REAPED.
if sid in self._sid_ips_map:
old_ip_desc = self._sid_ips_map[sid]
if renew:
old_ip = old_ip_desc.ip
have_old_ip = True
elif self._test_ip_state(old_ip_desc.ip, IPState.ALLOCATED):
# MME state went out of sync with mobilityd!
# Recover gracefully by allocating the same IP
logging.warning("Re-allocate IP %s for sid %s without "
"MME releasing it first", old_ip_desc.ip,
sid)
# TODO: enable strict checking after root causing the
# issue in MME
# raise DuplicatedIPAllocationError(
# "An IP has been allocated for this IMSI")
elif self._test_ip_state(old_ip_desc.ip, IPState.RELEASED):
ip_desc = self._mark_ip_state(old_ip_desc.ip,
IPState.ALLOCATED)
ip_desc.sid = sid
logging.debug("SID %s IP %s RELEASED => ALLOCATED",
sid, old_ip_desc.ip)
elif self._test_ip_state(old_ip_desc.ip, IPState.REAPED):
ip_desc = self._mark_ip_state(old_ip_desc.ip,
IPState.ALLOCATED)
ip_desc.sid = sid
logging.debug("SID %s IP %s REAPED => ALLOCATED",
sid, old_ip_desc.ip)
else:
raise AssertionError("Unexpected internal state")
if not renew:
logging.info("Allocating the same IP %s for sid %s", old_ip_desc.ip, sid)
IP_ALLOCATED_TOTAL.inc()
return old_ip_desc.ip
# if an IP is not yet allocated for the UE, allocate a new IP
if self._get_ip_count(IPState.FREE):
ip_desc = self._pop_ip_from_state(IPState.FREE)
ip_desc.sid = sid
ip_desc.state = IPState.ALLOCATED
self._add_ip_to_state(ip_desc.ip, ip_desc, IPState.ALLOCATED)
self._sid_ips_map[sid] = ip_desc
if renew and have_old_ip:
# free the old ip
ip_desc_to_free = self._mark_ip_state(old_ip, IPState.FREE)
ip_desc_to_free.sid = None
else:
IP_ALLOCATED_TOTAL.inc()
return ip_desc.ip
else:
logging.error("Run out of available IP addresses")
raise NoAvailableIPError("No available IP addresses")
def get_sid_ip_table(self) -> List[Tuple[str, ip_address]]:
""" Return list of tuples (sid, ip) """
with self._lock:
res = [(sid, ip_desc.ip) for sid, ip_desc in
self._sid_ips_map.items()]
return res
def get_ip_for_sid(self, sid: str) -> Optional[ip_address]:
""" if ip is mapped to sid, return it, else return None """
with self._lock:
if sid in self._sid_ips_map:
if not self._sid_ips_map[sid]:
raise AssertionError("Unexpected internal state")
else:
return self._sid_ips_map[sid].ip
return None
def get_sid_for_ip(self, requested_ip: ip_address) -> Optional[str]:
""" If ip is associated with an sid, return the sid, else None """
with self._lock:
for sid, ip_desc in self._sid_ips_map.items():
if requested_ip == ip_desc.ip:
return sid
return None
def release_ip_address(self, sid: str, ip: ip_address):
""" Release an IP address.
A released IP is moved to a released list. Released IPs are recycled
periodically to the free list. SID IP mappings are removed at the
recycling time.
Args:
sid (string): universal subscriber id
ip (ipaddress.ip_address): IP address to release
Raises:
MappingNotFoundError: if the given sid-ip mapping is not found
IPNotInUseError: if the given IP is not found in the used list
"""
with self._lock:
if not (sid in self._sid_ips_map and ip ==
self._sid_ips_map[sid].ip):
logging.error(
"Releasing unknown <SID, IP> pair: <%s, %s>", sid, ip)
raise MappingNotFoundError(
"(%s, %s) pair is not found", sid, str(ip))
if not self._test_ip_state(ip, IPState.ALLOCATED):
logging.error("IP not found in used list, check if IP is "
"already released: <%s, %s>", sid, ip)
raise IPNotInUseError("IP not found in used list: %s", str(ip))
self._mark_ip_state(ip, IPState.RELEASED)
IP_RELEASED_TOTAL.inc()
self._try_set_recycle_timer() # start the timer to recycle
def _recycle_reaped_ips(self):
""" Periodically called to recycle the given IPs
        *** Calling this function directly is strongly discouraged, even in
        tests. ***
Recycling depends on the period, T = self._recycling_interval_seconds,
which is set at construction time.
"""
with self._lock:
for ip in self._list_ips(IPState.REAPED):
ip_desc = self._mark_ip_state(ip, IPState.FREE)
sid = ip_desc.sid
ip_desc.sid = None
# update SID-IP map
del self._sid_ips_map[sid]
# Set timer for the next round of recycling
self._recycle_timer = None
if self._get_ip_count(IPState.RELEASED):
self._try_set_recycle_timer()
def _try_set_recycle_timer(self):
""" Try set the recycle timer and move RELEASED IPs to the REAPED state
self._try_set_recycle_timer is called in two places:
1) at the end of self.release_ip_address, we are guaranteed that
some IPs exist in RELEASED state, so we attempt to initiate a timer
then.
2) at the end of self._recycle_reaped_ips, the call to
self._try_set_recycle_timer serves as a callback for setting the
next timer, if any IPs have been released since the current timer
was initiated.
"""
with self._lock:
# check if auto recycling is enabled and no timer has been set
if self._recycling_interval_seconds is not None \
and not self._recycle_timer:
for ip in self._list_ips(IPState.RELEASED):
self._mark_ip_state(ip, IPState.REAPED)
if self._recycling_interval_seconds:
self._recycle_timer = threading.Timer(
self._recycling_interval_seconds,
self._recycle_reaped_ips)
self._recycle_timer.start()
else:
self._recycle_reaped_ips()
def _add_ip_to_state(self, ip: ip_address, ip_desc: IPDesc,
state: IPState):
""" Add ip=>ip_desc pairs to a internal dict """
assert ip_desc.state == state, \
"ip_desc.state %s does not match with state %s" \
% (ip_desc.state, state)
assert state in IPState, "unknown state %s" % state
with self._lock:
self._ip_states[state][ip.exploded] = ip_desc
def _remove_ip_from_state(self, ip: ip_address, state: IPState) -> IPDesc:
""" Remove an IP from a internal dict """
assert state in IPState, "unknown state %s" % state
with self._lock:
ip_desc = self._ip_states[state].pop(ip.exploded, None)
return ip_desc
def _pop_ip_from_state(self, state: IPState) -> IPDesc:
""" Pop an IP from a internal dict """
assert state in IPState, "unknown state %s" % state
with self._lock:
ip_state_key = choice(list(self._ip_states[state].keys()))
ip_desc = self._ip_states[state].pop(ip_state_key)
return ip_desc
def _get_ip_count(self, state: IPState) -> int:
""" Return number of IPs in a state """
assert state in IPState, "unknown state %s" % state
with self._lock:
return len(self._ip_states[state])
def _test_ip_state(self, ip: ip_address, state: IPState) -> bool:
""" check if IP is in state X """
assert state in IPState, "unknown state %s" % state
with self._lock:
return ip.exploded in self._ip_states[state]
def _get_ip_state(self, ip: ip_address) -> IPState:
""" return the state of an IP """
for state in IPState:
if self._test_ip_state(ip, state):
return state
raise AssertionError("IP %s not found in any states" % ip)
def _list_ips(self, state: IPState) -> List[ip_address]:
""" return a list of IPs in state X """
assert state in IPState, "unknown state %s" % state
with self._lock:
return [ip_address(ip) for ip in self._ip_states[state]]
def _mark_ip_state(self, ip: ip_address, state: IPState) -> IPDesc:
""" Remove, mark, add: move IP to a new state """
assert state in IPState, "unknown state %s" % state
old_state = self._get_ip_state(ip)
with self._lock:
ip_desc = self._ip_states[old_state][ip.exploded]
# some internal checks
assert ip_desc.state != state, \
"move IP to the same state %s" % state
assert ip == ip_desc.ip, "Unmatching ip_desc for %s" % ip
if ip_desc.state == IPState.FREE:
assert ip_desc.sid is None, "Unexpected sid in a freed IPDesc"
else:
assert ip_desc.sid is not None, \
"Missing sid in state %s IPDesc" % state
# remove, mark, add
self._remove_ip_from_state(ip, old_state)
ip_desc.state = state
self._add_ip_to_state(ip, ip_desc, state)
return ip_desc
def _get_allocated_ip_block_set(self) -> Set[ip_network]:
""" A IP block is allocated if ANY IP is allocated from it """
with self._lock:
allocated_ips = self._ip_states[IPState.ALLOCATED]
return {ip_desc.ip_block for ip_desc in allocated_ips.values()}
class OverlappedIPBlocksError(Exception):
""" Exception thrown when a given IP block overlaps with existing ones
"""
pass
class IPBlockNotFoundError(Exception):
""" Exception thrown when listing an IP block that is not found in the ip
block list
"""
pass
class NoAvailableIPError(Exception):
""" Exception thrown when no IP is available in the free list for an ip
allocation request
"""
pass
class DuplicatedIPAllocationError(Exception):
""" Exception thrown when an IP has already been allocated to a UE """
pass
class IPNotInUseError(Exception):
""" Exception thrown when releasing an IP address that is not found in the
used list
"""
pass
class MappingNotFoundError(Exception):
""" Exception thrown when releasing a non-exising SID-IP mapping """
pass
class SubscriberNotFoundError(Exception):
""" Exception thrown when subscriber ID is not found in SID-IP mapping """
|
the-stack_106_19204
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
'''The Azure Command-line tool.
This tools provides a command-line interface to Azure's management and storage
APIs.
'''
import pkg_resources
pkg_resources.declare_namespace(__name__)
__author__ = "Microsoft Corporation <[email protected]>"
__version__ = "2.0.27"
|
the-stack_106_19205
|
#!/usr/bin/python
import time
import sys
import os
import zlib
import lzma
import bsdiff4
import json
import math
import matplotlib.pyplot as plt
basepath = sys.argv[1] if len(sys.argv) == 2 else '/etc'
def compress(method):
ini = time.time()
results = []
for path, dirs, files in os.walk(basepath):
results += [1 for dir in dirs]
for file in files:
            file = os.path.join(path, file)  # join with the walked directory, not the root
packets = 1
if not os.path.islink(file):
filename = file.split(os.sep)[-1]
first_packet = 512-(355 + len(filename))
try:
with open(file, 'br') as handler:
size = len(method(handler.read()))
if size > first_packet:
packets += math.ceil((size-first_packet)/(512-1-28))
# if packets > 60:
# packets = 60
except (FileNotFoundError, IsADirectoryError):
continue
results.append(packets)
return results, time.time()-ini
# warm-up
compress(lambda n: '')
results = {
'raw': compress(lambda n: n),
'bsdiff4': compress(lambda n: bsdiff4.diff(b'', n)),
'lzma': compress(lzma.compress),
'zlib': compress(zlib.compress),
}
plt.hist(results['raw'][0], bins=1000, histtype='step', density=True, color='y', label='raw', cumulative=True)
plt.hist(results['zlib'][0], bins=1000, histtype='step', density=True, color='r', label='zlib', cumulative=True)
plt.hist(results['bsdiff4'][0], bins=1000, histtype='step', density=True, color='g', label='bsdiff4', cumulative=True)
plt.hist(results['lzma'][0], bins=1000, histtype='step', density=True, color='b', label='lzma', cumulative=True)
plt.title("/etc Nuber of Packets Cumulative Histogram")
plt.xlabel("Number of Packets")
plt.ylabel("Probability")
plt.legend()
plt.show()
plt.savefig('etc_packets.png', dpi=300)
plt.clf()
plt.barh(1, results['raw'][1], align='center', alpha=0.7, color='y')
plt.barh(2, results['zlib'][1], align='center', alpha=0.7, color='r')
plt.barh(3, results['bsdiff4'][1], align='center', alpha=0.7, color='g')
plt.barh(4, results['lzma'][1], align='center', alpha=0.7, color='b')
plt.yticks((1,2,3,4), ('raw', 'zlib', 'bsdiff4', 'lzma'))
plt.xlabel('Time in Seconds')
plt.title('/etc Compression Time')
plt.savefig('etc_time.png', dpi=300)
#print(json.dumps(results, indent=4))
|
the-stack_106_19206
|
# -*- coding: utf-8 -*-
"""
Created on 10/02/2014
@author: Dani
"""
import re
import codecs
from reconciler.entities.normalized_country import NormalizedCountry
from reconciler.exceptions.unknown_country_error import UnknownCountryError
class CountryNormalizer(object):
"""
In this class we'll implement the normalizer methods responsible
for returning a NormalizedCountry object from a certain distinctive
value. If a reconciliation is needed we'll also implement it here.
"""
# Conflictive expressions
EN_REMOVABLE_EXPRESSIONS = "(the|in|of|and|&)"
ES_REMOVABLE_EXPRESSIONS = "(el|las|los|la|lo|de|y|&|del|en)"
FR_REMOVABLE_EXPRESSIONS = "(les|las|le|la|et|&|dans|de|d|l)"
A_VOWEL_FORMS = "(Á|À|Â|Ä|á|à|â|ä)"
E_VOWEL_FORMS = "(É|È|Ê|Ë|é|è|ê|ë)"
I_VOWEL_FORMS = "(Í|Ì|Î|Ï|í|ì|î|ï)"
O_VOWEL_FORMS = "(Ó|Ò|Ô|Ö|ó|ò|ô|ö)"
U_VOWEL_FORMS = "(Ú|Ù|Û|Ü|ú|ù|û|ü)"
N_FORMS = "ñ"
C_FORMS = "(ç|þ)" # For "Curaçao" and "Curaþao"
PUNCTUATION_SYMBOLS = "(\.|,|-|:|;|_|`|'|´|!|¡|¿|\?|\^|¨)"
@staticmethod
def _equals_ignore_case(str1, str2):
if str1.lower() == str2.lower():
return True
return False
#DONE
@staticmethod
def normalize_country_by_en_name(en_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(en_name,
CountryNormalizer.EN_REMOVABLE_EXPRESSIONS)
@staticmethod
def normalize_country_by_es_name(es_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(es_name,
CountryNormalizer.ES_REMOVABLE_EXPRESSIONS)
@staticmethod
def normalize_country_by_fr_name(fr_name):
return CountryNormalizer._normalize_country_by_given_language_removable_expressions(fr_name,
CountryNormalizer.FR_REMOVABLE_EXPRESSIONS)
@staticmethod
def _normalize_country_by_given_language_removable_expressions(original_string, given_exp_removables):
# print "---------# NORMALIZER"
result = str(original_string)
# print result
result = CountryNormalizer._substitute_conflictive_chars(result)
# print result
result = CountryNormalizer._delete_text_between_brackets(result)
# print result
result = result.lower()
# print result
result = CountryNormalizer._substitute_commom_abreviations(result)
# print result
result = CountryNormalizer._rem_words_by_language(result, given_exp_removables)
# print result
result = CountryNormalizer._rem_white_spaces(result)
# print result
# print "---------# NORMALIZER"
return result
@staticmethod
def _substitute_commom_abreviations(original_string):
result = original_string
#Republic
result = re.sub("(republic|republica|republique)", "rep", result)
#Democratic
result = re.sub('(democratic|democratica|democratique)', "dem", result)
#Monarchy
result = re.sub('(monarchy|monarquia|monarchie)', "mon", result)
#Federation
result = re.sub('(federation|federacion)', "fed", result)
return result
@staticmethod
def _rem_white_spaces(original_string):
result = original_string.replace(" ", "")
result = result.replace("\n", "")
result = result.replace("\t", "")
result = result.replace("\r", "")
return result
@staticmethod
def _delete_text_between_brackets(original_string):
if original_string.__contains__("(") and original_string.__contains__(")"):
index_beg = original_string.index("(")
index_end = original_string.index(")") + 1
            return original_string[0:index_beg] + original_string[index_end:]
else:
return original_string
@staticmethod
def _substitute_conflictive_chars(original_string):
# print "MIRAD MI PENEEEEE"
result = original_string
result = re.sub(CountryNormalizer.A_VOWEL_FORMS, 'a', result)
result = re.sub(CountryNormalizer.E_VOWEL_FORMS, 'e', result)
result = re.sub(CountryNormalizer.I_VOWEL_FORMS, 'i', result)
result = re.sub(CountryNormalizer.O_VOWEL_FORMS, 'o', result)
result = re.sub(CountryNormalizer.U_VOWEL_FORMS, 'u', result)
result = re.sub(CountryNormalizer.N_FORMS, 'n', result)
result = re.sub(CountryNormalizer.C_FORMS, 'c', result)
result = re.sub(CountryNormalizer.PUNCTUATION_SYMBOLS, " ", result)
return result
@staticmethod
def _rem_words_by_language(original, sub_exp):
        # regex_exp matches the non-significant words that should be replaced
        # by a blank. To fit the regex, each word must appear in one of these
        # contexts:
        # - [start_of_string] word [white_space]
        # - [white_space] word [white_space]
        # - [white_space] word [end_of_string]
        #
regex_exp = "(\A" + sub_exp + "\s)|(\s" + sub_exp + "\s)|(\s" + sub_exp + "\Z)"
version1 = ""
version2 = original
while version1 != version2:
version1 = version2
version2 = re.sub(regex_exp, " ", version1)
        # The previous loop applies re.sub more than once to the original string.
        # This is needed because when two or more insignificant words appear in
        # a row, some of them can be missed by the regex. E.g.: "Republic of the
        # Congo": " of " matches the regex, but that means " the " is not
        # recognized, since the whitespace between "of" and "the" is consumed by
        # the first match. In effect we match " of " but not " the ", and the
        # resulting string would be "Republic the Congo". Re-applying the regex
        # until the string stops changing avoids this.
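# --- Illustrative sketch (not part of the original module) ---
# Assuming the reconciler imports above resolve, the full pipeline lowercases,
# abbreviates "republic" to "rep", strips articles/prepositions iteratively and
# removes whitespace, so "Republic of the Congo" should normalize to "repcongo".
if __name__ == '__main__':
    print(CountryNormalizer.normalize_country_by_en_name("Republic of the Congo"))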
|
the-stack_106_19207
|
import sys
import logging
def get_logger(logger_name, log_level=logging.INFO):
logger = logging.getLogger(logger_name)
if not logger.hasHandlers():
fmt = logging.Formatter(
fmt="%(asctime)-11s %(name)s:%(lineno)d %(levelname)s: %(message)s",
datefmt="[%Y/%m/%d-%H:%M:%S]"
)
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(fmt)
logger.addHandler(stream_handler)
logger.setLevel(log_level)
return logger
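# --- Brief usage sketch (not part of the original file) ---
# The helper only configures a handler the first time a given logger name is
# requested; later calls return the already-configured logger.
if __name__ == "__main__":
    demo_logger = get_logger("demo", log_level=logging.DEBUG)
    demo_logger.info("logger configured; messages go to stdout")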
|
the-stack_106_19209
|
from .node import Node
from datetime import datetime as timestamp
from time import time as now
from .metrics import Metrics
class DepthFirstSearch:
def __init__(self, automated_planner):
self.time_start = now()
self.visited = []
self.automated_planner = automated_planner
self.init = Node(self.automated_planner.initial_state, automated_planner)
self.stack = [self.init]
self.metrics = Metrics()
def search(self, node_bound=float("inf")):
self.automated_planner.logger.debug(
"Search started at: " + str(timestamp.now())
)
while self.stack:
current_node = self.stack.pop()
if current_node not in self.visited:
self.visited.append(current_node)
self.metrics.n_evaluated += 1
if self.automated_planner.satisfies(
self.automated_planner.problem.goal, current_node.state
):
self.metrics.runtime = now() - self.time_start
self.automated_planner.logger.debug(
"Search finished at: " + str(timestamp.now())
)
self.metrics.total_cost = current_node.g_cost
return current_node, self.metrics
if self.metrics.n_opened > node_bound:
break
actions = self.automated_planner.available_actions(current_node.state)
if not actions:
self.metrics.deadend_states += 1
else:
self.metrics.n_expended += 1
for act in actions:
child = Node(
state=self.automated_planner.transition(
current_node.state, act
),
automated_planner=self.automated_planner,
parent_action=act,
parent=current_node,
)
self.metrics.n_generated += 1
if child in self.visited:
continue
self.metrics.n_opened += 1
self.stack.append(child)
self.metrics.runtime = now() - self.time_start
self.automated_planner.logger.warning("!!! No path found !!!")
return None, self.metrics
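# --- Hedged usage sketch (not part of the original module) ---
# DepthFirstSearch only relies on the planner interface visible above: an
# object exposing `initial_state`, `problem.goal`, `logger`,
# `satisfies(goal, state)`, `available_actions(state)` and
# `transition(state, action)`. Given such an `automated_planner`:
#
#   goal_node, metrics = DepthFirstSearch(automated_planner).search()
#   if goal_node is not None:
#       print(metrics.runtime, metrics.total_cost)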
|
the-stack_106_19211
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('relation', '0005_auto_20151215_1549'),
]
operations = [
migrations.RemoveField(
model_name='relation',
name='concept_text_1',
),
migrations.RemoveField(
model_name='relation',
name='concept_text_2',
),
migrations.AddField(
model_name='relation',
name='concept_1',
field=models.ForeignKey(related_name='concept_1', default=1, to='relation.Concept'),
preserve_default=False,
),
migrations.AddField(
model_name='relation',
name='concept_2',
field=models.ForeignKey(related_name='concept_2', default=1, to='relation.Concept'),
preserve_default=False,
),
]
|
the-stack_106_19213
|
from django.shortcuts import render
from django.http import HttpResponsePermanentRedirect, HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from cbp.models import FtpGroup, BackupGroup
from cbp.forms import FtpServersForm
@login_required(login_url='accounts/login/')
def ftp_servers(request):
if not request.user.is_superuser:
return HttpResponsePermanentRedirect('/')
ftp_servers_all = FtpGroup.objects.all()
return render(request, "backup_control/ftp_servers.html", {"ftp_servers": ftp_servers_all})
@login_required(login_url='accounts/login/')
def ftp_server_delete(request, fs_id):
try:
if not request.user.is_superuser:
return HttpResponsePermanentRedirect('/')
fs = FtpGroup.objects.get(id=fs_id)
fs.delete()
return HttpResponsePermanentRedirect('/ftp_servers')
except FtpGroup.DoesNotExist:
return HttpResponseNotFound("<h2>Данная группа не найдена!</h2>")
def ftp_servers_edit(request, fs_id: int = 0):
try:
if fs_id:
ftp_server = FtpGroup.objects.get(id=fs_id)
ftp_server_form = FtpServersForm(initial={
'name': ftp_server.name,
'ip': ftp_server.host,
'login': ftp_server.login,
'password': ftp_server.password,
'workdir': ftp_server.workdir,
'protocol': ftp_server.protocol,
'sftp_port': ftp_server.sftp_port
})
else:
ftp_server_form = FtpServersForm(initial={'sftp_port': 22})
ftp_server = FtpGroup()
if request.method == "POST":
ftp_server.name = request.POST.get('name')
ftp_server.host = request.POST.get('ip')
ftp_server.login = request.POST.get('login')
ftp_server.password = request.POST.get('password')
ftp_server.workdir = request.POST.get('workdir')
ftp_server.protocol = request.POST.get('protocol')
ftp_server.sftp_port = request.POST.get('sftp_port') or 22
ftp_server.save()
return HttpResponsePermanentRedirect("/ftp_servers")
else:
return render(request, "backup_control/ftp_servers_edit.html", {"form": ftp_server_form})
    except (FtpGroup.DoesNotExist, BackupGroup.DoesNotExist):
return HttpResponseNotFound("<h2>Данная группа не найдена!</h2>")
|
the-stack_106_19219
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Bijector unit-test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import reshape as reshape_bijector
from tensorflow_probability.python.distributions import uniform as uniform_distribution
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow_probability.python.math.gradient import batch_jacobian
JAX_MODE = False
def assert_finite(array):
if not np.isfinite(array).all():
raise AssertionError('array was not all finite. %s' % array[:15])
def assert_strictly_increasing(array):
np.testing.assert_array_less(0., np.diff(array))
def assert_strictly_decreasing(array):
np.testing.assert_array_less(np.diff(array), 0.)
def assert_strictly_monotonic(array):
if array[0] < array[-1]:
assert_strictly_increasing(array)
else:
assert_strictly_decreasing(array)
def assert_scalar_congruency(bijector,
lower_x,
upper_x,
eval_func,
n=int(10e3),
rtol=0.01):
"""Assert `bijector`'s forward/inverse/inverse_log_det_jacobian are congruent.
We draw samples `X ~ U(lower_x, upper_x)`, then feed these through the
`bijector` in order to check that:
1. the forward is strictly monotonic.
2. the forward/inverse methods are inverses of each other.
3. the jacobian is the correct change of measure.
This can only be used for a Bijector mapping open subsets of the real line
to themselves. This is due to the fact that this test compares the `prob`
before/after transformation with the Lebesgue measure on the line.
Args:
bijector: Instance of Bijector
lower_x: Python scalar.
upper_x: Python scalar. Must have `lower_x < upper_x`, and both must be in
the domain of the `bijector`. The `bijector` should probably not produce
huge variation in values in the interval `(lower_x, upper_x)`, or else the
variance based check of the Jacobian will require small `rtol` or huge
`n`.
eval_func: Function to evaluate any intermediate results.
n: Number of samples to draw for the checks.
rtol: Positive number. Used for the Jacobian check.
Raises:
AssertionError: If tests fail.
"""
# Should be monotonic over this interval
ten_x_pts = np.linspace(lower_x, upper_x, num=10).astype(np.float32)
if bijector.dtype is not None:
ten_x_pts = ten_x_pts.astype(dtype_util.as_numpy_dtype(bijector.dtype))
lower_x = np.cast[dtype_util.as_numpy_dtype(bijector.dtype)](lower_x)
upper_x = np.cast[dtype_util.as_numpy_dtype(bijector.dtype)](upper_x)
forward_on_10_pts = bijector.forward(ten_x_pts)
# Set the lower/upper limits in the range of the bijector.
lower_y, upper_y = eval_func(
[bijector.forward(lower_x),
bijector.forward(upper_x)])
if upper_y < lower_y: # If bijector.forward is a decreasing function.
lower_y, upper_y = upper_y, lower_y
# Uniform samples from the domain, range.
seed_stream = tfp_test_util.test_seed_stream(salt='assert_scalar_congruency')
uniform_x_samps = uniform_distribution.Uniform(
low=lower_x, high=upper_x).sample(n, seed=seed_stream())
uniform_y_samps = uniform_distribution.Uniform(
low=lower_y, high=upper_y).sample(n, seed=seed_stream())
# These compositions should be the identity.
inverse_forward_x = bijector.inverse(bijector.forward(uniform_x_samps))
forward_inverse_y = bijector.forward(bijector.inverse(uniform_y_samps))
# For a < b, and transformation y = y(x),
# (b - a) = \int_a^b dx = \int_{y(a)}^{y(b)} |dx/dy| dy
# "change_measure_dy_dx" below is a Monte Carlo approximation to the right
# hand side, which should then be close to the left, which is (b - a).
# We assume event_ndims=0 because we assume scalar -> scalar. The log_det
# methods will handle whether they expect event_ndims > 0.
dy_dx = tf.exp(
bijector.inverse_log_det_jacobian(uniform_y_samps, event_ndims=0))
# E[|dx/dy|] under Uniform[lower_y, upper_y]
# = \int_{y(a)}^{y(b)} |dx/dy| dP(u), where dP(u) is the uniform measure
expectation_of_dy_dx_under_uniform = tf.reduce_mean(dy_dx)
# dy = dP(u) * (upper_y - lower_y)
change_measure_dy_dx = (
(upper_y - lower_y) * expectation_of_dy_dx_under_uniform)
# We'll also check that dy_dx = 1 / dx_dy.
dx_dy = tf.exp(
bijector.forward_log_det_jacobian(
bijector.inverse(uniform_y_samps), event_ndims=0))
[
forward_on_10_pts_v,
dy_dx_v,
dx_dy_v,
change_measure_dy_dx_v,
uniform_x_samps_v,
uniform_y_samps_v,
inverse_forward_x_v,
forward_inverse_y_v,
] = eval_func([
forward_on_10_pts,
dy_dx,
dx_dy,
change_measure_dy_dx,
uniform_x_samps,
uniform_y_samps,
inverse_forward_x,
forward_inverse_y,
])
assert_strictly_monotonic(forward_on_10_pts_v)
# Composition of forward/inverse should be the identity.
np.testing.assert_allclose(
inverse_forward_x_v, uniform_x_samps_v, atol=1e-5, rtol=1e-3)
np.testing.assert_allclose(
forward_inverse_y_v, uniform_y_samps_v, atol=1e-5, rtol=1e-3)
# Change of measure should be correct.
np.testing.assert_allclose(
desired=upper_x - lower_x, actual=change_measure_dy_dx_v,
atol=0, rtol=rtol)
# Inverse Jacobian should be equivalent to the reciprocal of the forward
# Jacobian.
np.testing.assert_allclose(
desired=dy_dx_v, actual=np.reciprocal(dx_dy_v), atol=1e-5, rtol=1e-3)
def assert_bijective_and_finite(bijector,
x,
y,
event_ndims,
eval_func,
inverse_event_ndims=None,
atol=0,
rtol=1e-5):
"""Assert that forward/inverse (along with jacobians) are inverses and finite.
It is recommended to use x and y values that are very very close to the edge
of the Bijector's domain.
Args:
bijector: A Bijector instance.
x: np.array of values in the domain of bijector.forward.
y: np.array of values in the domain of bijector.inverse.
event_ndims: Integer describing the number of event dimensions this bijector
operates on.
eval_func: Function to evaluate any intermediate results.
inverse_event_ndims: Integer describing the number of event dimensions for
the bijector codomain. If None, then the value of `event_ndims` is used.
atol: Absolute tolerance.
rtol: Relative tolerance.
Raises:
AssertionError: If tests fail.
"""
if inverse_event_ndims is None:
inverse_event_ndims = event_ndims
# These are the incoming points, but people often create a crazy range of
# values for which these end up being bad, especially in 16bit.
assert_finite(x)
assert_finite(y)
f_x = bijector.forward(x)
g_y = bijector.inverse(y)
[
x_from_x,
y_from_y,
ildj_f_x,
fldj_x,
ildj_y,
fldj_g_y,
f_x_v,
g_y_v,
] = eval_func([
bijector.inverse(f_x),
bijector.forward(g_y),
bijector.inverse_log_det_jacobian(f_x, event_ndims=inverse_event_ndims),
bijector.forward_log_det_jacobian(x, event_ndims=event_ndims),
bijector.inverse_log_det_jacobian(y, event_ndims=inverse_event_ndims),
bijector.forward_log_det_jacobian(g_y, event_ndims=event_ndims),
f_x,
g_y,
])
assert_finite(x_from_x)
assert_finite(y_from_y)
assert_finite(ildj_f_x)
assert_finite(fldj_x)
assert_finite(ildj_y)
assert_finite(fldj_g_y)
assert_finite(f_x_v)
assert_finite(g_y_v)
np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)
def get_fldj_theoretical(bijector,
x,
event_ndims,
inverse_event_ndims=None,
input_to_unconstrained=None,
output_to_unconstrained=None):
"""Numerically approximate the forward log det Jacobian of a bijector.
We compute the Jacobian of the chain
output_to_unconst_vec(bijector(inverse(input_to_unconst_vec))) so that
we're working with a full rank matrix. We then adjust the resulting Jacobian
for the unconstraining bijectors.
Bijectors that constrain / unconstrain their inputs/outputs may not be
testable with this method, since the composition above may reduce the test
to something trivial. However, bijectors that map within constrained spaces
should be fine.
Args:
bijector: the bijector whose Jacobian we wish to approximate
x: the value for which we want to approximate the Jacobian. Must have rank
at least `event_ndims`.
event_ndims: number of dimensions in an event
inverse_event_ndims: Integer describing the number of event dimensions for
the bijector codomain. If None, then the value of `event_ndims` is used.
input_to_unconstrained: bijector that maps the input to the above bijector
to an unconstrained 1-D vector. If unspecified, flatten the input into
a 1-D vector according to its event_ndims.
output_to_unconstrained: bijector that maps the output of the above bijector
to an unconstrained 1-D vector. If unspecified, flatten the input into
a 1-D vector according to its event_ndims.
Returns:
fldj: A gradient-based evaluation of the log det Jacobian of
`bijector.forward` at `x`.
"""
if inverse_event_ndims is None:
inverse_event_ndims = event_ndims
if input_to_unconstrained is None:
input_to_unconstrained = reshape_bijector.Reshape(
event_shape_in=x.shape[tensorshape_util.rank(x.shape) - event_ndims:],
event_shape_out=[-1])
if output_to_unconstrained is None:
f_x_shape = bijector.forward_event_shape(x.shape)
output_to_unconstrained = reshape_bijector.Reshape(
event_shape_in=f_x_shape[tensorshape_util.rank(f_x_shape) -
inverse_event_ndims:],
event_shape_out=[-1])
x = tf.convert_to_tensor(x)
x_unconstrained = 1 * input_to_unconstrained.forward(x)
# Collapse any batch dimensions (including scalar) to a single axis.
batch_shape = x_unconstrained.shape[:-1]
x_unconstrained = tf.reshape(
x_unconstrained, [int(np.prod(batch_shape)), x_unconstrained.shape[-1]])
def f(x_unconstrained, batch_shape=batch_shape):
# Unflatten any batch dimensions now under the tape.
unflattened_x_unconstrained = tf.reshape(
x_unconstrained,
tensorshape_util.concatenate(batch_shape, x_unconstrained.shape[-1:]))
f_x = bijector.forward(input_to_unconstrained.inverse(
unflattened_x_unconstrained))
return f_x
def f_unconstrained(x_unconstrained, batch_shape=batch_shape):
f_x_unconstrained = output_to_unconstrained.forward(
f(x_unconstrained, batch_shape=batch_shape))
# Flatten any batch dimensions to a single axis.
return tf.reshape(
f_x_unconstrained,
[int(np.prod(batch_shape)), f_x_unconstrained.shape[-1]])
if JAX_MODE:
f_unconstrained = functools.partial(f_unconstrained, batch_shape=[])
jacobian = batch_jacobian(f_unconstrained, x_unconstrained)
jacobian = tf.reshape(
jacobian, tensorshape_util.concatenate(batch_shape, jacobian.shape[-2:]))
logging.vlog(1, 'Jacobian: %s', jacobian)
log_det_jacobian = 0.5 * tf.linalg.slogdet(
tf.matmul(jacobian, jacobian, adjoint_a=True)).log_abs_determinant
input_correction = input_to_unconstrained.forward_log_det_jacobian(
x, event_ndims=event_ndims)
output_correction = output_to_unconstrained.forward_log_det_jacobian(
f(x_unconstrained), event_ndims=inverse_event_ndims)
return (log_det_jacobian + tf.cast(input_correction, log_det_jacobian.dtype) -
tf.cast(output_correction, log_det_jacobian.dtype))
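# --- Hedged usage sketch (not part of the original module) ---
# Inside a TFP test case these helpers are typically driven with a concrete
# bijector and an `eval_func` that materializes tensors. The bijector choice
# and the eager-mode eval_func below are illustrative assumptions only; the
# seed handling in assert_scalar_congruency expects the TFP test harness
# (parsed absl flags), so this is shown as a sketch rather than a script:
#
#   bijector = tfb.Exp()  # tfb = tensorflow_probability bijectors (assumed)
#   eval_func = lambda tensors: [np.array(t) for t in tensors]
#   assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.,
#                            eval_func=eval_func, n=int(1e4), rtol=0.05)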
|
the-stack_106_19220
|
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/create_kml_point.py
Description: This code creates a point kml from latitude and longitue.
Author: Maziyar Boustani (github.com/MBoustani)
'''
import os
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
latitude = 30
longitude = 10
kml = 'point.kml'
layer_name = 'point_layer'
#create KML driver
driver = ogr.GetDriverByName('KML')
#create kml data_source(file)
data_source = driver.CreateDataSource(kml)
#create spatial reference
srs = osr.SpatialReference()
#in this case wgs84
srs.ImportFromEPSG(4326)
#create kml layer as point data with wgs84 as spatial reference
layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPoint)
#create "Name" column for attribute table and set type as string
field_name = ogr.FieldDefn("Name", ogr.OFTString)
field_name.SetWidth(24)
layer.CreateField(field_name)
#create point geometry
point = ogr.Geometry(ogr.wkbPoint)
#add point into point geometry
point.AddPoint(longitude, latitude)
#create a feature
feature = ogr.Feature(layer.GetLayerDefn())
#set feature geometry
feature.SetGeometry(point)
#add field "Name" to feature
feature.SetField("Name", 'point_one')
#create feature in layer
layer.CreateFeature(feature)
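#added note (not in the original script): dereferencing the feature and the
#data source is the usual OGR way to flush and close the KML file before exit
feature = None
data_source = None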
|
the-stack_106_19223
|
"""
Key functions for the ADFQ algorithms
Using jit
posterior_numeric_exact
posterior_numeric
posterior_adfq
posterior_adfq_v2
"""
import numpy as np
from scipy.stats import norm
from scipy.special import logsumexp
import pdb
from numba import jit
REW_VAR_0 = 1e-3
DTYPE = np.float64
def posterior_numeric_exact(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH = 1e-10,
scale_factor=1.0, num_interval=2000, width=10.0, noise=0.0, batch=False, REW_VAR = REW_VAR_0):
"""
The mean and variance of the true posterior when |A|=2.
"""
assert(len(n_means) == 2)
target_means = reward + discount*np.array(n_means, dtype = DTYPE)
target_vars = discount*discount*np.array(n_vars, dtype = DTYPE)
bar_vars = 1./(1/c_var + 1./(target_vars + noise))
bar_means = bar_vars*(c_mean/c_var + target_means/(target_vars+noise))
cdf1 = norm.cdf((bar_means[0]-target_means[1])/np.sqrt(bar_vars[0]+target_vars[1]))
cdf2 = norm.cdf((bar_means[1]-target_means[0])/np.sqrt(bar_vars[1]+target_vars[0]))
pdf1 = norm.pdf((bar_means[0]-target_means[1])/np.sqrt(bar_vars[0]+target_vars[1]))/np.sqrt(bar_vars[0]+target_vars[1])
pdf2 = norm.pdf((bar_means[1]-target_means[0])/np.sqrt(bar_vars[1]+target_vars[0]))/np.sqrt(bar_vars[1]+target_vars[0])
c1 = norm.pdf((target_means[0]-c_mean)/np.sqrt(c_var+target_vars[0]))/np.sqrt(c_var+target_vars[0])
c2 = norm.pdf((target_means[1]-c_mean)/np.sqrt(c_var+target_vars[1]))/np.sqrt(c_var+target_vars[1])
Z = c1*cdf1 + c2*cdf2
mean = (c1*(bar_means[0]*cdf1+ bar_vars[0]*pdf1) + c2*(bar_means[1]*cdf2+bar_vars[1]*pdf2))/Z
sec_moment = c1*((bar_means[0]**2 + bar_vars[0])*cdf1 +2*bar_means[0]*bar_vars[0]*pdf1 \
- (bar_means[0]-target_means[1])/(bar_vars[0]+target_vars[1])*bar_vars[0]**2*pdf1) \
+ c2*((bar_means[1]**2 + bar_vars[1])*cdf2 +2*bar_means[1]*bar_vars[1]*pdf2 \
- (bar_means[1]-target_means[0])/(bar_vars[1]+target_vars[0])*bar_vars[1]**2*pdf2)
var = sec_moment/Z - mean**2
return mean, var
def posterior_numeric(n_means, n_vars, c_mean, c_var, reward, discount, terminal,
varTH = 1e-10, scale_factor=1.0, num_interval=2000, width=10.0, noise=0.0,
noise_c=0.0, batch=False, REW_VAR = REW_VAR_0):
"""ADFQ - Numeric posterior update
Parameters
----------
n_means : a numpy array of mean values for the next state (s'). shape = (anum,) or (batch_size, anum)
n_vars : a numpy array of variance values for the next state (s'). shape = (anum,) or (batch_size, anum)
c_mean : a mean value for the current state-action pair. shape = () or (batch_size,)
c_var : a variance value for the current state-action pair. shape = () or (batch_size,)
reward : the current observed reward. shape = () or (batch_size,)
discount : the discount factor. Scalar.
terminal : (=done) True if the environment is episodic and the current episode is done. shape = () or (batch_size,)
varTH : variance threshold.
scale_factor : scale factor.
num_interval : The number of intervals for the samples (x-values).
width : determines a range of samples. width*standard_deviation around mean.
noise : noise added to c_var for the update in stochastic environment
batch : True if you are using a batch update
REW_VAR : when terminal == True, TD target = reward. p(r) = Gaussian with mean=0 and variance = REW_VAR.
"""
noise = noise/scale_factor
noise_c= noise_c/scale_factor
c_var = c_var + noise_c
if batch:
batch_size = n_means.shape[0]
c_mean = np.reshape(c_mean, (batch_size,1))
c_var = np.reshape(c_var, (batch_size,1))
reward = np.reshape(reward, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
else:
if terminal:
var_new = 1./(1./c_var + scale_factor/(REW_VAR+noise))
sd_new = np.sqrt(var_new, dtype=DTYPE)
mean_new = var_new*(c_mean/c_var + scale_factor*reward/(REW_VAR+noise))
try:
x = np.arange(mean_new-0.5*width*sd_new, mean_new+0.5*width*sd_new, width*sd_new/num_interval)
except:
pdb.set_trace()
print(mean_new, sd_new)
return mean_new, var_new, None #(x, norm.pdf(x, mean_new, sd_new))
target_means = reward + discount*np.array(n_means, dtype = DTYPE)
target_vars = discount*discount*np.array(n_vars, dtype = DTYPE)
bar_vars = 1./(1/c_var + 1./(target_vars + noise))
bar_means = bar_vars*(c_mean/c_var + target_means/(target_vars+noise))
if (target_vars < 0.0).any() or (bar_vars < 0.0).any():
pdb.set_trace()
sd_range = np.sqrt(np.concatenate((target_vars, bar_vars) ,axis=-1), dtype=DTYPE)
mean_range = np.concatenate((target_means, bar_means),axis=-1)
x_max = np.max(mean_range + 0.5*width*sd_range, axis=-1)
x_min = np.min(mean_range - 0.5*width*sd_range, axis=-1)
interval = (x_max-x_min)/num_interval
# Need to better way for batch
if batch :
mean_new = []
var_new = []
for j in range(batch_size):
m, v, _ = posterior_numeric_helper(target_means[j], target_vars[j], c_mean[j], c_var[j],
bar_means[j], bar_vars[j], x_max[j], x_min[j], interval[j], noise=noise)
mean_new.append(m)
var_new.append(v)
var_new = (1.-terminal)*np.reshape(var_new, (batch_size,1)) \
+ terminal*1./(1./c_var + scale_factor/(REW_VAR+noise))
mean_new = (1.-terminal)*np.reshape(mean_new, (batch_size,1)) \
+ terminal*var_new*(c_mean/c_var + scale_factor*reward/(REW_VAR+noise))
return mean_new, np.maximum(varTH, var_new), None
else:
mean_new, var_new, (x, prob) = posterior_numeric_helper(target_means, target_vars, c_mean, c_var,
bar_means, bar_vars, x_max, x_min, interval, noise =noise)
if np.isnan(var_new).any():
print("Variance is NaN")
return mean_new, np.maximum(varTH, var_new), (x, prob)
def posterior_numeric_helper(target_means, target_vars, c_mean, c_var, bar_means, bar_vars,
x_max, x_min, interval, noise=0.0):
"""ADFQ - Numeric posterior update helper function
"""
anum = target_means.shape[-1]
add_vars = c_var+target_vars+noise
x = np.append(np.arange(x_min, x_max, interval), x_max)
cdfs = np.array([norm.cdf(x, target_means[i], np.sqrt(target_vars[i])) for i in range(anum)])
nonzero_ids = []
for cdf in cdfs:
for (i,v) in enumerate(cdf):
if v > 0.0:
nonzero_ids.append(i)
break
if len(nonzero_ids) != anum:
raise ValueError('CDF peak is outside of the range')
log_probs = []
min_id = len(x)
log_max_prob = -1.e+100
for b in range(anum):
min_id = min(min_id, nonzero_ids[b])
idx = max([nonzero_ids[c] for c in range(anum) if c!= b])
tmp = - np.log(2*np.pi) -0.5*( np.log(add_vars[b]) + np.log(bar_vars[b]) \
+ (c_mean-target_means[b])**2/add_vars[b]+ (x[idx:] - bar_means[b])**2/bar_vars[b]) \
+ np.sum([np.log(cdfs[c, idx:]) for c in range(anum) if c!=b], axis=0)
log_max_prob = max(log_max_prob, max(tmp))
log_probs.append(tmp)
probs = [np.exp(l - log_max_prob) for l in log_probs]
probs_l = [np.concatenate((np.zeros(len(x)-min_id-len(p),),p)) for p in probs]
prob_tot = np.sum(np.array(probs_l),axis=0)
Z = np.sum(prob_tot)
if (Z== 0.0).any():
print('All probabilities are 0.0')
pdb.set_trace()
prob_tot = prob_tot/Z/interval
x = x[min_id:]
m = interval*np.inner(x, prob_tot)
return m, interval*np.inner((x-m)**2, prob_tot), (x, prob_tot)
def posterior_adfq(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH=1e-20,
REW_VAR = REW_VAR_0, scale_factor = 1.0, asymptotic = False, asymptotic_trigger = 1e-20,
noise = 0.0, noise_c =0.0, batch=False):
"""ADFQ posterior update
Parameters
----------
n_means : a numpy array of mean values for the next state (s'). shape = (anum,) or (batch_size, anum)
n_vars : a numpy array of variance values for the next state (s'). shape = (anum,) or (batch_size, anum)
c_mean : a mean value for the current state-action pair. shape = () or (batch_size,)
c_var : a variance value for the current state-action pair. shape = () or (batch_size,)
reward : the current observed reward. shape = () or (batch_size,)
discount : the discount factor. Scalar.
terminal : (=done) True if the environment is episodic and the current episode is done. shape = () or (batch_size,)
varTH : variance threshold.
scale_factor : scale factor.
asymptotic : True to use the asymptotic update
asymptotic_trigger : a value to decide when to start the asymptotic update if "asymptotic==True"
noise : noise added to c_var for the update in stochastic environment
batch : True if you are using a batch update
REW_VAR : when terminal == True, TD target = reward. p(r) = Gaussian with mean=0 and variance = REW_VAR.
"""
noise = noise/scale_factor
noise_c = noise_c/scale_factor
c_var = c_var + noise_c
target_vars = discount*discount*np.array(n_vars, dtype=DTYPE)#+ noise
t = asymptotic_trigger/scale_factor
if batch:
batch_size = n_means.shape[0]
target_means = np.reshape(reward , (batch_size,1))+ discount*np.array(n_means, dtype=DTYPE)
stats = posterior_adfq_batch_helper(target_means, target_vars, c_mean, c_var, discount,
scale_factor=scale_factor, asymptotic=asymptotic, noise=noise)
reward = np.array(reward, dtype=DTYPE)
terminal = np.array(terminal, dtype=int)
if asymptotic :
is_asymptotic = np.prod((n_vars <= t), axis=-1) * np.prod((c_var <= t), axis=-1)
else:
target_means = reward + discount*np.array(n_means, dtype=DTYPE)
        # sorted_idx (actions ordered by descending target mean) mirrors
        # posterior_adfq_batch_helper; the helper requires it explicitly.
        stats = posterior_adfq_helper(target_means, target_vars, c_mean, c_var, discount,
                    scale_factor=scale_factor,
                    sorted_idx=np.flip(np.argsort(target_means), axis=-1),
                    asymptotic=asymptotic, noise=noise)
stats = stats[np.newaxis,:]
if asymptotic and (n_vars <= t).all() and (c_var <= t):
b_rep = np.argmin(stats[:,:,2], axis=-1)
weights = np.zeros((len(stats),))
weights[b_rep] = 1.0
return stats[b_rep, 0], np.maximum(stats[b_rep,1], varTH), (stats[:,0], stats[:,1], weights)
logk = stats[:,:,2] - np.max(stats[:,:,2],axis=-1, keepdims=batch)
weights = np.exp(logk - logsumexp(logk, axis=-1, keepdims=batch), dtype=DTYPE)
v = weights*stats[:,:,0]
mean_new = np.sum(v, axis=-1, keepdims=batch)
var_new = np.sum(weights*stats[:,:,1], axis=-1) \
+ np.sum(v*(stats[:,:,0] - mean_new),axis=-1)/scale_factor #+ (np.sum(weights*(stats[:,:,0]**2),axis=-1) - mean_new**2)/scale_factor
var_new = (1.-terminal)*var_new + terminal*1./(1./c_var + scale_factor/(REW_VAR+noise))
mean_new = (1.-terminal)*np.squeeze(mean_new) + terminal*var_new*(c_mean/c_var + scale_factor*reward/(REW_VAR+noise))
if np.isnan(mean_new).any() or np.isnan(var_new).any():
pdb.set_trace()
if batch:
return np.squeeze(mean_new), np.maximum(varTH, np.squeeze(var_new)), (np.squeeze(stats[:,:,0]), np.squeeze(stats[:,:,1]), np.squeeze(weights))
else:
return mean_new[0], np.maximum(varTH, var_new[0]), (np.squeeze(stats[:,:,0]), np.squeeze(stats[:,:,1]), np.squeeze(weights))
def posterior_adfq_batch_helper(target_means, target_vars, c_mean, c_var, discount, scale_factor, asymptotic=False, noise=0.0):
"""ADFQ - Numeric posterior update helper function for batch update
"""
batch_stats = []
sorted_idx = np.flip(np.argsort(target_means), axis=-1)
for k in range(target_means.shape[0]):
noise_k = noise[k] if type(noise)== np.ndarray else noise
stats = posterior_adfq_helper(target_means[k], target_vars[k], c_mean[k], c_var[k], discount, scale_factor,
sorted_idx = sorted_idx[k] , asymptotic=False, noise=noise_k)
batch_stats.append(stats)
return np.array(batch_stats)
def posterior_adfq_helper(target_means, target_vars, c_mean, c_var, discount, scale_factor, sorted_idx, asymptotic=False, noise=0.0):
"""ADFQ - Numeric posterior update helper function
"""
anum = target_means.shape[-1]
dis2 = discount*discount
rho_vars = c_var/target_vars*dis2
bar_vars = 1./(1./c_var + 1./(target_vars+noise))
bar_means = bar_vars*(c_mean/c_var + target_means/(target_vars+noise))
add_vars = c_var + target_vars + noise
stats = np.zeros((anum, 3))
# Search a range for mu_star
for (j,b) in enumerate(sorted_idx):
b_primes = [c for c in sorted_idx if c!=b] # anum-1
outcome = iter_search(anum, b_primes, target_means, target_vars, rho_vars, target_means[b], bar_means[b], bar_vars[b],
rho_vars[b], add_vars[b], c_mean, discount, asymptotic=asymptotic, scale_factor=scale_factor)
if outcome is not None:
stats[b] = outcome
else:
print("Could not find a maching mu star")
return np.array([stats[i] for i in range(anum)], dtype=DTYPE)
@jit(nopython=True)
def iter_search(anum, b_primes, target_means, target_vars, rho_vars, target_means_b, bar_means_b, bar_vars_b,
rho_vars_b, add_vars_b, c_mean, discount, asymptotic=False, scale_factor=1.0):
upper = 1e+20
dis2 = discount*discount
for i in range(anum):
n_target_means= np.array([target_means[b_primes[k]] for k in range(i)])
n_target_vars = np.array([target_vars[b_primes[k]] for k in range(i)])
n_rho_vars = np.array([rho_vars[b_primes[k]] for k in range(i)])
if i == (anum-1):
lower = -1e+20
else:
lower = target_means[b_primes[i]]
mu_star = (bar_means_b + np.sum(n_target_means * n_rho_vars)/(dis2 + rho_vars_b))\
/(1+np.sum(n_rho_vars)/(dis2 + rho_vars_b))
        if (float(mu_star) >= float(lower)) and (float(mu_star) <= float(upper)):
var_star = 1./(1./bar_vars_b + np.sum(1./n_target_vars))
if asymptotic :
logk = (target_means_b - c_mean)**2 / add_vars_b + (mu_star - bar_means_b)**2 / bar_vars_b \
+ np.sum((n_target_means - mu_star)**2 / n_target_vars )
else:
logk = 0.5*(np.log(var_star) - np.log(bar_vars_b) - np.log(2*np.pi) - np.log(add_vars_b)) \
- 0.5/scale_factor * ((target_means_b - c_mean)**2 / add_vars_b + (mu_star-bar_means_b)**2 / bar_vars_b \
+ np.sum((n_target_means - mu_star)**2 / n_target_vars ))
return mu_star, var_star, logk
else:
upper = lower
return None
def posterior_adfq_v2(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH=1e-20, REW_VAR = REW_VAR_0,
logEps=-1e+20, scale_factor = 1.0, asymptotic=False, asymptotic_trigger = 1e-20, batch=False, noise=0.0):
"""ADFQ posterior update version 2 (presented in appendix of the ADFQ paper)
Parameters
----------
n_means : a numpy array of mean values for the next state (s'). shape = (anum,) or (batch_size, anum)
n_vars : a numpy array of variance values for the next state (s'). shape = (anum,) or (batch_size, anum)
c_mean : a mean value for the current state-action pair. shape = () or (batch_size,)
c_var : a variance value for the current state-action pair. shape = () or (batch_size,)
reward : the current observed reward. shape = () or (batch_size,)
discount : the discount factor. Scalar.
terminal : (=done) True if the environment is episodic and the current episode is done. shape = () or (batch_size,)
varTH : variance threshold.
logEps : log of the small probability used in the approximation (Eq.16)
scale_factor : scale factor.
asymptotic : True to use the asymptotic update
asymptotic_trigger : a value to decide when to start the asymptotic update if "asymptotic==True"
noise : noise added to c_var for the update in stochastic environment
batch : True if you are using a batch update
REW_VAR : when terminal == True, TD target = reward. p(r) = Gaussian with mean=0 and variance = REW_VAR.
"""
if batch:
batch_size = len(n_means)
c_mean = np.reshape(c_mean, (batch_size,1))
c_var = np.reshape(c_var, (batch_size,1))
reward = np.reshape(reward, (batch_size,1))
terminal = np.reshape(terminal, (batch_size,1))
target_means = reward +discount*np.array(n_means, dtype = DTYPE)
target_vars = discount*discount*(np.array(n_vars, dtype = DTYPE))
bar_vars = 1./(1./c_var + 1./(target_vars + noise))
bar_means = bar_vars*(c_mean/c_var + target_means/(target_vars + noise))
add_vars = c_var + target_vars + noise
sorted_idx = np.argsort(target_means, axis=int(batch))
if batch:
ids = range(0,batch_size)
bar_targets = target_means[ids, sorted_idx[:,-1], np.newaxis]*np.ones(target_means.shape)
bar_targets[ids, sorted_idx[:,-1]] = target_means[ids, sorted_idx[:,-2]]
else:
bar_targets = target_means[sorted_idx[-1]]*np.ones(target_means.shape)
bar_targets[sorted_idx[-1]] = target_means[sorted_idx[-2]]
thetas = np.heaviside(bar_targets-bar_means,0.0)
t = asymptotic_trigger/scale_factor
if asymptotic :
if (n_vars <= t).all() and (c_var <= t):
b_rep = np.argmin((target_means-c_mean)**2 - 2*add_vars*logEps*thetas) # logEps*thetas = log(nu)
weights = np.zeros(np.shape(target_means))
weights[b_rep] = 1.0
return bar_means[b_rep], np.maximum(varTH, bar_vars[b_rep]), (bar_means, bar_vars, weights)
log_weights = -0.5*( np.log(np.pi*2) + np.log(add_vars) + (c_mean-target_means)**2/add_vars/scale_factor) + logEps*thetas
log_weights = log_weights - np.max(log_weights, axis=int(batch), keepdims=batch)
log_weights = log_weights - logsumexp(log_weights, axis=int(batch), keepdims=batch)
weights = np.exp(log_weights, dtype=DTYPE)
mean_new = np.sum(np.multiply(weights, bar_means), axis=int(batch), keepdims=batch)
var_new = (np.sum(np.multiply(weights, bar_means**2), axis=int(batch), keepdims=batch) - mean_new**2)/scale_factor \
+ np.sum(np.multiply(weights, bar_vars), axis=int(batch), keepdims=batch)
# For Terminals
var_new = (1.-terminal)*var_new + terminal*1./(1./c_var + scale_factor/(REW_VAR+noise))
mean_new = (1.-terminal)*mean_new + terminal*var_new*(c_mean/c_var + scale_factor*reward/(REW_VAR+noise))
if np.isnan(mean_new).any() or np.isnan(var_new).any():
pdb.set_trace()
if batch:
return np.squeeze(mean_new), np.squeeze(np.maximum(varTH, var_new)), (bar_means, bar_vars, weights)
else:
return mean_new, np.maximum(varTH, var_new), (bar_means, bar_vars, weights)
def posterior_hypbrid(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH=1e-20,
logEps=-1e+20, scale_factor = 1.0, trigger = 1e-4, batch=False, noise=0.0):
"""ADFQ posterior update using both ADFQ and ADFQ-V2
Parameters
----------
trigger : threshold value to start ADFQ-V2 update
"""
    if (c_var*scale_factor <= trigger).all() and (n_vars*scale_factor <= trigger).all():
mean_new, var_new, stats = posterior_adfq_v2(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH=varTH,
logEps=logEps, scale_factor = scale_factor, batch=batch, noise=noise)
else:
mean_new, var_new, stats = posterior_adfq(n_means, n_vars, c_mean, c_var, reward, discount, terminal, varTH=varTH,
scale_factor = scale_factor, batch=batch, noise=noise)
return mean_new, var_new, stats
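# Hedged usage sketch (assumed, not part of the original module): a minimal,
# non-batch posterior_adfq_v2 update for a next state with three actions.
# All numbers below are illustrative only.
if __name__ == "__main__":
    example_n_means = np.array([1.0, 0.5, -0.2])   # Q means of the next state s'
    example_n_vars = np.array([0.3, 0.4, 0.2])     # Q variances of the next state s'
    mean_new, var_new, (bar_means, bar_vars, weights) = posterior_adfq_v2(
        example_n_means, example_n_vars, c_mean=0.8, c_var=0.5,
        reward=1.0, discount=0.9, terminal=0)
    print("updated mean:", mean_new, "updated variance:", var_new)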
|
the-stack_106_19224
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import name
class activate(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/event-handler/activate. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name',)
_yang_name = 'activate'
_rest_name = 'activate'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__name = YANGDynClass(base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'event-handler', u'activate']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'event-handler', u'activate']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /rbridge_id/event_handler/activate/name (list)
"""
return self.__name
def _set_name(self, v, load=False):
"""
Setter method for name, mapped from YANG variable /rbridge_id/event_handler/activate/name (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """name must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)""",
})
self.__name = t
if hasattr(self, '_set'):
self._set()
def _unset_name(self):
self.__name = YANGDynClass(base=YANGListType("name",name.name, yang_name="name", rest_name="name", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-event-handler', defining_module='brocade-event-handler', yang_type='list', is_config=True)
name = __builtin__.property(_get_name, _set_name)
_pyangbind_elements = {'name': name, }
|
the-stack_106_19225
|
"""General test cases usable by multiple test modules."""
from __future__ import unicode_literals
import random
import string
from itertools import chain
from hashlib import md5
# Trimming off whitespace/line return chars
# reserving '0' for less-than string comparisons
# reserving '~' for greater-than string comparisons
STR_CHARS = sorted(string.printable[:-6])
STR_CHARS = STR_CHARS[1:-1]
MIN_STR = STR_CHARS[0]
MAX_STR = STR_CHARS[-1]
MAX_INT = 99999999999999999999999999
MIN_INT = -MAX_INT
def _random_with_dupes(sequence):
"""Return a random sequence including duplicates."""
part1 = random.sample(sequence, 50)
part2 = random.sample(part1, random.randrange(1, 50))
result = part1 + part2
random.shuffle(result)
return result
INT_EDGE_CASES = [
(),
(0,),
(0, 1),
(1, 0),
list(range(100)),
list(range(99, -1, -1)),
[1] * 10,
_random_with_dupes(range(100))
]
STR_EDGE_CASES = [
'',
'a',
'ab',
'ba',
string.ascii_letters,
''.join(reversed(string.ascii_letters)),
'b' * 10,
_random_with_dupes(STR_CHARS)
]
# lists of ints
INT_TEST_CASES = (random.sample(range(1000),
random.randrange(2, 20)) for n in range(10))
# strings
STR_TEST_CASES = (random.sample(STR_CHARS,
random.randrange(2, 20)) for n in range(10))
TEST_CASES = chain(
INT_EDGE_CASES,
STR_EDGE_CASES,
INT_TEST_CASES,
STR_TEST_CASES,
)
POP = (True, False)
def _make_words(sample_size=29, words_between_samples=2000):
"""Create lists of similar words from dictionary.
Used for testing Trie.
"""
sample_idx = random.randrange(words_between_samples)
similar_words = []
different_words = []
with open('/usr/share/dict/words', 'r') as words:
for idx, word in enumerate(words):
word = word.strip()
try:
word = word.decode('utf-8')
except AttributeError:
pass
if idx == sample_idx:
different_words.append(word)
if sample_idx <= idx <= sample_idx + sample_size:
similar_words.append(word)
elif idx > sample_idx + sample_size:
yield similar_words
sample_idx = idx + random.randrange(words_between_samples)
similar_words = []
yield similar_words
yield different_words
def make_unique_value():
"""Create a unique value for testing non-membership in a data strucutre."""
return md5(b'SUPERUNIQUEFLAGVALUE').hexdigest()
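# Hedged usage sketch (assumed, not part of the original module): how a test
# might consume TEST_CASES together with make_unique_value to check
# non-membership. TEST_CASES is a one-shot generator, so it is consumed here.
if __name__ == "__main__":
    flag = make_unique_value()
    for sequence in TEST_CASES:
        assert flag not in sequence
    print("flag value is absent from every generated test case")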
|
the-stack_106_19231
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import ordereddict as collections
except ImportError: # pragma: no cover
import collections # pragma: no cover
from poppy.transport.pecan.models.response import link
class Model(collections.OrderedDict):
def __init__(self, flavor, controller):
super(Model, self).__init__()
self['id'] = flavor.flavor_id
self['providers'] = []
for x in flavor.providers:
provider = collections.OrderedDict()
provider['provider'] = x.provider_id
provider['links'] = []
provider['links'].append(
link.Model(x.provider_url, 'provider_url'))
self['providers'].append(provider)
self['links'] = []
self['links'].append(
link.Model(
u'{0}/flavors/{1}'.format(controller.base_url,
flavor.flavor_id),
'self'))
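# Hedged usage sketch (assumed, not part of the original module): building the
# flavor response model from stand-in objects. Real callers pass a flavor
# domain object and a Pecan controller; SimpleNamespace is only a stub here and
# the URLs are illustrative.
if __name__ == "__main__":
    from types import SimpleNamespace
    stub_provider = SimpleNamespace(provider_id='mock',
                                    provider_url='https://provider.example/flavor')
    stub_flavor = SimpleNamespace(flavor_id='standard', providers=[stub_provider])
    stub_controller = SimpleNamespace(base_url='https://poppy.example/v1.0')
    print(Model(stub_flavor, stub_controller))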
|
the-stack_106_19232
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import platform
import shutil
import signal
import subprocess
import sys
import threading
import types
from pathlib import Path
from typing import Optional, Set
import docker
from dateutil.parser import isoparse
from docker.errors import APIError
from docker.models.containers import Container
from docker.types import Mount
from lean.components.util.logger import Logger
from lean.components.util.platform_manager import PlatformManager
from lean.components.util.temp_manager import TempManager
from lean.constants import SITE_PACKAGES_VOLUME_LIMIT, \
DOCKER_NETWORK
from lean.models.docker import DockerImage
from lean.models.errors import MoreInfoError
class DockerManager:
"""The DockerManager contains methods to manage and run Docker images."""
def __init__(self, logger: Logger, temp_manager: TempManager, platform_manager: PlatformManager) -> None:
"""Creates a new DockerManager instance.
:param logger: the logger to use when printing messages
:param temp_manager: the TempManager instance used when creating temporary directories
:param platform_manager: the PlatformManager used when checking which operating system is in use
"""
self._logger = logger
self._temp_manager = temp_manager
self._platform_manager = platform_manager
def pull_image(self, image: DockerImage) -> None:
"""Pulls a Docker image.
:param image: the image to pull
"""
self._logger.info(f"Pulling {image}...")
# We cannot really use docker_client.images.pull() here as it doesn't let us log the progress
# Downloading multiple gigabytes without showing progress does not provide good developer experience
# Since the pull command is the same on Windows, macOS and Linux we can safely use a system call
if shutil.which("docker") is not None:
process = subprocess.run(["docker", "image", "pull", str(image)])
if process.returncode != 0:
raise RuntimeError(
f"Something went wrong while pulling {image}, see the logs above for more information")
else:
self._get_docker_client().images.pull(image.name, image.tag)
def run_image(self, image: DockerImage, **kwargs) -> bool:
"""Runs a Docker image. If the image is not available locally it will be pulled first.
See https://docker-py.readthedocs.io/en/stable/containers.html for all the supported kwargs.
If kwargs contains an "on_output" property, it is removed before passing it on to docker.containers.run
and the given lambda is ran whenever the Docker container prints anything.
If kwargs contains a "commands" property, it is removed before passing it on to docker.containers.run
and the Docker container is configured to run the given commands.
This property causes the "entrypoint" property to be overwritten if it exists.
If kwargs sets "detach" to True, the method returns as soon as the container starts.
If this is not the case, the method is blocking and runs until the container exits.
:param image: the image to run
:param kwargs: the kwargs to forward to docker.containers.run
:return: True if the command in the container exited successfully, False if not
"""
if not self.image_installed(image):
self.pull_image(image)
on_output = kwargs.pop("on_output", lambda chunk: None)
commands = kwargs.pop("commands", None)
if commands is not None:
shell_script_commands = ["#!/usr/bin/env bash", "set -e"]
if self._logger.debug_logging_enabled:
shell_script_commands.append("set -x")
shell_script_commands += commands
shell_script_path = self._temp_manager.create_temporary_directory() / "lean-cli-start.sh"
with shell_script_path.open("w+", encoding="utf-8", newline="\n") as file:
file.write("\n".join(shell_script_commands) + "\n")
if "mounts" not in kwargs:
kwargs["mounts"] = []
kwargs["mounts"].append(Mount(target="/lean-cli-start.sh",
source=str(shell_script_path),
type="bind",
read_only=True))
kwargs["entrypoint"] = ["bash", "/lean-cli-start.sh"]
# Format all source paths
if "mounts" in kwargs:
for mount in kwargs["mounts"]:
mount["Source"] = self._format_source_path(mount["Source"])
if "volumes" in kwargs:
for key in list(kwargs["volumes"].keys()):
new_key = self._format_source_path(key)
kwargs["volumes"][new_key] = kwargs["volumes"].pop(key)
detach = kwargs.pop("detach", False)
is_tty = sys.stdout.isatty()
kwargs["detach"] = True
kwargs["hostname"] = platform.node()
kwargs["tty"] = is_tty and not detach
kwargs["stdin_open"] = is_tty and not detach
kwargs["stop_signal"] = kwargs.get("stop_signal", "SIGKILL")
if detach and "remove" not in kwargs:
kwargs["remove"] = True
# Make sure host.docker.internal resolves on Linux
# See https://github.com/QuantConnect/Lean/pull/5092
if self._platform_manager.is_host_linux():
if "extra_hosts" not in kwargs:
kwargs["extra_hosts"] = {}
kwargs["extra_hosts"]["host.docker.internal"] = "172.17.0.1"
# Run all containers on a custom bridge network
# This makes it possible for containers to connect to each other by name
self.create_network(DOCKER_NETWORK)
kwargs["network"] = DOCKER_NETWORK
# Remove existing image with the same name if it exists and is not running
if "name" in kwargs:
existing_container = self.get_container_by_name(kwargs["name"])
if existing_container is not None and existing_container.status != "running":
existing_container.remove()
self._logger.debug(f"Running '{image}' with the following configuration:")
self._logger.debug(kwargs)
docker_client = self._get_docker_client()
container = docker_client.containers.run(str(image), None, **kwargs)
if detach:
return True
force_kill_next = False
killed = False
def kill_container(force: bool) -> None:
nonlocal killed
killed = True
try:
if force:
container.kill()
else:
container.stop(timeout=60)
container.remove()
except APIError:
pass
finally:
self._temp_manager.delete_temporary_directories()
sys.exit(1)
# Kill the container on Ctrl+C
def signal_handler(sig: signal.Signals, frame: types.FrameType) -> None:
nonlocal force_kill_next
if not is_tty or force_kill_next or kwargs["stop_signal"] == "SIGKILL":
force_kill_current = True
else:
self._logger.info("Waiting 1 minute for LEAN to exit gracefully, press Ctrl+C again to force stop")
force_kill_next = True
force_kill_current = False
# If we run this code on the current thread, a second Ctrl+C won't be detected on Windows
kill_thread = threading.Thread(target=kill_container, args=[force_kill_current])
kill_thread.daemon = True
kill_thread.start()
signal.signal(signal.SIGINT, signal_handler)
# container.logs() is blocking, we run it on a separate thread so the SIGINT handler works properly
# If we run this code on the current thread, SIGINT won't be triggered on Windows when Ctrl+C is triggered
def print_logs() -> None:
chunk_buffer = bytes()
is_first_time = True
try:
while True:
container.reload()
if container.status != "running":
return
if is_first_time:
tail = "all"
is_first_time = False
else:
tail = 0
# Capture all logs and print it to stdout line by line
for chunk in container.logs(stream=True, follow=True, tail=tail):
chunk_buffer += chunk
if not chunk_buffer.endswith(b"\n"):
continue
chunk = chunk_buffer.decode("utf-8")
chunk_buffer = bytes()
if on_output is not None:
on_output(chunk)
self._logger.info(chunk.rstrip())
if not is_tty:
continue
if "Press any key to exit..." in chunk or "QuantConnect.Report.Main(): Completed." in chunk:
socket = docker_client.api.attach_socket(container.id, params={"stdin": 1, "stream": 1})
if hasattr(socket, "_sock"):
socket._sock.send(b"\n")
else:
socket.send(b"\n")
socket.close()
except:
# This will crash when the container exits, ignore the exception
pass
logs_thread = threading.Thread(target=print_logs)
logs_thread.daemon = True
logs_thread.start()
while logs_thread.is_alive():
logs_thread.join(0.1)
if killed:
try:
container.remove()
except APIError:
pass
finally:
sys.exit(1)
container.wait()
container.reload()
success = container.attrs["State"]["ExitCode"] == 0
container.remove()
return success
def build_image(self, root: Path, dockerfile: Path, target: DockerImage) -> None:
"""Builds a Docker image.
        :param root: the path to build from
:param dockerfile: the path to the Dockerfile to build
:param target: the target name and tag
"""
# We cannot really use docker_client.images.build() here as it doesn't let us log the progress
# Building images without showing progress does not provide good developer experience
# Since the build command is the same on Windows, macOS and Linux we can safely use a system call
process = subprocess.run(["docker", "build", "-t", str(target), "-f", str(dockerfile), "."], cwd=root)
if process.returncode != 0:
raise RuntimeError(
f"Something went wrong while building '{dockerfile}', see the logs above for more information")
def image_installed(self, image: DockerImage) -> bool:
"""Returns whether a certain image is installed.
:param image: the image to check availability for
:return: True if the image is available locally, False if not
"""
docker_client = self._get_docker_client()
return any(str(image) in x.tags for x in docker_client.images.list())
def get_local_digest(self, image: DockerImage) -> Optional[str]:
"""Returns the digest of a locally installed image.
:param image: the local image to get the digest of
:return: the digest of the local image, or None if the digest does not exist
"""
img = self._get_docker_client().images.get(str(image))
repo_digests = img.attrs["RepoDigests"]
if len(repo_digests) == 0:
return None
return repo_digests[0].split("@")[1]
def get_remote_digest(self, image: DockerImage) -> str:
"""Returns the digest of a remote image.
:param image: the remote image to get the digest of
:return: the digest of the remote image
"""
img = self._get_docker_client().images.get_registry_data(str(image))
return img.attrs["Descriptor"]["digest"]
def create_network(self, name: str) -> None:
"""Creates a new bridge network, or does nothing if a network with the given name already exists.
        :param name: the name of the network to create
"""
docker_client = self._get_docker_client()
if not any(n.name == name for n in docker_client.networks.list()):
docker_client.networks.create(name, driver="bridge")
def create_volume(self, name: str) -> None:
"""Creates a new volume, or does nothing if a volume with the given name already exists.
:param name: the name of the volume to create
"""
docker_client = self._get_docker_client()
if not any(v.name == name for v in docker_client.volumes.list()):
docker_client.volumes.create(name)
def create_site_packages_volume(self, requirements_file: Path) -> str:
"""Returns the name of the volume to mount to the user's site-packages directory.
This method automatically returns the best volume for the given requirements.
It also rotates out older volumes as needed to ensure we don't use too much disk space.
:param requirements_file: the path to the requirements file that will be pip installed in the container
:return: the name of the Docker volume to use
"""
requirements_hash = hashlib.md5(requirements_file.read_text(encoding="utf-8").encode("utf-8")).hexdigest()
volume_name = f"lean_cli_python_{requirements_hash}"
docker_client = self._get_docker_client()
existing_volumes = [v for v in docker_client.volumes.list() if v.name.startswith("lean_cli_python_")]
if any(v.name == volume_name for v in existing_volumes):
return volume_name
volumes_by_age = sorted(existing_volumes, key=lambda v: isoparse(v.attrs["CreatedAt"]))
for i in range((len(volumes_by_age) - SITE_PACKAGES_VOLUME_LIMIT) + 1):
volumes_by_age[i].remove()
docker_client.volumes.create(volume_name)
return volume_name
def get_running_containers(self) -> Set[str]:
"""Returns the names of all running containers.
:return: a set containing the names of all running Docker containers
"""
containers = self._get_docker_client().containers.list()
return {c.name.lstrip("/") for c in containers if c.status == "running"}
def get_container_by_name(self, container_name: str) -> Optional[Container]:
"""Finds a container with a given name.
:param container_name: the name of the container to find
:return: the container with the given name, or None if it does not exist
"""
for container in self._get_docker_client().containers.list(all=True):
if container.name.lstrip("/") == container_name:
return container
return None
def show_logs(self, container_name: str, follow: bool = False) -> None:
"""Shows the logs of a Docker container in the terminal.
:param container_name: the name of the container to show the logs of
:param follow: whether the logs should be streamed in real-time if the container is running (defaults to False)
"""
if self.get_container_by_name(container_name) is None:
return
# We cannot use the Docker Python SDK to get live logs consistently
# Since the logs command is the same on Windows, macOS and Linux we can safely use a system call
command = ["docker", "logs"]
if follow:
command.append("-f")
command.append(container_name)
subprocess.run(command)
def is_missing_permission(self) -> bool:
"""Returns whether we cannot connect to the Docker client because of a permissions issue.
A permissions issue usually indicates that the client can only be used with root privileges.
        :return: True if we cannot connect to the Docker client because of a permissions issue, False if that's not the case
"""
try:
docker.from_env()
except Exception as exception:
return "Permission denied" in str(exception)
return False
def _get_docker_client(self) -> docker.DockerClient:
"""Creates a DockerClient instance.
Raises an error if Docker is not running.
:return: a DockerClient instance which responds to requests
"""
error = MoreInfoError("Please make sure Docker is installed and running",
"https://www.lean.io/docs/lean-cli/key-concepts/troubleshooting#02-Common-Errors")
try:
docker_client = docker.from_env()
except Exception:
raise error
try:
if not docker_client.ping():
raise error
except Exception:
raise error
return docker_client
def _format_source_path(self, path: str) -> str:
"""Formats a source path so Docker knows what it refers to.
This method does two things:
1. If Docker Toolbox is in use, it converts paths like C:/Path to /c/Path.
2. If Docker is running in Docker, it converts paths to the corresponding paths on the host system.
:param path: the original path
:return: the original path formatted in such a way that Docker can understand it
"""
# Docker Toolbox modifications
is_windows = self._platform_manager.is_system_windows()
is_docker_toolbox = "machine/machines" in os.environ.get("DOCKER_CERT_PATH", "").replace("\\", "/")
if is_windows and is_docker_toolbox:
# Backward slashes to forward slashes
path = path.replace('\\', '/')
# C:/Path to /c/Path
path = f"/{path[0].lower()}/{path[3:]}"
# Docker in Docker modifications
path_mappings = json.loads(os.environ.get("DOCKER_PATH_MAPPINGS", "{}"))
for container_path, host_path in path_mappings.items():
if path.startswith(container_path):
path = host_path + path[len(container_path):]
break
return path
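# Hedged usage sketch (assumed, not part of the original module): how the
# "commands" and "on_output" kwargs documented in run_image() are typically
# combined. `manager` is assumed to be an already-constructed DockerManager,
# and the DockerImage(name=..., tag=...) construction plus the ubuntu:22.04 tag
# are illustrative assumptions.
def _example_run(manager: "DockerManager") -> bool:
    return manager.run_image(
        DockerImage(name="ubuntu", tag="22.04"),
        commands=["echo 'hello from lean-cli'"],
        on_output=lambda chunk: print(chunk, end=""),
    )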
|
the-stack_106_19234
|
from keras.preprocessing.text import Tokenizer
from DataReader import DataReader
from nltk.corpus import stopwords
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.wrappers import Bidirectional
from keras.layers import Embedding
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from gensim.models.keyedvectors import KeyedVectors
import file_paths
import math
# This is our LSTM module. It starts off by first reading in the tweet and labels file.
# It then tokenizes the data, followed up by encoding the documents into sequences of numbers and weight
# matrix construction. Lastly, it initializes the Embedding Layer and LSTM layer for training and prediction
def get_tweets_labels(tweet_file, labels_file):
#Simply read in data
data_reader = DataReader(tweet_file, labels_file)
tweets = data_reader.read_tweets()
labels = data_reader.read_labels()
return tweets, labels
def smoothen_tweets(tweets):
#Tokenization
stops = set(stopwords.words("english"))
smoothened_tweets = []
for tweet in tweets:
words = tweet.split(" ")
str = ""
for word in words:
if word[0] != "@" and word not in stops:
if word[0] == "#":
word = word[1:]
str += word + " "
smoothened_tweets.append(str)
return smoothened_tweets
def encode_docs(tweets):
#Translate tweets to sequence of numbers
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', split=" ", lower=True)
tokenizer.fit_on_texts(tweets)
return tokenizer, tokenizer.texts_to_sequences(tweets)
def format_train(encoded_docs, labels, max_length, train_range):
#Apply padding to data set and convert labels to bit vector form
Xtrain = pad_sequences(encoded_docs[0:train_range], maxlen=max_length, padding='post')
Ytrain = []
for emoji in labels[0:train_range]:
num = int(emoji)
bit_vec = np.zeros(20)
bit_vec[num] = 1
Ytrain.append(bit_vec)
Ytrain = np.asarray(Ytrain)
return Xtrain, Ytrain
def format_test(encoded_docs, labels, max_length, test_range):
# Apply padding to data set and convert labels to bit vector form
Xtest = pad_sequences(encoded_docs[-test_range:], maxlen=max_length, padding='post')
Ytest = []
for emoji in labels[-test_range:]:
num = int(emoji)
bit_vec = np.zeros(20)
bit_vec[num] = 1
Ytest.append(bit_vec)
Ytest = np.asarray(Ytest)
return Xtest, Ytest
def populate_weight_matrix(vocab, raw_embedding):
# Create weight matrix from pre-trained embeddings
vocab_size = len(vocab) + 1
weight_matrix = np.zeros((vocab_size, 300))
for word, i in vocab.items():
if word in raw_embedding:
weight_matrix[i] = raw_embedding[word]
return weight_matrix
def form_model_and_fit(weight_matrix, vocab_size, max_length, Xtrain, Ytrain, Xtest, Ytest):
#Core model training
embedding_layer = Embedding(vocab_size, 300, weights=[weight_matrix], input_length=max_length, trainable=True, mask_zero=True)
model = Sequential()
model.add(embedding_layer)
model.add(LSTM(128, dropout=0.2, return_sequences=True))
model.add(LSTM(128, dropout=0.2))
model.add(Dense(20, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(Xtrain, Ytrain, epochs=1, validation_data=(Xtest, Ytest))
return model.evaluate(Xtest, Ytest)
def run_LSTM(tweet_file, labels_file):
#Driver function
tweets, labels = get_tweets_labels(tweet_file, labels_file)
tweets = smoothen_tweets(tweets)
max_length = math.ceil(sum([len(s.split(" ")) for s in tweets])/len(tweets))
tokenizer, encoded_docs = encode_docs(tweets)
Xtrain, Ytrain = format_train(encoded_docs, labels,max_length, 40000)
Xtest, Ytest = format_test(encoded_docs, labels, max_length, 10000)
vocab = tokenizer.word_index
raw_embedding = KeyedVectors.load_word2vec_format('model_swm_300-6-10-low.w2v', binary=False)
weight_matrix = populate_weight_matrix(vocab, raw_embedding)
score, acc = form_model_and_fit(weight_matrix, len(vocab) + 1, max_length, Xtrain, Ytrain, Xtest, Ytest)
return acc
# Run the driver only when this file is executed directly, so importing the
# module for its helper functions does not trigger a full training run.
if __name__ == "__main__":
    accuracy = run_LSTM(file_paths.us_tweets_path, file_paths.us_labels_path)
    print(accuracy)
|
the-stack_106_19235
|
import warnings
from typing import List, Optional, Union, Dict
from mkdocs.structure.nav import (
Navigation as MkDocsNavigation,
Section,
Link,
_get_by_type,
_add_parent_links,
_add_previous_and_next_links,
)
from mkdocs.structure.pages import Page
from .arrange import arrange, InvalidArrangeEntry
from .meta import Meta
from .options import Options
from .utils import dirname, basename, join_paths
NavigationItem = Union[Page, Section, Link]
class ArrangeEntryNotFound(Warning):
def __init__(self, entry: str, context: str):
super().__init__(
'Arrange entry "{entry}" not found. [{context}]'.format(
entry=entry, context=context
)
)
class TitleInRootHasNoEffect(Warning):
def __init__(self, filename: str):
super().__init__(
'Using the "title" attribute in the {filename} file of the doc root has no effect'.format(
filename=filename
)
)
class HideInRootHasNoEffect(Warning):
def __init__(self, filename: str):
super().__init__(
'Using the "hide" attribute in the {filename} file of the doc root has no effect'.format(
filename=filename
)
)
class AwesomeNavigation:
def __init__(self, navigation: MkDocsNavigation, options: Options):
self.options = options
self.meta = NavigationMeta(navigation.items, options)
if self.meta.root.title is not None:
warnings.warn(TitleInRootHasNoEffect(self.options.filename))
if self.meta.root.hide is not None:
warnings.warn(HideInRootHasNoEffect(self.options.filename))
self.items = self._process_children(
navigation.items, self.options.collapse_single_pages, self.meta.root
)
def _process_children(
self, children: List[NavigationItem], collapse: bool, meta: Meta
) -> List[NavigationItem]:
children = self._arrange_items(children, meta)
result = []
for item in children:
if isinstance(item, Section):
item = self._process_section(item, collapse)
if item is None:
continue
result.append(item)
return result
def _arrange_items(
self, items: List[NavigationItem], meta: Meta
) -> List[NavigationItem]:
if meta.arrange is not None:
try:
arranged = arrange(
items,
meta.arrange,
lambda item: basename(self._get_item_path(item)),
)
return arranged if not meta.reverse else arranged[::-1]
except InvalidArrangeEntry as e:
warning = ArrangeEntryNotFound(e.value, meta.path)
if self.options.strict:
raise warning
else:
warnings.warn(warning)
return items
def _process_section(
self, section: Section, collapse_recursive: bool
) -> Optional[NavigationItem]:
meta = self.meta.sections[section]
if meta.hide is True:
return None
if meta.collapse_single_pages is not None:
collapse_recursive = meta.collapse_single_pages
self._set_title(section, meta)
section.children = self._process_children(
section.children, collapse_recursive, meta
)
if not section.children:
return None
return self._collapse(section, meta.collapse, collapse_recursive)
def _get_item_path(self, item: NavigationItem) -> Optional[str]:
if isinstance(item, Section):
return dirname(self.meta.sections[item].path)
elif isinstance(item, Page):
return item.file.abs_src_path
@staticmethod
def _set_title(section: Section, meta: Meta):
if meta.title is not None:
section.title = meta.title
@staticmethod
def _collapse(
section: Section, collapse: Optional[bool], collapse_recursive: bool
) -> NavigationItem:
if collapse is None:
collapse = collapse_recursive
if collapse and len(section.children) == 1:
return section.children[0]
return section
def to_mkdocs(self) -> MkDocsNavigation:
pages = _get_by_type(self.items, Page)
_add_previous_and_next_links(pages)
_add_parent_links(self.items)
return MkDocsNavigation(self.items, pages)
class NavigationMeta:
def __init__(self, items: List[NavigationItem], options: Options):
self.options = options
self.sections = {}
root_path = self._gather_metadata(items)
self.root = Meta.try_load_from(
join_paths(root_path, self.options.filename)
)
def _gather_metadata(self, items: List[NavigationItem]) -> Optional[str]:
paths = []
for item in items:
if isinstance(item, Page):
paths.append(item.file.abs_src_path)
elif isinstance(item, Section):
section_dir = self._gather_metadata(item.children)
paths.append(section_dir)
self.sections[item] = Meta.try_load_from(
join_paths(section_dir, self.options.filename)
)
return self._common_dirname(paths)
@staticmethod
def _common_dirname(paths: List[Optional[str]]) -> Optional[str]:
if paths:
dirnames = [dirname(path) for path in paths]
if len(set(dirnames)) == 1:
return dirnames[0]
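# Hedged usage sketch (assumed, not part of the original module): inside the
# plugin's on_nav hook the wrapper is applied roughly like this, given an
# Options instance built from the plugin configuration.
def _example_on_nav(nav: MkDocsNavigation, options: Options) -> MkDocsNavigation:
    return AwesomeNavigation(nav, options).to_mkdocs()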
|
the-stack_106_19238
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities for the TensorIR schedule API"""
from typing import Union
from tvm.ir import IRModule, structural_equal
from tvm.tir import PrimFunc
from tvm.tir.schedule import Trace, Schedule
def verify_trace_roundtrip(
sch: Schedule,
mod: Union[PrimFunc, IRModule],
*,
debug_mask: Union[str, int] = "all",
) -> Schedule:
"""Serialize a traced schedule to JSON, then replay the JSON trace by applying to
a fresh new schedule, verifying the reproducibility of scheduling.
Parameters
----------
sch : tir.Schedule
The traced TensorIR schedule to be verified
mod : Union[PrimFunc, IRModule]
The IRModule or PrimFunc to construct the fresh new schedule
debug_mask : Union[str, int]
Do extra correctness checking after the class creation and each time
after calling the Replace method.
Possible choices of `debug_mask`:
1) "all" - Turn on all the checks
2) "none" - Turn off all the checks
3) An integer - Turn on checks according to the bitmasks provided in ScheduleDebugMask
"""
# Step 1. Serialize the trace to JSON
trace = sch.trace
assert trace is not None
json_obj = trace.as_json()
# Step 2. Apply the JSON trace to a new schedule, then check if it reproduces the scheduling
new_sch = Schedule(mod=mod, debug_mask=debug_mask)
Trace.apply_json_to_schedule(json_obj=json_obj, sch=new_sch)
assert structural_equal(new_sch.mod, sch.mod)
# Step 3. Check the consistency of the text format between the old and new traces
py_repr = "\n".join(trace.as_python())
new_py_repr = "\n".join(new_sch.trace.as_python())
assert py_repr == new_py_repr
# Step 4. Return the new schedule in case it could be useful
return new_sch
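# Hedged usage sketch (assumed, not part of the original module): a typical test
# applies a few schedule primitives and then checks that the recorded trace
# replays to an identical module. `my_module` stands in for any TVMScript-defined
# IRModule containing a block named "B".
def _example_test(my_module: IRModule) -> None:
    sch = Schedule(mod=my_module, debug_mask="all")
    block = sch.get_block("B")
    loops = sch.get_loops(block)
    sch.parallel(loops[0])
    verify_trace_roundtrip(sch, mod=my_module)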
|
the-stack_106_19241
|
import json
from typing import Any, Dict, Optional, Type
from abc import ABC
from datetime import datetime
from bson import ObjectId
from bson.objectid import InvalidId
from pydantic import BaseConfig, BaseModel
class OID:
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
try:
return ObjectId(str(v))
except InvalidId:
raise ValueError("Invalid object ID")
class MongoDBModel(BaseModel, ABC):
id: Optional[OID]
class Config(BaseConfig):
allow_population_by_field_name = True
json_encoders = {
datetime: lambda dt: dt.isoformat(),
ObjectId: str,
}
@classmethod
def from_mongo(cls, data: Dict[str, Any]) -> Optional[Type["MongoDBModel"]]:
"""Constructs a pydantic object from mongodb compatible dictionary"""
if not data:
return None
id = data.pop("_id", None) # Convert _id into id
for k, v in cls.__fields__.items():
fields_data = str(v).split(" ")
for e in fields_data:
if "Optional[Any]".lower() in e.lower() and "type" in e.lower():
if k in data and data[k]:
data[k] = json.dumps(data[k])
return cls(**dict(data, id=id))
def to_mongo(self, **kwargs):
"""Maps a pydantic model to a mongodb compatible dictionary"""
exclude_unset = kwargs.pop(
"exclude_unset",
False, # Set as false so that default values are also stored
)
by_alias = kwargs.pop(
"by_alias", True
) # whether field aliases should be used as keys in the returned dictionary
        # Converting the model to a dictionary
parsed = self.dict(by_alias=by_alias, exclude_unset=exclude_unset, **kwargs)
# Mongo uses `_id` as default key.
# if "_id" not in parsed and "id" in parsed:
# parsed["_id"] = parsed.pop("id")
if "id" in parsed:
parsed.pop("id")
return parsed
def dict(self, **kwargs):
"""Override self.dict to hide some fields that are used as metadata"""
hidden_fields = {"_collection"}
kwargs.setdefault("exclude", hidden_fields)
return super().dict(**kwargs)
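# Hedged usage sketch (assumed, not part of the original module): a concrete
# document model and a round trip through from_mongo()/to_mongo().
if __name__ == "__main__":
    class User(MongoDBModel):
        name: str
        created_at: datetime
    raw_doc = {"_id": ObjectId(), "name": "alice", "created_at": datetime.utcnow()}
    user = User.from_mongo(raw_doc)
    print(user.id, user.to_mongo())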
|
the-stack_106_19242
|
from typing import List
class Solution:
def find_min(self, nums: List[int]) -> int:
self.nums = nums
self.last = len(self.nums) - 1
return self.search(0, self.last, self.nums[0])
def search(self, left: int, right: int, target: int) -> int:
if left > right:
return target
else:
mid = (left + right) // 2
if self.nums[mid] < target:
if self.nums[mid-1] < self.nums[mid]:
return self.search(left, mid - 1, self.nums[mid-1])
else:
return self.search(mid + 1, right, self.nums[mid])
else:
return self.search(mid + 1, right, target)
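# Hedged usage sketch (assumed, not part of the original file): finding the
# minimum of a rotated sorted array.
if __name__ == "__main__":
    print(Solution().find_min([4, 5, 6, 7, 0, 1, 2]))  # prints 0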
|
the-stack_106_19243
|
#!/usr/bin/env python
# coding: utf-8
from msgpack import packb, unpackb
def test_unpack_buffer():
from array import array
buf = array('b')
buf.fromstring(packb(('foo', 'bar')))
obj = unpackb(buf, use_list=1)
assert [b'foo', b'bar'] == obj
|
the-stack_106_19244
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import copy
import datetime as dt
import logging
import operator as op
from concurrent.futures import Future
from itertools import chain
from typing import Any, Iterable, Mapping, Optional, Tuple, Union
import pandas as pd
from gs_quant.base import Priceable, RiskKey, Sentinel, InstrumentBase
from gs_quant.config import DisplayOptions
from gs_quant.risk import DataFrameWithInfo, ErrorValue, FloatWithInfo, RiskMeasure, SeriesWithInfo, ResultInfo, \
ScalarWithInfo, aggregate_results
_logger = logging.getLogger(__name__)
def _compose(lhs: ResultInfo, rhs: ResultInfo) -> ResultInfo:
if isinstance(lhs, ScalarWithInfo):
if isinstance(rhs, ScalarWithInfo):
return rhs if lhs.risk_key.date == rhs.risk_key.date else lhs.compose((lhs, rhs))
elif isinstance(rhs, SeriesWithInfo):
return lhs.compose((lhs,)).combine_first(rhs).sort_index()
elif isinstance(lhs, SeriesWithInfo):
if isinstance(rhs, SeriesWithInfo):
return rhs.combine_first(lhs).sort_index()
elif isinstance(rhs, ScalarWithInfo):
return rhs.compose((rhs,)).combine_first(lhs).sort_index()
elif isinstance(lhs, DataFrameWithInfo):
if lhs.index.name != 'date':
lhs = lhs.assign(date=lhs.risk_key.date).set_index('date')
if isinstance(rhs, DataFrameWithInfo):
if rhs.index.name != 'date':
rhs = rhs.assign(date=rhs.risk_key.date).set_index('date')
return lhs.loc[set(lhs.index) - set(rhs.index)].append(rhs).sort_index()
elif isinstance(lhs, MultipleRiskMeasureResult):
if isinstance(rhs, MultipleRiskMeasureResult):
return lhs + rhs
raise RuntimeError(f'{lhs} and {rhs} cannot be composed')
def _value_for_date(result: Union[DataFrameWithInfo, SeriesWithInfo], date: Union[Iterable, dt.date]) -> \
Union[DataFrameWithInfo, ErrorValue, FloatWithInfo, SeriesWithInfo]:
from gs_quant.markets import CloseMarket
raw_value = result.loc[date]
key = result.risk_key
risk_key = RiskKey(
key.provider,
date if isinstance(date, dt.date) else tuple(date),
CloseMarket(date=date, location=key.market.location if isinstance(key.market, CloseMarket) else None),
key.params,
key.scenario,
key.risk_measure)
if isinstance(raw_value, ErrorValue):
return raw_value
elif isinstance(raw_value, DataFrameWithInfo):
raw_df = raw_value.raw_value.set_index('dates')
return DataFrameWithInfo(
raw_df.reset_index(drop=True) if isinstance(date, dt.date) else raw_df,
risk_key=risk_key,
unit=result.unit,
error=result.error)
elif isinstance(raw_value, SeriesWithInfo):
return SeriesWithInfo(
raw_value.raw_value,
risk_key=risk_key,
unit=result.unit,
error=result.error)
else:
return FloatWithInfo(
risk_key,
raw_value,
unit=result.unit.get(date, '') if result.unit else None,
error=result.error)
def _risk_keys_compatible(lhs, rhs) -> bool:
from gs_quant.markets import historical_risk_key
while isinstance(lhs, MultipleRiskMeasureResult):
lhs = next(iter(lhs.values()))
while isinstance(rhs, MultipleRiskMeasureResult):
rhs = next(iter(rhs.values()))
return historical_risk_key(lhs.risk_key).ex_measure == historical_risk_key(rhs.risk_key).ex_measure
def _value_for_risk_measure(res: dict, risk_measure: Union[Iterable, RiskMeasure]) -> dict:
result = copy.copy(res)
if isinstance(risk_measure, Iterable):
for value in list(result):
if value not in risk_measure:
del result[value]
else:
for value in list(result):
if value != risk_measure:
del result[value]
return result
class PricingFuture(Future):
__RESULT_SENTINEL = Sentinel('PricingFuture')
def __init__(self, result: Optional[Any] = __RESULT_SENTINEL):
super().__init__()
if result is not self.__RESULT_SENTINEL:
self.set_result(result)
def __add__(self, other):
if isinstance(other, (int, float)):
operand = other
elif isinstance(other, self.__class__):
operand = other.result()
else:
            raise ValueError(f'Cannot add {self.__class__.__name__} and {other.__class__.__name__}')
return self.__class__(_compose(self.result(), operand))
def __mul__(self, other):
if isinstance(other, (int, float)):
return self.__class__(self.result() * other)
else:
raise ValueError('Can only multiply by an int or float')
def result(self, timeout=None):
"""Return the result of the call that the future represents.
:param timeout: The number of seconds to wait for the result if the future isn't done.
If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given timeout.
Exception: If the call raised then that exception will be raised.
"""
from gs_quant.markets import PricingContext
if not self.done() and PricingContext.current.active_context.is_entered:
raise RuntimeError('Cannot evaluate results under the same pricing context being used to produce them')
return super().result(timeout=timeout)
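# Hedged usage sketch (assumed, not part of the original module): adding two
# PricingFuture instances that hold scalar results for different pricing dates
# composes them into a single dated result via _compose.
def _example_compose(res_a: ScalarWithInfo, res_b: ScalarWithInfo) -> PricingFuture:
    return PricingFuture(res_a) + PricingFuture(res_b)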
class CompositeResultFuture(PricingFuture):
def __init__(self, futures: Iterable[PricingFuture]):
super().__init__()
self.__futures = tuple(futures)
self.__pending = set()
for future in self.__futures:
if not future.done():
future.add_done_callback(self.__cb)
self.__pending.add(future)
if not self.__pending:
self._set_result()
def __getitem__(self, item):
return self.result()[item]
def __cb(self, future: PricingFuture):
self.__pending.discard(future)
if not self.__pending:
self._set_result()
def _set_result(self):
self.set_result([f.result() for f in self.__futures])
@property
def futures(self) -> Tuple[PricingFuture, ...]:
return self.__futures
class MultipleRiskMeasureResult(dict):
def __init__(self, instrument, dict_values: Iterable):
super().__init__(dict_values)
self.__instrument = instrument
def __getitem__(self, item):
if isinstance(item, dt.date) or (isinstance(item, Iterable) and all([isinstance(it, dt.date) for it in item])):
if all(isinstance(v, (DataFrameWithInfo, SeriesWithInfo)) for v in self.values()):
return MultipleRiskMeasureResult(self.__instrument, ((k, _value_for_date(v, item))
for k, v in self.items()))
else:
raise ValueError('Can only index by date on historical results')
else:
return super().__getitem__(item)
def __mul__(self, other):
if isinstance(other, (int, float)):
return self.__op(op.mul, other)
else:
            raise ValueError('Can only multiply by an int or float')
def __add__(self, other):
if isinstance(other, (int, float)):
return self.__op(op.add, other)
elif isinstance(other, MultipleRiskMeasureResult):
if not _risk_keys_compatible(self, other):
raise ValueError('Results must have matching scenario and location')
instruments_equal = self.__instrument == other.__instrument
self_dt = [list(self.values())[0].risk_key.date] if len(self.dates) == 0 else self.dates
other_dt = [list(other.values())[0].risk_key.date] if len(other.dates) == 0 else other.dates
dates_overlap = not set(self_dt).isdisjoint(other_dt)
if not set(self.keys()).isdisjoint(other.keys()) and instruments_equal and dates_overlap:
raise ValueError('Results overlap on risk measures, instruments or dates')
all_keys = set(chain(self.keys(), other.keys()))
if not instruments_equal:
from gs_quant.markets.portfolio import Portfolio
return PortfolioRiskResult(
Portfolio((self.__instrument, other.__instrument)),
all_keys,
tuple(MultipleRiskMeasureFuture(
r.__instrument,
{k: PricingFuture(r[k]) if k in r else None for k in all_keys}) for r in (self, other))
)
else:
results = {}
for result in (self, other):
for key in all_keys:
if key in result:
results[key] = _compose(results[key], result[key]) if key in results else result[key]
return MultipleRiskMeasureResult(self.__instrument, results)
else:
raise ValueError('Can only add instances of MultipleRiskMeasureResult or int, float')
def __op(self, operator, operand):
values = {}
for key, value in self.items():
if isinstance(value, pd.DataFrame):
new_value = value.copy()
new_value.value = operator(value.value, operand)
else:
new_value = operator(value, operand)
values[key] = new_value
return MultipleRiskMeasureResult(self.__instrument, values)
@property
def instrument(self):
return self.__instrument
@property
def dates(self) -> Tuple[dt.date, ...]:
dates = set()
for value in self.values():
if isinstance(value, (DataFrameWithInfo, SeriesWithInfo)):
if all([isinstance(i, dt.date) for i in value.index]):
dates.update(value.index)
return tuple(sorted(dates))
def to_frame(self, values='default', index='default', columns='default', aggfunc=pd.unique,
display_options: DisplayOptions = None):
df = self._get_raw_df(display_options)
if values is None and index is None and columns is None:
return df
elif values == 'default' and index == 'default' and columns == 'default':
if 'mkt_type' in df.columns:
return df.set_index(df.columns[0])
values = 'value'
columns = 'risk_measure'
index = 'dates' if 'dates' in df.columns else None
else:
            values = 'value' if values == 'default' or values == ['value'] else values
index = None if index == 'default' else index
columns = None if columns == 'default' else columns
pivot_df = df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc)
if index is not None:
idx = df.set_index(list(pivot_df.index.names)).index.unique()
pivot_df = pivot_df.reindex(index=idx)
if columns is not None:
cols = df.set_index(list(pivot_df.columns.names)).index.unique()
pivot_df = pivot_df.reindex(columns=cols)
return pivot_df
def _get_raw_df(self, display_options: DisplayOptions = None):
list_df = []
cols = []
for rm in list(self):
curr_raw_df = self[rm]._get_raw_df(display_options)
if curr_raw_df is not None:
curr_raw_df.insert(0, 'risk_measure', rm)
if 'mkt_type' in curr_raw_df.columns.values:
cols = list(curr_raw_df.columns.values)
list_df.append(curr_raw_df)
concat_df = pd.concat(list_df, ignore_index=True, sort=False)
# if calc scalar before bucketed risk. eg: port.calc((Price, IRDelta))
if concat_df.columns.values[-1] != 'value':
return concat_df[cols]
return concat_df
class MultipleRiskMeasureFuture(CompositeResultFuture):
def __init__(self, instrument: InstrumentBase, measures_to_futures: Mapping[RiskMeasure, PricingFuture]):
self.__measures_to_futures = measures_to_futures
self.__instrument = instrument
super().__init__(measures_to_futures.values())
def __add__(self, other):
result = self.result() + other.result() if isinstance(other, MultipleRiskMeasureFuture) else other
ret = MultipleRiskMeasureFuture(self.__instrument, {k: PricingFuture(v) for k, v in result.items()})
ret._set_result()
return ret
def _set_result(self):
self.set_result(MultipleRiskMeasureResult(self.__instrument,
zip(self.__measures_to_futures.keys(),
(f.result() for f in self.futures))))
@property
def measures_to_futures(self) -> Mapping[RiskMeasure, PricingFuture]:
return self.__measures_to_futures
class HistoricalPricingFuture(CompositeResultFuture):
def _set_result(self):
results = [f.result() for f in self.futures]
base = next((r for r in results if not isinstance(r, (ErrorValue, Exception))), None)
if base is None:
_logger.error(f'Historical pricing failed: {results[0]}')
self.set_result(results[0])
else:
result = MultipleRiskMeasureResult(base.instrument,
{k: base[k].compose(r[k] for r in results) for k in base.keys()}) \
if isinstance(base, MultipleRiskMeasureResult) else base.compose(results)
self.set_result(result)
class PortfolioPath:
def __init__(self, path):
self.__path = (path,) if isinstance(path, int) else path
def __repr__(self):
return repr(self.__path)
def __iter__(self):
return iter(self.__path)
def __len__(self):
return len(self.__path)
def __add__(self, other):
return PortfolioPath(self.__path + other.__path)
def __eq__(self, other):
return self.__path == other.__path
def __hash__(self):
return hash(self.__path)
def __call__(self, target, rename_to_parent: Optional[bool] = False):
parent = None
path = list(self.__path)
while path:
elem = path.pop(0)
parent = target if len(self) - len(path) > 1 else None
target = target.futures[elem] if isinstance(target, CompositeResultFuture) else target[elem]
if isinstance(target, PricingFuture) and path:
target = target.result()
if rename_to_parent and parent and getattr(parent, 'name', None):
target = copy.copy(target)
target.name = parent.name
return target
class PortfolioRiskResult(CompositeResultFuture):
def __init__(self,
portfolio,
risk_measures: Iterable[RiskMeasure],
futures: Iterable[PricingFuture]):
super().__init__(futures)
self.__portfolio = portfolio
self.__risk_measures = tuple(risk_measures)
def __getitem__(self, item):
futures = []
if isinstance(item, RiskMeasure) or (
isinstance(item, Iterable) and all([isinstance(it, RiskMeasure) for it in item])):
'''Slicing a list of risk measures'''
if isinstance(item, Iterable):
if any([it not in self.risk_measures for it in item]):
raise ValueError('{} not computed'.format(item))
else:
if item not in self.risk_measures:
raise ValueError('{} not computed'.format(item))
if len(self.risk_measures) == 1:
return self
else:
for priceable in self.portfolio:
if isinstance(self[priceable], PortfolioRiskResult):
futures.append(self[priceable][item])
else:
futures.append(MultipleRiskMeasureFuture(priceable, {k: PricingFuture(v) for k, v in
_value_for_risk_measure(
self[priceable], item).items()}))
risk_measure = tuple(item) if isinstance(item, Iterable) else (item,)
return PortfolioRiskResult(self.__portfolio, risk_measure, futures)
elif isinstance(item, dt.date) or (
isinstance(item, Iterable) and all([isinstance(it, dt.date) for it in item])):
for priceable in self.portfolio:
if isinstance(self[priceable], (MultipleRiskMeasureResult, PortfolioRiskResult)):
futures.append(PricingFuture(self[priceable][item]))
elif isinstance(self[priceable], (DataFrameWithInfo, SeriesWithInfo)):
futures.append(PricingFuture(_value_for_date(self[priceable], item)))
else:
raise RuntimeError('Can only index by date on historical results')
return PortfolioRiskResult(self.__portfolio, self.risk_measures, futures)
elif (isinstance(item, list) or isinstance(item, tuple)) and all(
[isinstance(it, InstrumentBase) for it in item]):
'''Slicing a list/tuple of instruments (not an Portfolio iterable)'''
return self.subset(item)
# Inputs from excel always becomes a list
# Catch list length = 1 so that it doesn't return a sub-PortfolioRiskResult
elif isinstance(item, list) and len(item) == 1:
return self.__results(items=item[0])
else:
return self.__results(items=item)
def __contains__(self, item):
if isinstance(item, RiskMeasure):
return item in self.__risk_measures
elif isinstance(item, dt.date):
return item in self.dates
else:
return item in self.__portfolio
def __repr__(self):
ret = f'{self.__risk_measures} Results'
if self.__portfolio.name:
ret += f' for {self.__portfolio.name}'
return ret + f' ({len(self)})'
def __len__(self):
return len(self.futures)
def __iter__(self):
return iter(self.__results())
def __mul__(self, other):
if isinstance(other, (int, float)):
return PortfolioRiskResult(self.__portfolio, self.__risk_measures, [f * other for f in self.futures])
else:
            raise ValueError('Can only multiply by an int or float')
def __add__(self, other):
def as_multiple_result_futures(portfolio_result):
if len(portfolio_result.__risk_measures) > 1:
return portfolio_result
mr_futures = []
for p, f in zip(portfolio_result.__portfolio, portfolio_result.futures):
if isinstance(f, PortfolioRiskResult):
mr_futures.append(as_multiple_result_futures(f))
elif isinstance(f, MultipleRiskMeasureFuture):
mr_futures.append(f)
else:
mr_futures.append(MultipleRiskMeasureFuture(p, {portfolio_result.__risk_measures[0]: f}))
return PortfolioRiskResult(portfolio_result.__portfolio, portfolio_result.__risk_measures, mr_futures)
def set_value(dest_result, src_result, src_risk_measure):
for priceable, future in zip(dest_result.__portfolio, dest_result.futures):
if isinstance(future, PortfolioRiskResult):
set_value(future, src_result, src_risk_measure)
else:
try:
value = src_result[priceable]
value = value[src_risk_measure] if isinstance(value, MultipleRiskMeasureResult) else value
future.result()[src_risk_measure] = value
except KeyError:
pass
def first_value(portfolio_result):
if len(portfolio_result.__risk_measures) > 1:
return next(iter(portfolio_result[next(iter(portfolio_result.portfolio.all_instruments))].values()))
else:
return portfolio_result[next(iter(portfolio_result.__portfolio.all_instruments))]
if isinstance(other, (int, float)):
return PortfolioRiskResult(self.__portfolio, self.__risk_measures, [f + other for f in self.futures])
elif isinstance(other, PortfolioRiskResult):
if not _risk_keys_compatible(first_value(self), first_value(other)) and not \
set(self.__portfolio.all_instruments).isdisjoint(other.__portfolio.all_instruments):
raise ValueError('Results must have matching scenario and location')
self_dt = (first_value(self).risk_key.date,) if len(self.dates) == 0 else self.dates
other_dt = (first_value(other).risk_key.date,) if len(other.dates) == 0 else other.dates
dates_overlap = not set(self_dt).isdisjoint(other_dt)
if not set(self.__risk_measures).isdisjoint(other.__risk_measures) and dates_overlap and not \
set(self.__portfolio.all_instruments).isdisjoint(other.__portfolio.all_instruments):
raise ValueError('Results overlap on risk measures, instruments or dates')
self_futures = as_multiple_result_futures(self).futures
other_futures = as_multiple_result_futures(other).futures
if self.__portfolio is other.__portfolio or self.__portfolio == other.__portfolio:
portfolio = self.__portfolio
futures = [future + other_future for future, other_future in zip(self_futures, other_futures)]
else:
portfolio = self.__portfolio + other.__portfolio
futures = self_futures + other_futures
ret = PortfolioRiskResult(portfolio, set(chain(self.risk_measures, other.risk_measures)), futures)
if portfolio is not self.__portfolio and len(ret.risk_measures) > 1:
# Now fill in overlapping values
for dest, src in ((self, other), (other, self)):
for risk_measure in (m for m in src.risk_measures if dest == self or m not in dest.risk_measures):
set_value(ret, src, risk_measure)
return ret
else:
raise ValueError('Can only add instances of PortfolioRiskResult or int, float')
@property
def portfolio(self):
return self.__portfolio
@property
def risk_measures(self) -> Tuple[RiskMeasure, ...]:
return self.__risk_measures
@property
def dates(self) -> Tuple[dt.date, ...]:
dates = set()
for result in self.__results():
if isinstance(result, (MultipleRiskMeasureResult, PortfolioRiskResult)):
if all([isinstance(i, dt.date) for i in result.dates]):
dates.update(result.dates)
elif isinstance(result, (pd.DataFrame, pd.Series)):
if all([isinstance(i, dt.date) for i in result.index]):
dates.update(result.index)
try:
return tuple(sorted(dates))
except TypeError:
return tuple()
def result(self, timeout: Optional[int] = None):
super().result(timeout=timeout)
return self
def subset(self, items: Iterable[Union[int, str, PortfolioPath, Priceable]], name: Optional[str] = None):
paths = tuple(chain.from_iterable((i,) if isinstance(i, PortfolioPath) else self.__paths(i) for i in items))
sub_portfolio = self.__portfolio.subset(paths, name=name)
return PortfolioRiskResult(sub_portfolio, self.risk_measures, [p(self.futures) for p in paths])
def aggregate(self, allow_mismatch_risk_keys=False) -> Union[float, pd.DataFrame, pd.Series,
MultipleRiskMeasureResult]:
if len(self.__risk_measures) > 1:
return MultipleRiskMeasureResult(self.portfolio, ((r, self[r].aggregate()) for r in self.__risk_measures))
else:
return aggregate_results(self.__results(), allow_mismatch_risk_keys=allow_mismatch_risk_keys)
def to_frame(self, values='default', index='default', columns='default', aggfunc=sum,
display_options: DisplayOptions = None):
def get_df(priceable, port_info=None, inst_idx=0):
if port_info is None:
port_info = {}
if not isinstance(priceable, InstrumentBase): # for nested portfolio or portfolio of portfolios+instruments
list_sub_dfs = []
for p_idx, p in enumerate(priceable.priceables):
curr_port_info = port_info.copy()
if not isinstance(p, InstrumentBase):
curr_port_info.update(
{f'portfolio_name_{len(port_info)}': f'Portfolio_{p_idx}' if p.name is None else p.name})
list_sub_dfs.append(get_df(p, curr_port_info, p_idx))
list_sub_dfs = list(filter(lambda x: x is not None, list_sub_dfs))
if len(list_sub_dfs) > 0:
final_df = pd.concat(list_sub_dfs, ignore_index=True)
return final_df.reindex(columns=max([x.columns.values for x in list_sub_dfs], key=len))
else:
port_info.update({
'instrument_name': f'{priceable.type.name}_{inst_idx}' if priceable.name is None else priceable.name
})
sub_df = self[priceable]._get_raw_df(display_options)
if sub_df is not None:
for port_idx, (key, value) in enumerate(port_info.items()):
sub_df.insert(port_idx, key, value)
if 'risk_measure' not in sub_df.columns.values:
sub_df.insert(len(port_info), 'risk_measure', self.risk_measures[0])
return sub_df
def get_default_pivots(ori_cols, has_dates: bool, multi_measures: bool, simple_port: bool) -> tuple:
portfolio_names = list(filter(lambda x: 'portfolio_name_' in x, ori_cols))
port_and_inst_names = portfolio_names + ['instrument_name']
pivot_rules = [
# has_dates, multi_measures, simple_port
# output: (value,index,columns)
[True, True, None, ('value', 'dates', port_and_inst_names + ['risk_measure'])],
[True, False, None, ('value', 'dates', port_and_inst_names)],
[False, False, False, ('value', portfolio_names, 'instrument_name')],
[False, True, False, ('value', port_and_inst_names, 'risk_measure')],
[False, None, True, ('value', 'instrument_name', 'risk_measure')],
]
def match(rule_value, check_value) -> bool:
if rule_value is None:
return True
elif callable(rule_value):
return rule_value(check_value)
else:
return rule_value == check_value
for rule in pivot_rules:
[rule_has_dates, rule_multi_measures, rule_simple_port, rule_output] = rule
if match(rule_has_dates, has_dates) and match(rule_multi_measures, multi_measures) and \
match(rule_simple_port, simple_port):
return rule_output
return None, None, None
ori_df = get_df(self.portfolio)
if ori_df is None:
return
else:
# fill n/a values for different sub-portfolio depths
df_cols = list(ori_df.columns.values)
cols_except_value = [c for c in df_cols if c != 'value']
ori_df[cols_except_value] = ori_df[cols_except_value].fillna("N/A")
if values is None and index is None and columns is None: # to_frame(None, None, None)
return ori_df
elif values == 'default' and index == 'default' and columns == 'default': # to_frame()
has_bucketed = True if 'mkt_type' in df_cols else False
has_dt = True if 'dates' in df_cols else False
has_cashflows = True if 'payment_amount' in df_cols else False
multi_rm = True if len(self.risk_measures) > 1 else False
port_depth_one = True if len(max(self.portfolio.all_paths, key=len)) == 1 else False
if has_bucketed or has_cashflows:
res_list = list(filter(None.__ne__, [i._get_raw_df() for i in list(self)]))
res_df_cols = max([i.columns.values for i in res_list], key=len)
res_df_cols = list(filter(lambda x: x not in ['dates', 'risk_measure'], res_df_cols))
return ori_df.set_index([p for p in df_cols if p not in res_df_cols])
else:
values, index, columns = get_default_pivots(df_cols, has_dt, multi_rm, port_depth_one)
else: # user defined pivoting
                values = 'value' if values == 'default' or values == ['value'] else values
try:
pivot_df = ori_df.pivot_table(values=values, index=index, columns=columns, aggfunc=aggfunc)
except ValueError:
raise RuntimeError('Unable to successfully pivot data')
try: # attempt to correct order of index
ori_index = ori_df.set_index(list(pivot_df.index.names)).index.unique()
ori_columns = ori_df.set_index(list(pivot_df.columns.names)).index.unique()
return pivot_df.reindex(index=ori_index, columns=ori_columns)
except KeyError:
return pivot_df
def __paths(self, items: Union[int, slice, str, Priceable]) -> Tuple[PortfolioPath, ...]:
if isinstance(items, int):
return PortfolioPath(items),
elif isinstance(items, slice):
return tuple(PortfolioPath(i) for i in range(len(self.__portfolio))[items])
elif isinstance(items, (str, Priceable)):
paths = self.__portfolio.paths(items)
# will enter in here only if trying to slice an unresolved portfolio with a resolved instrument
if not paths and isinstance(items, InstrumentBase) and items.unresolved:
paths = self.__portfolio.paths(items.unresolved)
if not paths:
raise KeyError(f'{items} not in portfolio')
key = items.resolution_key.ex_measure
paths = tuple(p for p in paths if self.__result(p, self.risk_measures[0]).risk_key.ex_measure == key)
if not paths:
raise KeyError(f'Cannot slice {items} which is resolved in a different pricing context')
return paths
def __results(self, items: Optional[Union[int, slice, str, Priceable]] = None):
if items is None:
return tuple(self.__result(p) for p in self.__portfolio.all_paths)
paths = self.__paths(items)
if not paths:
raise KeyError(f'{items}')
return self.__result(paths[0]) if not isinstance(items, slice) else self.subset(paths)
def __result(self, path: PortfolioPath, risk_measure: Optional[RiskMeasure] = None):
res = path(self.futures).result()
if len(self.risk_measures) == 1 and not risk_measure:
risk_measure = self.risk_measures[0]
return res[risk_measure] \
if risk_measure and isinstance(res, (MultipleRiskMeasureResult, PortfolioRiskResult)) else res
def get(self, item, default):
try:
value = self.__getitem__(item)
except (KeyError, ValueError):
value = default
return value
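# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the class above). It assumes
# `results` is an already-computed PortfolioRiskResult and that `ir_delta`
# (a RiskMeasure) and `swap` (an instrument) were part of that computation;
# both names are placeholders. Shown only to illustrate the indexing and
# aggregation semantics implemented by __getitem__, subset, aggregate,
# to_frame and get above.
#
# by_measure = results[ir_delta]              # slice by risk measure
# by_instrument = results[swap]               # slice by instrument
# sub_results = results.subset([swap])        # results for a sub-portfolio
# total = results.aggregate()                 # aggregate across the portfolio
# frame = results.to_frame()                  # pivoted DataFrame of values
# safe_value = results.get(swap, None)        # lookup with a default fallback
# ---------------------------------------------------------------------------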
|
the-stack_106_19245
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch.utils.data
import random
from data.base_data_loader import BaseDataLoader
from data import online_dataset_for_old_photos as dts_ray_bigfile
def CreateDataset(opt):
dataset = None
if opt.training_dataset=='domain_A' or opt.training_dataset=='domain_B':
dataset = dts_ray_bigfile.UnPairOldPhotos_SR()
if opt.training_dataset=='mapping':
if opt.random_hole:
dataset = dts_ray_bigfile.PairOldPhotos_with_hole()
else:
dataset = dts_ray_bigfile.PairOldPhotos()
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
drop_last=True)
def load_data(self):
return self.dataloader
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
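# ---------------------------------------------------------------------------
# Hypothetical usage sketch: `opt` is assumed to be the option object used
# elsewhere in this repository (with batchSize, serial_batches, nThreads,
# training_dataset, random_hole and max_dataset_size attributes); it is not
# defined here.
#
# data_loader = CustomDatasetDataLoader()
# data_loader.initialize(opt)
# dataset = data_loader.load_data()
# for i, data in enumerate(dataset):
#     ...  # feed `data` to the training loop
# ---------------------------------------------------------------------------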
|
the-stack_106_19246
|
from collections import Counter
from graph_tool import Graph, edge_endpoint_property
from itertools import chain, combinations
import logging
import numpy as np
from rhodonite.utils.graph import dict_to_vertex_prop, dict_to_edge_prop
from rhodonite.cooccurrence.basic import cooccurrence_counts
def cumulative_cooccurrence_graph(steps, sequences, directed=False):
'''cumulative_cooccurrence_graph
Creates a cumulative cooccurrence graph.
Parameters
----------
steps : :obj:`iter` of :obj:`int` or :obj:`str`
A series that contains sequential labels for the nested groups.
sequences : :obj:`iter` of :obj:`iter` of :obj:`int`
Nested iterable of integers representing vertices in the graph. Number
of nested iterables should be equal to `len(steps)`.
directed : :obj:`bool`
Currently has no effect. In future this will determine whether to build
a bi-directional cooccurrence graph.
Returns
-------
g : :obj:`graph_tool.Graph`
A graph. Vertices are elements. Edges link terms that have cooccurred
at least once in the series.
o_props : :obj:`dict`
Property maps with vertex occurrence values at each step.
o_cumsum_props : :obj:`dict`
Property maps with cumulative vertex cooccurrence values at each step.
co_props : :obj:`dict`
Property maps with edge cooccurrnce values at each step.
co_cumsum_props : :obj:`dict`
Property maps with cumulative edge cooccurrence values at each step.
'''
g = Graph(directed=directed)
o_total = Counter(chain(*chain(*sequences)))
n_vertices = len(o_total)
g.add_vertex(n_vertices)
o_max = dict_to_vertex_prop(g, o_total, 'int')
co_total = cooccurrence_counts(chain(*sequences))
edge_list = ((c[0], c[1], count) for c, count in co_total.items())
co_max = g.new_edge_property('int')
g.add_edge_list(edge_list, eprops=[co_max])
edges = g.get_edges()
edge_indices = dict(zip([(e[0], e[1]) for e in edges], edges[:, 2]))
o_props = {}
co_props = {}
o_cumsum_props = {}
co_cumsum_props = {}
for i, (step, seq) in enumerate(zip(steps[:-1], sequences[:-1])):
logging.info(f'Calculating cooccurrences at step {step}')
o_step = Counter(chain(*seq))
o_props[step] = dict_to_vertex_prop(g, o_step, 'int')
combos = (combinations(sorted(ids), 2) for ids in seq)
co_step = Counter(chain(*combos))
co_props[step] = dict_to_edge_prop(g, co_step, 'int', edge_indices)
o_cumsum = g.new_vertex_property('int')
co_cumsum = g.new_edge_property('int')
if i == 0:
o_cumsum.a = o_cumsum.a + o_props[step].a
co_cumsum.a = co_cumsum.a + co_props[step].a
else:
o_cumsum.a = o_cumsum_props[steps[i-1]].a + o_props[step].a
co_cumsum.a = co_cumsum_props[steps[i-1]].a + co_props[step].a
o_cumsum_props[step] = o_cumsum
co_cumsum_props[step] = co_cumsum
# fill in the last step without needing to count occurrences
# or cooccurrences
step_max = steps[-1]
o = g.new_vertex_property('int')
co = g.new_edge_property('int')
o.a = o_max.a - o_cumsum.a
co.a = co_max.a - co_cumsum.a
o_props[step_max] = o
co_props[step_max] = co
o_cumsum_props[step_max] = o_max
co_cumsum_props[step_max] = co_max
steps_prop = g.new_graph_property('vector<int>')
steps_prop.set_value(steps)
g.gp['steps'] = steps_prop
return g, o_props, o_cumsum_props, co_props, co_cumsum_props
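# ---------------------------------------------------------------------------
# Minimal hypothetical example of the input layout consumed above: one entry
# per step, each containing the groups observed at that step, each group a
# collection of integer vertex ids.
#
# steps = [2001, 2002]
# sequences = [
#     [[0, 1, 2], [1, 2]],   # groups observed at step 2001
#     [[0, 2], [2, 3]],      # groups observed at step 2002
# ]
# g, o, o_cum, co, co_cum = cumulative_cooccurrence_graph(steps, sequences)
# ---------------------------------------------------------------------------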
def label_cumulative_cooccurrence(g, co_props):
'''label_cumulative_cooccurrence
Calculates the cumulative cooccurrence values for edges in a graph using
the edge cooccurrence values.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
co_props : :obj:`dict`
Contains property maps of edge cooccurrence values.
Returns
-------
co_cumsum_props : :obj:`dict`
Contains property maps of type `int` with edge cumulative cooccurrence
values.
'''
co_cumsum_props = {}
for i, (step, co_prop) in enumerate(co_props.items()):
co_cumsum_prop = g.new_edge_property('int')
if i == 0:
co_cumsum_prop.a = co_prop.a
else:
co_cumsum_prop.a = co_cumsum_props[step-1].a + co_prop.a
co_cumsum_props[step] = co_cumsum_prop
return co_cumsum_props
def label_cumulative_occurrence(g, o_props):
"""label_cumulative_occurrence
Calculates the cumulative occurrence values for vertices in a graph using
the vertex occurrence values.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
o_props : :obj:`dict`
Contains property maps of vertex occurrence values.
Returns
-------
o_cumsum_props : :obj:`dict`
Contains property maps of type 'int' with vertex cumulative occurrence
values.
"""
o_cumsum_props = {}
for i, (step, o_prop) in enumerate(o_props.items()):
o_cumsum_prop = g.new_vertex_property('int')
if i == 0:
o_cumsum_prop.a = o_prop.a
else:
o_cumsum_prop.a = (o_cumsum_props[step-1].a + o_prop.a)
o_cumsum_props[step] = o_cumsum_prop
return o_cumsum_props
def label_active_edge(g, co_props):
'''label_active_edge
Determines whether an edge is active (has at least one cooccurrence) at each
step.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
co_props : :obj:`dict`
Contains property maps of edge cooccurrence values.
Returns
-------
active_edge_props : :obj:`dict`
Contains property maps of type `bool` that is True if an edge is active.
'''
active_edge_props = {}
for step, co_prop in co_props.items():
active_edge_prop = g.new_edge_property('bool')
active_edge_prop.a = co_prop.a > 0
active_edge_props[step] = active_edge_prop
return active_edge_props
def label_new_edge(g, co_props, label_old=True, label_steps=False):
'''label_new_edge
Determines whether an edge has appeared for the first time at a given step.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
co_props : :obj:`dict`
Contains property maps of edge cooccurrence values.
label_old : :obj:`bool`
If True will also return property maps that indicate whether an edge
has existed at a previous step. Defaults to True.
label_steps : :obj:`bool`
If True will also return a property map of type `int` that indicates
the step at which an edge first appeared.
Returns
-------
new_edge_props : :obj:`dict`
old_edge_props : :obj:`dict`
edge_step_prop : :obj:`PropertyMap`
'''
new_edge_props = {}
if label_old:
old_edge_props = {}
_edge_tracker = g.new_edge_property('bool')
for step, co_prop in co_props.items():
new_edge_prop = g.new_edge_property('bool')
new_edge_prop.a = (co_prop.a > 0) & (_edge_tracker.a == False)
new_edge_props[step] = new_edge_prop
if label_old:
old_edge_prop = g.new_edge_property('bool')
old_edge_prop.a = _edge_tracker.a
old_edge_props[step] = old_edge_prop
_edge_tracker.a = _edge_tracker.a + new_edge_prop.a
if label_steps:
steps = list(co_props.keys())
edge_step_prop = g.new_edge_property('int')
start = 0
end = np.sum(new_edge_props[steps[0]].a)
for i, step in enumerate(steps):
if i > 0:
start = int(start + np.sum(new_edge_props[steps[i-1]].a))
end = int(end + np.sum(new_edge_props[step].a))
edge_step_prop.a[start:end] = step
if label_old & (not label_steps):
return (new_edge_props, old_edge_props)
elif label_steps & (not label_old):
return (new_edge_props, edge_step_prop)
elif label_old & label_steps:
return (new_edge_props, old_edge_props, edge_step_prop)
else:
return new_edge_props
def label_edge_activity(g, co_props):
'''label_edge_activity
Determines whether an edge is new, reinforcing or inactive at any given
step.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
co_props : :obj:`dict`
Contains property maps of edge cooccurrence values.
Returns
-------
new_edge_props : :obj:`dict`
reinforcing_edge_props : :obj:`dict`
inactive_edge_props : :obj:`dict`
'''
reinforcing_edge_props = {}
inactive_edge_props = {}
active_edge_props = label_active_edge(g, co_props)
new_edge_props, old_edge_props = label_new_edge(g, co_props)
for step in active_edge_props.keys():
reinforcing_eprop = g.new_edge_property('bool')
reinforcing_eprop.a = active_edge_props[step].a & old_edge_props[step].a
reinforcing_edge_props[step] = reinforcing_eprop
inactive_eprop = g.new_edge_property('bool')
inactive_eprop.a = ((old_edge_props[step].a > 0)
& (active_edge_props[step].a == 0))
inactive_edge_props[step] = inactive_eprop
return new_edge_props, reinforcing_edge_props, inactive_edge_props
def label_new_vertex(g, o_props, label_steps=False):
'''label_new_vertex
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
o_props : :obj:`dict`
A dictionary of vertex property maps containing the vertex occurrence
values at each step.
label_steps : :obj:`bool`
If `True`, returns a vertex property map` that indicates the first step
that each vertex appeared.
Returns
-------
new_vertex_props : :obj:`dict`
vertex_step_prop : :obj:`graph_tool.PropertyMap`
'''
new_vertex_props = {}
_vertex_tracker = g.new_vertex_property('bool')
for step, o_prop in o_props.items():
new_vertex_prop = g.new_vertex_property('bool')
new_vertex_prop.a = (o_prop.a > 0) & (_vertex_tracker.a == False)
new_vertex_props[step] = new_vertex_prop
_vertex_tracker.a = _vertex_tracker.a + new_vertex_prop.a
    if label_steps:
        steps = list(o_props.keys())
        vertex_step_prop = g.new_vertex_property('int')
        start = 0
        end = np.sum(new_vertex_props[steps[0]].a)
        for i, step in enumerate(steps):
            if i > 0:
                start = int(start + np.sum(new_vertex_props[steps[i - 1]].a))
                end = int(end + np.sum(new_vertex_props[step].a))
            vertex_step_prop.a[start:end] = step
        return (new_vertex_props, vertex_step_prop)
else:
return new_vertex_props
def label_linkage(g, new_eprops, new_vprops):
"""label_linkage
    Labels new edges as additive, extending or joining.
    - Additive: edges that connect two vertices that have both appeared for
      the first time at a given step.
    - Extending: edges that connect a new vertex with a vertex that was
      already in the graph.
    - Joining: edges that connect two pre-existing vertices.
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
new_eprops : :obj:`dict`
new_vprops : :obj:`dict`
Returns
-------
additive_eprops : :obj:`dict`
extending_eprops : :obj:`dict`
joining_eprops : :obj:`dict`
"""
additive_eprops = {}
extending_eprops = {}
joining_eprops = {}
for step, new_eprop in new_eprops.items():
new_vprop = new_vprops[step]
        s_new_prop = edge_endpoint_property(g, new_vprop, 'source')
        t_new_prop = edge_endpoint_property(g, new_vprop, 'target')
additive_eprop = g.new_edge_property('bool')
extending_eprop = g.new_edge_property('bool')
joining_eprop = g.new_edge_property('bool')
additive_eprop.a = (s_new_prop.a & t_new_prop.a) & (new_eprop.a)
extending_eprop.a = (s_new_prop.a != t_new_prop.a) & (new_eprop.a)
joining_eprop.a = ((additive_eprop.a == False)
& (extending_eprop.a == False)
& (new_eprop.a))
additive_eprops[step] = additive_eprop
extending_eprops[step] = extending_eprop
joining_eprops[step] = joining_eprop
return additive_eprops, extending_eprops, joining_eprops
def label_k_score(g, co_cumsum_eprops, first=True):
'''label_k_score
Calculates the K score for each existing edge at each step:
.. math::
k_{ij, T} = \\frac{1}{1 + C_{ij, T-1}}
Parameters
----------
g : :obj:`graph_tool.Graph`
co_cumsum_eprops : :obj:`dict`
Returns
-------
k_score_eprops : :obj:`dict`
'''
k_score_eprops = {}
for i, step in enumerate(co_cumsum_eprops.keys()):
k_score_eprop = g.new_edge_property('float')
if i == 0:
if first:
k_score_eprop = g.new_edge_property('float', val=1)
else:
k_score_eprop.a = 1 / (co_cumsum_eprops[step_prev].a + 1)
step_prev = step
k_score_eprops[step] = k_score_eprop
return k_score_eprops
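# Worked example of the K score above (hypothetical numbers): if an edge has
# cumulative cooccurrence C_{ij, T-1} = 3 at the previous step, then
# k_{ij, T} = 1 / (1 + 3) = 0.25; with first=True every edge is assigned
# k = 1 at the first step.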
def label_k_growth(g, co_eprops, co_cumsum_eprops):
'''label_k_growth
Calculates the K growth score for edges at each step.
.. math::
k_{ij, T} = \\frac{c_{ij, T}}{1 + C_{ij, T-1}}
Parameters
----------
g : :obj:`graph_tool.Graph`
A graph.
co_eprops : :obj:`dict`
Contains edge cooccurrence property maps.
co_cumsum_eprops : :obj:`dict`
Contains edge cumulative cooccurrence
property maps.
Returns
-------
k_growth_eprops : :obj:`dict` Contains edge k growth property maps.
'''
k_growth_eprops = {}
for i, (step, co_eprop) in enumerate(co_eprops.items()):
if i == 0:
continue
else:
k_growth_eprop = g.new_edge_property('float')
k_growth_eprop.a = (co_eprops[step].a /
(co_cumsum_eprops[step - 1].a + 1))
k_growth_eprops[step] = k_growth_eprop
return k_growth_eprops
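# ---------------------------------------------------------------------------
# Hypothetical end-to-end sketch tying the helpers in this module together.
# `steps` and `sequences` are assumed to follow the layout expected by
# cumulative_cooccurrence_graph; every function name comes from this module.
#
# g, o_props, o_cumsum_props, co_props, co_cumsum_props = \
#     cumulative_cooccurrence_graph(steps, sequences)
# new_e, reinforcing_e, inactive_e = label_edge_activity(g, co_props)
# new_v = label_new_vertex(g, o_props)
# additive_e, extending_e, joining_e = label_linkage(g, new_e, new_v)
# k_scores = label_k_score(g, co_cumsum_props)
# k_growth = label_k_growth(g, co_props, co_cumsum_props)
# ---------------------------------------------------------------------------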
|
the-stack_106_19247
|
from __future__ import print_function, absolute_import
import os
import six
import subprocess
from .base import CapPA
from .enums import IS_MAC
from .utils import warn
class Pip(CapPA):
def __init__(self, *flags):
super(Pip, self).__init__(*flags)
self.name = 'pip'
self.friendly_name = 'pip'
def _install_package_dict(self, packages):
def install(prefix, options, packages):
range_connector_gte = ">="
range_connector_lt = "<"
connector = '=='
manager = self.find_executable()
args = prefix + [manager, 'install'] + options
for package, version in six.iteritems(packages):
if version is None:
args.append(package)
elif isinstance(version, list):
args.append(package + range_connector_gte + version[0] + ',' + range_connector_lt + version[1])
else:
args.append(package + connector + version)
subprocess.check_call(args, env=os.environ)
prefix = []
if not self.use_venv:
prefix.append('sudo')
prefix.append('-E')
subdir_packages, nonsubdir_packages = self._split_pip_packages(packages)
if subdir_packages:
install(prefix, ['-e'], subdir_packages)
if nonsubdir_packages:
install(prefix, [], nonsubdir_packages)
def _clean(self):
""" Check for residual tmp files left by pip """
tmp_location = os.environ.get('TMPDIR',
os.environ.get('TEMP',
os.environ.get('TMP', '/tmp')))
tmp_location = tmp_location.strip()
prefix = []
if not IS_MAC:
prefix.append('sudo')
prefix.append('-E')
try:
subprocess.check_call(prefix + ['rm', '-rf', os.path.join(tmp_location, 'pip-*')])
except Exception as exc:
warn('error removing pip files', exc)
def _split_pip_packages(self, packages):
subdir_packages = {}
nonsubdir_packages = {}
for package, version in six.iteritems(packages):
if 'subdirectory=' in package:
subdir_packages[package] = version
else:
nonsubdir_packages[package] = version
return subdir_packages, nonsubdir_packages
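# ---------------------------------------------------------------------------
# Hypothetical usage sketch illustrating the version-spec handling in
# _install_package_dict above: None installs the latest version, a string
# pins with '==', and a two-element list builds a '>=,<' range.
#
# pip = Pip()
# pip._install_package_dict({
#     'requests': None,              # -> requests
#     'six': '1.16.0',               # -> six==1.16.0
#     'numpy': ['1.19', '1.22'],     # -> numpy>=1.19,<1.22
# })
# ---------------------------------------------------------------------------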
|
the-stack_106_19248
|
#!/usr/bin/env python3
"""Google certified android devices tracker"""
import difflib
import json
from datetime import date
from os import rename, path, system, environ
from time import sleep
from requests import get, post
GIT_OAUTH_TOKEN = environ['GIT_OAUTH_TOKEN_XFU']
BOT_TOKEN = environ['BOTTOKEN']
TODAY = str(date.today())
BY_DEVICE = {}
BY_MODEL = {}
BY_BRAND = {}
BY_NAME = {}
def save_data(data_list):
"""Save Data to various files"""
markdown = open('README.md', 'w', encoding="utf-8")
markdown.write('# Google Play Certified Android devices\n')
markdown.write('Last sync is {}\n\nhttps://support.google.com/googleplay/'
'answer/1727131?hl=en\n\n'.format(TODAY))
markdown.write('|Retail Branding|Marketing Name|Device|Model|\n')
markdown.write('|---|---|---|---|\n')
for line in data_list[1:]:
i = line.strip().split(",")
try:
brand = i[0].strip()
name = i[1].strip()
device = i[2].strip()
model = i[3].strip()
markdown.write('|{}|{}|{}|{}|\n'.format(brand, name, device, model))
add_device(brand, name, device, model)
add_model(brand, name, device, model)
add_brand(brand, name, device, model)
add_name(brand, name, device, model)
except IndexError:
pass
with open("by_device.json", "w") as json_file:
json.dump(BY_DEVICE, json_file, indent=1, ensure_ascii=False)
with open("by_model.json", "w") as json_file:
json.dump(BY_MODEL, json_file, indent=1, ensure_ascii=False)
with open("by_brand.json", "w") as json_file:
json.dump(BY_BRAND, json_file, indent=1, ensure_ascii=False)
with open("by_name.json", "w") as json_file:
json.dump(BY_NAME, json_file, indent=1, ensure_ascii=False)
def add_device(brand, name, device, model):
"""add device to devices dict"""
try:
updated = BY_DEVICE[device] + [{'brand': brand, 'name': name, 'model': model}]
BY_DEVICE.update({
device: updated
})
except KeyError:
BY_DEVICE.update({
device: [{'brand': brand, 'name': name, 'model': model}]
})
def add_model(brand, name, device, model):
"""add device to models dict"""
try:
updated = BY_MODEL[model] + [{'brand': brand, 'name': name, 'device': device}]
BY_MODEL.update({
model: updated
})
except KeyError:
BY_MODEL.update({
model: [{'brand': brand, 'name': name, 'device': device}]
})
def add_brand(brand, name, device, model):
"""add device to brand dict"""
try:
updated = BY_BRAND[brand] + [{'device': device, 'name': name, 'model': model}]
BY_BRAND.update({
brand: updated
})
except KeyError:
BY_BRAND.update({
brand: [{'device': device, 'name': name, 'model': model}]
})
def add_name(brand, name, device, model):
"""add device to names dict"""
try:
updated = BY_NAME[name] + [{'brand': brand, 'device': device, 'model': model}]
BY_NAME.update({
name: updated
})
except KeyError:
BY_NAME.update({
name: [{'brand': brand, 'device': device, 'model': model}]
})
def fetch():
"""
Download latest and convert to utf-8
"""
url = "http://storage.googleapis.com/play_public/supported_devices.csv"
response = get(url)
data = (response.content.decode('utf-16'))
data_list = list(data.split('\n'))
return data_list
def diff_files():
"""
diff
"""
with open('old.md', 'r') as old, open('README.md', 'r') as new:
diff = difflib.unified_diff(old.readlines(), new.readlines(), fromfile='old', tofile='new')
changes = []
for line in diff:
if line.startswith('+'):
changes.append(str(line))
new = ''.join(changes[2:]).replace("+", "")
with open('changes', 'w') as out:
out.write(new)
def post_to_tg():
"""
post new devices to telegram channel
"""
# tg
telegram_chat = "@CertifiedAndroidDevices"
with open('changes', 'r') as changes:
for line in changes:
info = line.split("|")
brand = info[1]
name = info[2]
codename = info[3]
model = info[4]
telegram_message = f"New certified device added!: \n" \
f"Brand: *{brand}*\n" \
f"Name: *{name}*\n" \
f"*Codename:* `{codename}`\n" \
f"Model: *{model}*"
params = (
('chat_id', telegram_chat),
('text', telegram_message),
('parse_mode', "Markdown"),
('disable_web_page_preview', "yes")
)
telegram_url = "https://api.telegram.org/bot" + BOT_TOKEN + "/sendMessage"
telegram_req = post(telegram_url, params=params)
telegram_status = telegram_req.status_code
if telegram_status == 200:
print("{0}: Telegram Message sent".format(name))
else:
print("Telegram Error")
sleep(3)
def git_commit_push():
"""
git add - git commit - git push
"""
system("git add README.md *.json && git -c \"user.name=XiaomiFirmwareUpdater\" "
"-c \"[email protected]\" "
"commit -m \"[skip ci] sync: {0}\" && "" \
""git push -q https://{1}@github.com/androidtrackers/"
"certified-android-devices.git HEAD:master"
.format(TODAY, GIT_OAUTH_TOKEN))
def main():
"""
certified-android-devices tracker
"""
if path.exists('README.md'):
rename('README.md', 'old.md')
data_list = fetch()
save_data(data_list)
diff_files()
post_to_tg()
git_commit_push()
if __name__ == '__main__':
main()
|
the-stack_106_19250
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.python.lib.io import file_io
from skimage.transform import rescale, resize
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Function that reads the data from the csv file, increases the size of the images and returns the images and their labels
# dataset: Data path
IMAGE_SIZE = 48
def get_data(dataset, small=False, size=2000):
file_stream = file_io.FileIO(dataset, mode='r')
data = pd.read_csv(file_stream)
data[' pixels'] = data[' pixels'].apply(lambda x: [int(pixel) for pixel in x.split()])
X, Y = data[' pixels'].tolist(), data['emotion'].values
X = np.array(X, dtype='float32').reshape(-1,IMAGE_SIZE, IMAGE_SIZE,1)
X = X/255.0
if small==True:
X = X[0:size,:,:,:]
Y = Y[0:size]
X_res = np.zeros((X.shape[0], Resize_pixelsize,Resize_pixelsize,3))
for ind in range(X.shape[0]):
sample = X[ind]
sample = sample.reshape(IMAGE_SIZE, IMAGE_SIZE)
image_resized = resize(sample, (Resize_pixelsize, Resize_pixelsize), anti_aliasing=True)
X_res[ind,:,:,:] = image_resized.reshape(Resize_pixelsize,Resize_pixelsize,1)
Y_res = np.zeros((Y.size, 7))
Y_res[np.arange(Y.size),Y] = 1
return X, X_res, Y_res
Resize_pixelsize = 197
dev_dataset_dir = '../data/raw/fer_csv/dev.csv'
test_dataset_dir = '../data/raw/fer_csv/test.csv'
X_dev, X_res_dev, Y_dev = get_data(dev_dataset_dir,small=True, size=1000)
X_test, X_res_test, Y_test = get_data(test_dataset_dir, small=True, size=1000)
Resnet_model = tf.keras.models.load_model('../models/tl/ResNet-BEST-73.2.h5')
print('\n# Evaluate on dev data')
results_dev = Resnet_model.evaluate(X_res_dev,Y_dev)
print('dev loss, dev acc:', results_dev)
print('\n# Evaluate on test data')
results_test = Resnet_model.evaluate(X_res_test,Y_test)
print('test loss, test acc:', results_test)
|
the-stack_106_19251
|
class Tasks:
def __init__(self, taskID, title, datetime, description, statusID):
self.taskID = taskID
self.title = title
self.datetime = datetime
self.description = description
self.statusID = statusID
def get_status_name(self, list):
for val in list:
if val.statusID == self.statusID:
return val.name
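# Hypothetical usage sketch: only Tasks is defined in this file, so the
# Status helper below is an assumed stand-in exposing statusID and name.
#
# class Status:
#     def __init__(self, statusID, name):
#         self.statusID = statusID
#         self.name = name
#
# statuses = [Status(1, 'Open'), Status(2, 'Done')]
# task = Tasks(10, 'Write report', '2021-05-01 09:00', 'Quarterly report', 2)
# print(task.get_status_name(statuses))  # -> 'Done'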
|
the-stack_106_19254
|
# -*- coding:utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110- 1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# ----------------------------------------------------------
# Author: Stephen Leger (s-leger)
#
# ----------------------------------------------------------
import bpy
from bpy.types import Operator, PropertyGroup, Mesh, Panel
from bpy.props import (
FloatProperty, IntProperty, BoolProperty,
CollectionProperty, EnumProperty
)
from .bmesh_utils import BmeshEdit as bmed
# from .materialutils import MaterialUtils
from mathutils import Vector, Matrix
from math import sin, cos, pi
from .archipack_manipulator import Manipulable
from .archipack_object import ArchipackCreateTool, ArchipackObject
def update(self, context):
self.update(context)
class archipack_truss(ArchipackObject, Manipulable, PropertyGroup):
truss_type : EnumProperty(
name="Type",
items=(
('1', 'Prolyte E20', 'Prolyte E20', 0),
('2', 'Prolyte X30', 'Prolyte X30', 1),
('3', 'Prolyte H30', 'Prolyte H30', 2),
('4', 'Prolyte H40', 'Prolyte H40', 3),
('5', 'OPTI Trilite 100', 'OPTI Trilite 100', 4),
('6', 'OPTI Trilite 200', 'OPTI Trilite 200', 5),
('7', 'User defined', 'User defined', 6)
),
default='2',
update=update
)
z : FloatProperty(
name="Height",
default=2.0, min=0.01,
unit='LENGTH', subtype='DISTANCE',
update=update
)
segs : IntProperty(
name="Segs",
default=6, min=3,
update=update
)
master_segs : IntProperty(
name="Master Segs",
default=1, min=1,
update=update
)
master_count : IntProperty(
name="Masters",
default=3, min=2,
update=update
)
entre_axe : FloatProperty(
name="Distance",
default=0.239, min=0.001,
unit='LENGTH', subtype='DISTANCE',
update=update
)
master_radius : FloatProperty(
name="Radius",
default=0.02415, min=0.0001,
unit='LENGTH', subtype='DISTANCE',
update=update
)
slaves_radius : FloatProperty(
name="Subs radius",
default=0.01, min=0.0001,
unit='LENGTH', subtype='DISTANCE',
update=update
)
# Flag to prevent mesh update while making bulk changes over variables
# use :
# .auto_update = False
# bulk changes
# .auto_update = True
auto_update : BoolProperty(
options={'SKIP_SAVE'},
default=True,
update=update
)
def setup_manipulators(self):
if len(self.manipulators) < 1:
s = self.manipulators.add()
s.prop1_name = "z"
s.type_key = 'SIZE'
s.normal = Vector((0, 1, 0))
def docylinder(self, faces, verts, radius, segs, tMt, tMb, tM, add=False):
segs_step = 2 * pi / segs
tmpverts = [0 for i in range(segs)]
if add:
cv = len(verts) - segs
else:
cv = len(verts)
for seg in range(segs):
seg_angle = pi / 4 + seg * segs_step
tmpverts[seg] = radius * Vector((sin(seg_angle), -cos(seg_angle), 0))
if not add:
for seg in range(segs):
verts.append(tM @ tMb @ tmpverts[seg])
for seg in range(segs):
verts.append(tM @ tMt @ tmpverts[seg])
for seg in range(segs - 1):
f = cv + seg
faces.append((f + 1, f, f + segs, f + segs + 1))
f = cv
faces.append((f, f + segs - 1, f + 2 * segs - 1, f + segs))
def update(self, context):
o = self.find_in_selection(context, self.auto_update)
if o is None:
return
self.setup_manipulators()
if self.truss_type == '1':
EntreAxe = 0.19
master_radius = 0.016
slaves_radius = 0.005
elif self.truss_type == '2':
EntreAxe = 0.239
master_radius = 0.0255
slaves_radius = 0.008
elif self.truss_type == '3':
EntreAxe = 0.239
master_radius = 0.02415
slaves_radius = 0.008
elif self.truss_type == '4':
EntreAxe = 0.339
master_radius = 0.02415
slaves_radius = 0.01
elif self.truss_type == '5':
EntreAxe = 0.15
master_radius = 0.0127
slaves_radius = 0.004
elif self.truss_type == '6':
EntreAxe = 0.200
master_radius = 0.0254
slaves_radius = 0.00635
elif self.truss_type == '7':
EntreAxe = self.entre_axe
master_radius = min(0.5 * self.entre_axe, self.master_radius)
slaves_radius = min(0.5 * self.entre_axe, self.master_radius, self.slaves_radius)
master_sepang = (pi * (self.master_count - 2) / self.master_count) / 2
radius = (EntreAxe / 2) / cos(master_sepang)
master_step = pi * 2 / self.master_count
verts = []
faces = []
if self.master_count == 4:
master_rotation = pi / 4 # 45.0
else:
master_rotation = 0.0
slaves_width = 2 * radius * sin(master_step / 2)
slaves_count = int(self.z / slaves_width)
slave_firstOffset = (self.z - slaves_count * slaves_width) / 2
master_z = self.z / self.master_segs
for master in range(self.master_count):
master_angle = master_rotation + master * master_step
tM = Matrix([
[1, 0, 0, radius * sin(master_angle)],
[0, 1, 0, radius * -cos(master_angle)],
[0, 0, 1, 0],
[0, 0, 0, 1]])
tMb = Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, self.z],
[0, 0, 0, 1]])
for n in range(1, self.master_segs + 1):
tMt = Matrix([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, self.z - n * master_z],
[0, 0, 0, 1]])
self.docylinder(faces, verts, master_radius, self.segs, tMt, tMb, tM, add=(n > 1))
if self.master_count < 3 and master == 1:
continue
ma = master_angle + master_sepang
tM = Matrix([
[cos(ma), sin(ma), 0, radius * sin(master_angle)],
[sin(ma), -cos(ma), 0, radius * -cos(master_angle)],
[0, 0, 1, slave_firstOffset],
[0, 0, 0, 1]])
if int(self.truss_type) < 5:
tMb = Matrix([
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
tMt = Matrix([
[1, 0, 0, 0],
[0, 0, 1, -slaves_width],
[0, 1, 0, 0],
[0, 0, 0, 1]])
self.docylinder(faces, verts, slaves_radius, self.segs, tMt, tMb, tM)
tMb = Matrix([
[1, 0, 0, 0],
[0, 1.4142, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
for n in range(1, slaves_count + 1):
tMt = Matrix([
[1, 0, 0, 0],
[0, 1.4142, 0, -(n % 2) * slaves_width],
[0, 0, 1, n * slaves_width],
[0, 0, 0, 1]])
self.docylinder(faces, verts, slaves_radius, self.segs, tMt, tMb, tM, add=(n > 1))
if int(self.truss_type) < 5:
tMb = Matrix([
[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, slaves_count * slaves_width],
[0, 0, 0, 1]])
tMt = Matrix([
[1, 0, 0, 0],
[0, 0, 1, -slaves_width],
[0, 1, 0, slaves_count * slaves_width],
[0, 0, 0, 1]])
self.docylinder(faces, verts, slaves_radius, self.segs, tMt, tMb, tM)
bmed.buildmesh(context, o, verts, faces, matids=None, uvs=None, weld=False)
self.manipulators[0].set_pts([(0, 0, 0), (0, 0, self.z), (1, 0, 0)])
self.restore_context(context)
class ARCHIPACK_PT_truss(Panel):
"""Archipack Truss"""
bl_idname = "ARCHIPACK_PT_truss"
bl_label = "Truss"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Archipack'
@classmethod
def poll(cls, context):
return archipack_truss.filter(context.active_object)
def draw(self, context):
prop = archipack_truss.datablock(context.active_object)
if prop is None:
return
layout = self.layout
row = layout.row(align=True)
row.operator('archipack.truss_manipulate', icon='VIEW_PAN')
box = layout.box()
box.prop(prop, 'truss_type')
box.prop(prop, 'z')
box.prop(prop, 'segs')
box.prop(prop, 'master_segs')
box.prop(prop, 'master_count')
if prop.truss_type == '7':
box.prop(prop, 'master_radius')
box.prop(prop, 'slaves_radius')
box.prop(prop, 'entre_axe')
class ARCHIPACK_OT_truss(ArchipackCreateTool, Operator):
bl_idname = "archipack.truss"
bl_label = "Truss"
bl_description = "Create Truss"
bl_category = 'Archipack'
bl_options = {'REGISTER', 'UNDO'}
def create(self, context):
m = bpy.data.meshes.new("Truss")
o = bpy.data.objects.new("Truss", m)
d = m.archipack_truss.add()
# make manipulators selectable
# d.manipulable_selectable = True
self.link_object_to_scene(context, o)
o.select_set(state=True)
context.view_layer.objects.active = o
self.load_preset(d)
self.add_material(o)
m.auto_smooth_angle = 1.15
return o
# -----------------------------------------------------
# Execute
# -----------------------------------------------------
def execute(self, context):
if context.mode == "OBJECT":
bpy.ops.object.select_all(action="DESELECT")
o = self.create(context)
o.location = bpy.context.scene.cursor.location
o.select_set(state=True)
context.view_layer.objects.active = o
self.manipulate()
return {'FINISHED'}
else:
self.report({'WARNING'}, "Archipack: Option only valid in Object mode")
return {'CANCELLED'}
# ------------------------------------------------------------------
# Define operator class to manipulate object
# ------------------------------------------------------------------
class ARCHIPACK_OT_truss_manipulate(Operator):
bl_idname = "archipack.truss_manipulate"
bl_label = "Manipulate"
bl_description = "Manipulate"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(self, context):
return archipack_truss.filter(context.active_object)
def invoke(self, context, event):
d = archipack_truss.datablock(context.active_object)
d.manipulable_invoke(context)
return {'FINISHED'}
def register():
bpy.utils.register_class(archipack_truss)
Mesh.archipack_truss = CollectionProperty(type=archipack_truss)
bpy.utils.register_class(ARCHIPACK_PT_truss)
bpy.utils.register_class(ARCHIPACK_OT_truss)
bpy.utils.register_class(ARCHIPACK_OT_truss_manipulate)
def unregister():
bpy.utils.unregister_class(archipack_truss)
del Mesh.archipack_truss
bpy.utils.unregister_class(ARCHIPACK_PT_truss)
bpy.utils.unregister_class(ARCHIPACK_OT_truss)
bpy.utils.unregister_class(ARCHIPACK_OT_truss_manipulate)
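# ---------------------------------------------------------------------------
# Hypothetical usage sketch (Blender Python console), based on the operator
# ids declared above:
#
# register()                            # register property group, panel, operators
# bpy.ops.archipack.truss()             # create a truss at the 3D cursor
# bpy.ops.archipack.truss_manipulate()  # start manipulating the active truss
# ---------------------------------------------------------------------------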
|
the-stack_106_19255
|
###################################################################################################
### Record & Render Information
###################################################################################################
import os
import csv
import plotly
import plotly.graph_objs as go
import itertools
import operator
import numpy as np
def writeDistTableToCSV(animal_list, results, outdir, outfilename):
# Takes in a matrix of DTW results in order specified in fileList
# Writes the results in the specified outfile name (expects absolute/full path)
N = len(animal_list)
outpath = os.path.join(outdir, outfilename)
with open(outpath, 'w') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
csvwriter.writerow(['']+[animal_obj.getName() for animal_obj in animal_list])
for i in range(N):
csvwriter.writerow([animal_list[i].getName()]+['' if results[i][j]=='' else'%.5f' % results[i][j] for j in range(N)])
print("LOG: Wrote the results in %s" % outpath)
def writeDistTableToHeatmap(animal_list, results, outdir, outfilename, color_min=0.0, color_max=1.0):
# Takes in a matrix of DTW results in order specified in fileList
# Writes the results in the specified outfile name (expects absolute/full path)
N = len(animal_list)
outpath = os.path.join(outdir, outfilename)
figure = {'data':[],'layout':{}}
trace = go.Heatmap(z=[[results[N-i-1][j] for j in range(N)] for i in range(N)], name='Heat Map',
x=[animal_list[j].getName() for j in range(N)],
y=[animal_list[N-i-1].getName() for i in range(N)],
colorscale='Viridis',
showscale=True,
visible=True,
zmin=color_min,
zmax=color_max
)
figure['data'].append(trace)
figure['layout']=dict(height=600,
width=630,
# margin=go.Margin(l=100,r=100,b=100,t=100),
showlegend=False,
xaxis={'showticklabels':False,'showgrid':False,'ticks':''},
yaxis={'showticklabels':False,'showgrid':False,'ticks':''},
annotations=[dict(x=j+0.5,y=N+1.0,text=animal_list[j].getName()[4:] if animal_list[j].inControlGroup() else animal_list[j].getName()[4:]+' ',
font={'color':'cyan' if animal_list[j].inControlGroup() else 'magenta', 'size':7},
textangle=-45,showarrow=False) for j in range(N)]
+[dict(x=N+1.0,y=i+0.0,text=animal_list[N-i-1].getName()[4:] if animal_list[N-i-1].inControlGroup() else animal_list[N-i-1].getName()[4:]+' ',
font={'color':'cyan' if animal_list[N-i-1].inControlGroup() else 'magenta', 'size':7},
textangle=0,showarrow=False) for i in range(N)])
plotly.offline.plot(figure, filename=outpath)
print("LOG: Plot the heatmap in %s" % outpath)
def writeSegmentExpsToCSV(animal_list, results, means, stds, outdir, outfilename):
# Writes the results from trajectory.runRandomSegmentComparisons() to CSV
# results is a table where results[i][j] is the j-th segment comparison for the i-th animal in animal_list. Note that the entry is a double, (segment lenth, distance).
num_animals = len(animal_list)
if means == None:
header_top = list(itertools.chain.from_iterable([[animal_obj.getName(),""] for animal_obj in animal_list]))
header_bottom = ["Segment Length", "Distance"] * num_animals
num_exps = len(results[0])
with open(os.path.join(outdir, outfilename),'w') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
csvwriter.writerow(header_top)
csvwriter.writerow(header_bottom)
for i in xrange(num_exps):
row = list(itertools.chain.from_iterable([results[j][i] for j in range(num_animals)]))
csvwriter.writerow(row)
else: #if means != None:
header_top = [""]+[animal_obj.getName() for animal_obj in animal_list]
header_bottom = ["Segment Length"] + ["Distance"]*num_animals
num_exps = len(results[0])
with open(os.path.join(outdir, outfilename),'w') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
csvwriter.writerow(header_top)
csvwriter.writerow(header_bottom)
for i in xrange(num_exps):
#row = list(itertools.chain.from_iterable([results[j][i] for j in range(num_animals)]))
row = [results[0][i][0]] + [results[j][i][1] for j in range(num_animals)]
csvwriter.writerow(row)
num_segs = len(means[0])
#write the mean information
csvwriter.writerow([""])
csvwriter.writerow(["Means"])
csvwriter.writerow(header_top)
csvwriter.writerow(header_bottom)
for i in range(num_segs):
#row = list(itertools.chain.from_iterable([means[j][i] for j in range(num_animals)]))
row = [means[0][i][0]] + [means[j][i][1] for j in range(num_animals)]
csvwriter.writerow(row)
#write the std information
csvwriter.writerow([""])
csvwriter.writerow(["Standard Deviations"])
csvwriter.writerow(header_top)
csvwriter.writerow(header_bottom)
for i in range(num_segs):
#row = list(itertools.chain.from_iterable([stds[j][i] for j in range(num_animals)]))
row = [stds[0][i][0]] + [stds[j][i][1] for j in range(num_animals)]
csvwriter.writerow(row)
    print("Saved the table in %s" % os.path.join(outdir, outfilename))
def renderSingleAnimalGraph(points, animal_obj, varname, outdir):
filename = "figure_%s_%s.html" % (animal_obj.getName(), varname)
outpath = os.path.join(outdir,filename).replace(' ','')
N = len(points)
trace = go.Scatter(x = range(N), y=points, mode='lines',showlegend=False,line={'width':4})
data = [trace]
plotly.offline.plot(data, filename=outpath, auto_open=False)
print("Saved single animal graph in %s" % outpath)
def renderAlignedGraphs( points_0, points_1, alignment, animal_obj_0, animal_obj_1, varname, outdir ):
matched_filename = "figure_%s-%s_matched_%s.html" % (animal_obj_0.getName(), animal_obj_1.getName(), varname )
aligned_filename = "figure_%s-%s_aligned_%s.html" % (animal_obj_0.getName(), animal_obj_1.getName(), varname )
original_filename_0 = "figure_%s_%s.html" % (animal_obj_0.getName(), varname )
original_filename_1 = "figure_%s_%s.html" % (animal_obj_1.getName(), varname )
reparam_filename_0 = "figure_%s_%s_warped_by_%s.html" % (animal_obj_0.getName(), varname, animal_obj_1.getName() )
reparam_filename_1 = "figure_%s_%s_warped_by_%s.html" % (animal_obj_1.getName(), varname, animal_obj_0.getName() )
fulloutpath_matched = os.path.join( outdir, matched_filename ).replace(' ','')
fulloutpath_aligned = os.path.join( outdir, aligned_filename ).replace(' ','')
fulloutpath_original_0 = os.path.join( outdir, original_filename_0 ).replace(' ','')
fulloutpath_original_1 = os.path.join( outdir, original_filename_1 ).replace(' ','')
fulloutpath_reparam_0 = os.path.join( outdir, reparam_filename_0 ).replace(' ','')
fulloutpath_reparam_1 = os.path.join( outdir, reparam_filename_1 ).replace(' ','')
N = len(alignment[0])
original_trace_0 = go.Scatter(x = [alignment[0][k] for k in range(N)], y=[points_0[alignment[0][k]] for k in range(N)], \
mode='lines',showlegend=False,line={'width':3},name=animal_obj_0.getName())
original_trace_1 = go.Scatter(x = [alignment[1][k] for k in range(N)], y=[points_1[alignment[1][k]] for k in range(N)], \
mode='lines',showlegend=False,line={'width':3},name=animal_obj_1.getName())
reparameterized_trace_0 = go.Scatter(x = [k*alignment[0][-1]/N for k in range(N)], y=[points_0[alignment[0][k]] for k in range(N)], \
mode='lines',showlegend=False,line={'width':3},name=animal_obj_0.getName())
reparameterized_trace_1 = go.Scatter(x = [k*alignment[1][-1]/N for k in range(N)], y=[points_1[alignment[1][k]] for k in range(N)], \
mode='lines',showlegend=False,line={'width':3},name=animal_obj_1.getName())
original_data_pair = []
reparameterized_data_pair = []
original_data_0 = []
original_data_1 = []
reparameterized_data_0 = []
reparameterized_data_1 = []
for i in range(N):
original_data_pair.append(go.Scatter(x=[alignment[0][i],alignment[1][i]],y=[points_0[alignment[0][i]],points_1[alignment[1][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
reparameterized_data_pair.append(go.Scatter(x=[i*alignment[0][-1]/N,i*alignment[0][-1]/N],y=[points_0[alignment[0][i]],points_1[alignment[1][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
original_data_0.append(go.Scatter(x=[alignment[0][i],alignment[0][i]],y=[0,points_0[alignment[0][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
original_data_1.append(go.Scatter(x=[alignment[1][i],alignment[1][i]],y=[0,points_1[alignment[1][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
reparameterized_data_0.append(go.Scatter(x=[i*alignment[0][-1]/N,i*alignment[0][-1]/N],y=[0,points_0[alignment[0][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
reparameterized_data_1.append(go.Scatter(x=[i*alignment[0][-1]/N,i*alignment[0][-1]/N],y=[0,points_1[alignment[1][i]]], \
mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
original_data_pair.append(original_trace_0)
original_data_pair.append(original_trace_1)
reparameterized_data_pair.append(reparameterized_trace_0)
reparameterized_data_pair.append(reparameterized_trace_1)
original_data_0.append(original_trace_0)
original_data_1.append(original_trace_1)
reparameterized_data_0.append(reparameterized_trace_0)
reparameterized_data_1.append(reparameterized_trace_1)
matched_figure = {'data':original_data_pair,'layout':{'height':350,'width':1000,'xaxis':{'title': 'Real Time'},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
aligned_figure = {'data':reparameterized_data_pair,'layout':{'height':350,'width':1000,'xaxis':{'title': 'Warped Time'},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
original_figure_0 = {'data':original_data_0,'layout':{'height':350,'width':1000,'xaxis':{'title': '%s Time' % animal_obj_0.getName()},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
original_figure_1 = {'data':original_data_1,'layout':{'height':350,'width':1000,'xaxis':{'title': '%s Time' % animal_obj_1.getName()},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
reparam_figure_0 = {'data':reparameterized_data_0,'layout':{'height':350,'width':1000,'xaxis':{'title': 'Warped Time'},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
reparam_figure_1 = {'data':reparameterized_data_1,'layout':{'height':350,'width':1000,'xaxis':{'title': 'Warped Time'},'yaxis':{'title': varname,'range':[0,1]}},'frames':[]}
plotly.offline.plot(matched_figure, filename=fulloutpath_matched, auto_open=False)
plotly.offline.plot(aligned_figure, filename=fulloutpath_aligned, auto_open=False)
plotly.offline.plot(original_figure_0, filename=fulloutpath_original_0, auto_open=False)
plotly.offline.plot(original_figure_1, filename=fulloutpath_original_1, auto_open=False)
plotly.offline.plot(reparam_figure_0, filename=fulloutpath_reparam_0, auto_open=False)
plotly.offline.plot(reparam_figure_1, filename=fulloutpath_reparam_1, auto_open=False)
print( "Saved the alignment graphs in directory %s" % outdir )
def renderAlignment(alignment, animal_obj_0, animal_obj_1, varnames, outdir):
filename = "figure_%s-%s_%s_alignment.html" % (animal_obj_0.getName(), animal_obj_1.getName(), '-'.join(varnames))
outpath = os.path.join(outdir,filename).replace(' ','')
N = len(alignment[0])
data = []
for i in range(N):
data.append(go.Scatter(x=[0,alignment[0][i],alignment[0][i]],y=[alignment[1][i],alignment[1][i],0],mode='lines',marker={'color':'black'},showlegend=False,opacity=0.1))
trace = go.Scatter(x = alignment[0], y=alignment[1], mode='lines',showlegend=False,line={'width':4})
data.append(trace)
figure = {'data':data,'layout':{'height':500,'width':500,'xaxis':{'title': '%s Time' % animal_obj_0.getName()},'yaxis': {'title': '%s Time' % animal_obj_1.getName()}}}
plotly.offline.plot(figure, filename=outpath+".html", auto_open=False)
print("Saved alignment graph in %s" % outpath)
def writeOFF(animal_obj, coordinates, outdir, filename):
outpath = os.path.join(outdir,filename).replace(' ','')
triangles = animal_obj.getTriangulation()
colors = animal_obj.getColors()
print("Writing triangulation to file %s..." % outpath)
with open(outpath, 'w') as outfile:
outfile.write("OFF\n")
outfile.write("%d %d %d\n" % (len(coordinates), len(triangles), 0))
for c in coordinates:
outfile.write("%f %f %f\n" % (c[0], c[1], c[2]))
for t in triangles:
c = colors[triangles.index(t)]
outfile.write("%d %d %d %d %f %f %f\n" % (3, t[0], t[1], t[2], c[0], c[1], c[2]))
def postProcess(animal_list, dists, outdir, outfilename, sort_table, square_table, color_min=0.0, color_max=1.0):
num_animals = len(animal_list)
if square_table:
for i in xrange(num_animals):
for j in xrange(i):
dists[i][j] = dists[j][i]
writeDistTableToCSV(animal_list, dists, outdir, outfilename+".csv")
writeDistTableToHeatmap(animal_list, dists, outdir, outfilename+".html", color_min, color_max)
else:
writeDistTableToCSV(animal_list, dists, outdir, outfilename+".csv")
writeDistTableToHeatmap(animal_list, dists, outdir, outfilename+".html", color_min, color_max)
if sort_table:
dist_means = {}
D = []
for i in xrange(num_animals):
dlist = [dists[j][i] for j in xrange(i)] + [dists[i][j] for j in xrange(i+1,num_animals)]
dist_means.update({animal_list[i]:np.mean(dlist)})
sorted_dists = sorted(dist_means.items(), key=operator.itemgetter(1))
sorted_indices = [animal_list.index(sorted_dists[i][0]) for i in xrange(num_animals)]
new_dists = [['' for i in range(num_animals)] for j in range(num_animals)]
for i in xrange(num_animals):
for j in xrange(i+1, num_animals):
new_dists[i][j] = dists[sorted_indices[i]][sorted_indices[j]] if sorted_indices[j] > sorted_indices[i] else dists[sorted_indices[j]][sorted_indices[i]]
dists = new_dists
animal_list = [animal_list[sorted_indices[i]] for i in xrange(num_animals)]
if square_table:
for i in xrange(num_animals):
for j in xrange(i):
dists[i][j] = dists[j][i]
writeDistTableToCSV(animal_list, dists, outdir, "%s" % outfilename+"_sorted.csv")
writeDistTableToHeatmap(animal_list, dists, outdir, "%s" % outfilename+"_sorted.html", color_min, color_max)
|
the-stack_106_19258
|
"""
My Discovery collection
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='mydiscovery-collection',
version='1.0.0',
description='My Discovery Collection',
long_description=long_description,
url='https://github.com/IBM-Bluemix/python-hello-world-flask',
license='Apache-2.0'
)
|
the-stack_106_19259
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2017-2022 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tree level output for MySQL function
This module defines functions that generate MySQL SQL code to make local
predictions
"""
from bigml.tree_utils import INDENT
from bigml.generators.model import PYTHON_OPERATOR, missing_branch, \
none_value
from bigml.predict_utils.common import mintree_split, get_predicate, get_node
from bigml.predict_utils.common import OPERATION_OFFSET, FIELD_OFFSET, \
VALUE_OFFSET, MISSING_OFFSET
from bigml.generators.tree_common import filter_nodes
T_MISSING_OPERATOR = {
"=": "ISNULL(",
"!=": "NOT ISNULL("
}
# Map operator str to its corresponding mysql operator
MYSQL_OPERATOR = {
"/=": "!="}
def value_to_print(value, optype):
"""String of code that represents a value according to its type
"""
if value is None:
return "NULL"
if optype == 'numeric':
return value
return "'%s'" % value.replace("'", '\\\'')
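# Illustrative examples (not in the original source) of value_to_print output:
#
#     >>> value_to_print(None, 'categorical')
#     'NULL'
#     >>> value_to_print(1.5, 'numeric')
#     1.5
#     >>> value_to_print("Iris-setosa", 'categorical')
#     "'Iris-setosa'"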
def missing_check_code(tree, offsets, fields, objective_id,
field, alternate, cmv, attr=None):
"""Builds the code to predict when the field is missing
"""
node = get_node(tree)
condition = "ISNULL(`%s`)" % fields[field]['name']
code = ("%s (%s)" %
(alternate, condition))
# used when printing the confidence metric
if attr is not None:
value = node[offsets[attr]]
else:
value = value_to_print( \
node[offsets["output"]],
fields[objective_id]['optype'])
code += (", %s" % value)
cmv.append(fields[field]['name'])
return code
def missing_prefix_code(tree, fields, field, cmv):
"""Part of the condition that checks for missings when missing_splits
has been used
"""
predicate = get_predicate(tree)
missing = predicate[MISSING_OFFSET]
negation = "" if missing else "NOT "
connection = "OR" if missing else "AND"
if not missing:
cmv.append(fields[field]['name'])
return "(%sISNULL(`%s`) %s " % ( \
negation, fields[field]['name'],
connection)
def split_condition_code(tree, fields, field, alternate,
pre_condition):
"""Condition code for the split
"""
predicate = get_predicate(tree)
post_condition = ""
optype = fields[field]['optype']
value = value_to_print(predicate[VALUE_OFFSET], optype)
operation = predicate[OPERATION_OFFSET]
operator = ("" if predicate[VALUE_OFFSET] is None else
MYSQL_OPERATOR.get(operation,
PYTHON_OPERATOR.get(operation)))
if predicate[VALUE_OFFSET] is None:
value = ""
pre_condition = (
T_MISSING_OPERATOR[operation])
post_condition = ")"
condition = "%s`%s`%s%s%s" % ( \
pre_condition,
fields[predicate[FIELD_OFFSET]]['name'],
operator,
value,
post_condition)
return "%s (%s)" % (alternate, condition)
def plug_in_body(tree, offsets, fields, objective_id, depth=0, cmv=None,
ids_path=None, subtree=True, body="", attr=None):
"""Translate the model into a mysql function
`depth` controls the size of indentation. As soon as a value is missing
that node is returned without further evaluation.
`attr` is used to decide the value returned by the function. When
it's set to None, the prediction is returned. When set to the
name of an attribute (e.g. 'confidence') this attribute is returned
"""
if cmv is None:
cmv = []
if body:
alternate = ",\n%sIF (" % (depth * INDENT)
else:
alternate = "IF ("
post_missing_body = ""
node = get_node(tree)
children = filter_nodes([] if node[offsets["children#"]] == 0 \
else node[offsets["children"]], offsets, ids=ids_path, subtree=subtree)
if children:
# field used in the split
field = mintree_split(children)
has_missing_branch = (missing_branch(children) or
none_value(children))
# the missing is singled out as a special case only when there's
# no missing branch in the children list
if (not has_missing_branch and
not fields[field]['name'] in cmv):
body += missing_check_code(tree, offsets, fields, objective_id,
field, alternate, cmv, attr)
depth += 1
alternate = ",\n%sIF (" % (depth * INDENT)
post_missing_body += ")"
for child in children:
pre_condition = ""
predicate = get_predicate(child)
# code when missing splits has been used
if has_missing_branch and predicate[VALUE_OFFSET] is not None:
pre_condition = missing_prefix_code(child, fields, field, cmv)
# complete split condition code
body += split_condition_code( \
child, fields, field, alternate, pre_condition)
depth += 1
alternate = ",\n%sIF (" % (depth * INDENT)
body = plug_in_body(child, offsets, fields, objective_id,
depth, cmv=cmv[:],
ids_path=ids_path, subtree=subtree,
body=body, attr=attr)
body += ", NULL))" + post_missing_body
post_missing_body = ""
else:
if attr is None:
value = value_to_print( \
node[offsets["output"]], fields[objective_id]['optype'])
else:
try:
value = node[offsets[attr]]
except KeyError:
value = "NULL"
body += ", %s" % (value)
return body
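# Illustrative usage sketch (not part of the original module). The tree,
# offsets, fields and objective_id normally come from a BigML model object;
# the call below assumes those have been obtained elsewhere (e.g. via the
# helpers in bigml.generators.model) and only shows how the pieces fit:
#
#     sql_body = plug_in_body(tree, offsets, fields, objective_id)
#     print(sql_body)
#
# The resulting string is a nested chain of MySQL IF(...) expressions that
# mirrors the decision tree, returning NULL on any path that cannot be decided.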
|
the-stack_106_19260
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import json
import logging
import sys
from typing import (
Any,
ClassVar,
Coroutine,
Dict,
Iterable,
List,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
Type,
TypeVar,
Union,
)
from urllib.parse import quote as _uriquote
import weakref
import aiohttp
from .errors import HTTPException, Forbidden, NotFound, LoginFailure, DiscordServerError, GatewayNotFound, InvalidArgument
from .gateway import DiscordClientWebSocketResponse
from . import __version__, utils
from .utils import MISSING
_log = logging.getLogger(__name__)
if TYPE_CHECKING:
from .file import File
from .enums import (
AuditLogAction,
InteractionResponseType,
)
from .types import (
appinfo,
audit_log,
channel,
components,
emoji,
embed,
guild,
integration,
interactions,
invite,
member,
message,
template,
role,
user,
webhook,
        welcome_screen,
widget,
threads,
voice,
sticker,
)
from .types.snowflake import Snowflake, SnowflakeList
from types import TracebackType
T = TypeVar('T')
BE = TypeVar('BE', bound=BaseException)
MU = TypeVar('MU', bound='MaybeUnlock')
Response = Coroutine[Any, Any, T]
async def json_or_text(response: aiohttp.ClientResponse) -> Union[Dict[str, Any], str]:
text = await response.text(encoding='utf-8')
try:
if response.headers['content-type'] == 'application/json':
return utils._from_json(text)
except KeyError:
# Thanks Cloudflare
pass
return text
class Route:
BASE: ClassVar[str] = 'https://discord.com/api/v8'
def __init__(self, method: str, path: str, **parameters: Any) -> None:
self.path: str = path
self.method: str = method
url = self.BASE + self.path
if parameters:
url = url.format_map({k: _uriquote(v) if isinstance(v, str) else v for k, v in parameters.items()})
self.url: str = url
# major parameters:
self.channel_id: Optional[Snowflake] = parameters.get('channel_id')
self.guild_id: Optional[Snowflake] = parameters.get('guild_id')
self.webhook_id: Optional[Snowflake] = parameters.get('webhook_id')
self.webhook_token: Optional[str] = parameters.get('webhook_token')
@property
def bucket(self) -> str:
# the bucket is just method + path w/ major parameters
return f'{self.channel_id}:{self.guild_id}:{self.path}'
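# Illustrative example (not part of the original source); the IDs below are
# hypothetical:
#
#     >>> r = Route('GET', '/channels/{channel_id}/messages/{message_id}',
#     ...           channel_id=1234, message_id=5678)
#     >>> r.url
#     'https://discord.com/api/v8/channels/1234/messages/5678'
#     >>> r.bucket
#     '1234:None:/channels/{channel_id}/messages/{message_id}'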
class MaybeUnlock:
def __init__(self, lock: asyncio.Lock) -> None:
self.lock: asyncio.Lock = lock
self._unlock: bool = True
def __enter__(self: MU) -> MU:
return self
def defer(self) -> None:
self._unlock = False
def __exit__(
self,
exc_type: Optional[Type[BE]],
exc: Optional[BE],
traceback: Optional[TracebackType],
) -> None:
if self._unlock:
self.lock.release()
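# Illustrative pattern (not part of the original source) showing how request()
# below uses MaybeUnlock: the lock is released on exit unless defer() was
# called, in which case release is scheduled for later (e.g. when a rate limit
# bucket resets):
#
#     await lock.acquire()
#     with MaybeUnlock(lock) as maybe_lock:
#         ...                      # perform the HTTP request
#         if bucket_exhausted:     # hypothetical condition
#             maybe_lock.defer()
#             loop.call_later(delta, lock.release)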
# For some reason, the Discord voice websocket expects this header to be
# completely lowercase while aiohttp respects spec and does it as case-insensitive
aiohttp.hdrs.WEBSOCKET = 'websocket' # type: ignore
class HTTPClient:
"""Represents an HTTP client sending HTTP requests to the Discord API."""
def __init__(
self,
connector: Optional[aiohttp.BaseConnector] = None,
*,
proxy: Optional[str] = None,
proxy_auth: Optional[aiohttp.BasicAuth] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
unsync_clock: bool = True,
) -> None:
self.loop: asyncio.AbstractEventLoop = asyncio.get_event_loop() if loop is None else loop
self.connector = connector
self.__session: aiohttp.ClientSession = MISSING # filled in static_login
self._locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()
self._global_over: asyncio.Event = asyncio.Event()
self._global_over.set()
self.token: Optional[str] = None
self.bot_token: bool = False
self.proxy: Optional[str] = proxy
self.proxy_auth: Optional[aiohttp.BasicAuth] = proxy_auth
self.use_clock: bool = not unsync_clock
user_agent = 'DiscordBot (https://github.com/Pycord-Development/pycord {0}) Python/{1[0]}.{1[1]} aiohttp/{2}'
self.user_agent: str = user_agent.format(__version__, sys.version_info, aiohttp.__version__)
def recreate(self) -> None:
if self.__session.closed:
self.__session = aiohttp.ClientSession(
connector=self.connector, ws_response_class=DiscordClientWebSocketResponse
)
async def ws_connect(self, url: str, *, compress: int = 0) -> Any:
kwargs = {
'proxy_auth': self.proxy_auth,
'proxy': self.proxy,
'max_msg_size': 0,
'timeout': 30.0,
'autoclose': False,
'headers': {
'User-Agent': self.user_agent,
},
'compress': compress,
}
return await self.__session.ws_connect(url, **kwargs)
async def request(
self,
route: Route,
*,
files: Optional[Sequence[File]] = None,
form: Optional[Iterable[Dict[str, Any]]] = None,
**kwargs: Any,
) -> Any:
bucket = route.bucket
method = route.method
url = route.url
lock = self._locks.get(bucket)
if lock is None:
lock = asyncio.Lock()
if bucket is not None:
self._locks[bucket] = lock
# header creation
headers: Dict[str, str] = {
'User-Agent': self.user_agent,
}
if self.token is not None:
headers['Authorization'] = 'Bot ' + self.token
# some checking if it's a JSON request
if 'json' in kwargs:
headers['Content-Type'] = 'application/json'
kwargs['data'] = utils._to_json(kwargs.pop('json'))
try:
reason = kwargs.pop('reason')
except KeyError:
pass
else:
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
kwargs['headers'] = headers
# Proxy support
if self.proxy is not None:
kwargs['proxy'] = self.proxy
if self.proxy_auth is not None:
kwargs['proxy_auth'] = self.proxy_auth
if not self._global_over.is_set():
# wait until the global lock is complete
await self._global_over.wait()
response: Optional[aiohttp.ClientResponse] = None
data: Optional[Union[Dict[str, Any], str]] = None
await lock.acquire()
with MaybeUnlock(lock) as maybe_lock:
for tries in range(5):
if files:
for f in files:
f.reset(seek=tries)
if form:
form_data = aiohttp.FormData()
for params in form:
form_data.add_field(**params)
kwargs['data'] = form_data
try:
async with self.__session.request(method, url, **kwargs) as response:
_log.debug('%s %s with %s has returned %s', method, url, kwargs.get('data'), response.status)
# even errors have text involved in them so this is safe to call
data = await json_or_text(response)
# check if we have rate limit header information
remaining = response.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and response.status != 429:
# we've depleted our current bucket
delta = utils._parse_ratelimit_header(response, use_clock=self.use_clock)
_log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)
maybe_lock.defer()
self.loop.call_later(delta, lock.release)
# the request was successful so just return the text/json
if 300 > response.status >= 200:
_log.debug('%s %s has received %s', method, url, data)
return data
# we are being rate limited
if response.status == 429:
if not response.headers.get('Via') or isinstance(data, str):
# Banned by Cloudflare more than likely.
raise HTTPException(response, data)
fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket "%s"'
# sleep a bit
retry_after: float = data['retry_after']
_log.warning(fmt, retry_after, bucket)
# check if it's a global rate limit
is_global = data.get('global', False)
if is_global:
_log.warning('Global rate limit has been hit. Retrying in %.2f seconds.', retry_after)
self._global_over.clear()
await asyncio.sleep(retry_after)
_log.debug('Done sleeping for the rate limit. Retrying...')
# release the global lock now that the
# global rate limit has passed
if is_global:
self._global_over.set()
_log.debug('Global rate limit is now over.')
continue
# we've received a 500, 502, or 504, unconditional retry
if response.status in {500, 502, 504}:
await asyncio.sleep(1 + tries * 2)
continue
# the usual error cases
if response.status == 403:
raise Forbidden(response, data)
elif response.status == 404:
raise NotFound(response, data)
elif response.status >= 500:
raise DiscordServerError(response, data)
else:
raise HTTPException(response, data)
# This is handling exceptions from the request
except OSError as e:
# Connection reset by peer
if tries < 4 and e.errno in (54, 10054):
await asyncio.sleep(1 + tries * 2)
continue
raise
if response is not None:
# We've run out of retries, raise.
if response.status >= 500:
raise DiscordServerError(response, data)
raise HTTPException(response, data)
raise RuntimeError('Unreachable code in HTTP handling')
async def get_from_cdn(self, url: str) -> bytes:
async with self.__session.get(url) as resp:
if resp.status == 200:
return await resp.read()
elif resp.status == 404:
raise NotFound(resp, 'asset not found')
elif resp.status == 403:
raise Forbidden(resp, 'cannot retrieve asset')
else:
raise HTTPException(resp, 'failed to get asset')
# state management
async def close(self) -> None:
if self.__session:
await self.__session.close()
# login management
async def static_login(self, token: str) -> user.User:
# Necessary to get aiohttp to stop complaining about session creation
self.__session = aiohttp.ClientSession(connector=self.connector, ws_response_class=DiscordClientWebSocketResponse)
old_token = self.token
self.token = token
try:
data = await self.request(Route('GET', '/users/@me'))
except HTTPException as exc:
self.token = old_token
if exc.status == 401:
raise LoginFailure('Improper token has been passed.') from exc
raise
return data
def logout(self) -> Response[None]:
return self.request(Route('POST', '/auth/logout'))
# Group functionality
def start_group(self, user_id: Snowflake, recipients: List[int]) -> Response[channel.GroupDMChannel]:
payload = {
'recipients': recipients,
}
return self.request(Route('POST', '/users/{user_id}/channels', user_id=user_id), json=payload)
def leave_group(self, channel_id) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id))
# Message management
def start_private_message(self, user_id: Snowflake) -> Response[channel.DMChannel]:
payload = {
'recipient_id': user_id,
}
return self.request(Route('POST', '/users/@me/channels'), json=payload)
def send_message(
self,
channel_id: Snowflake,
content: Optional[str],
*,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[List[embed.Embed]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
payload = {}
if content:
payload['content'] = content
if tts:
payload['tts'] = True
if embed:
payload['embeds'] = [embed]
if embeds:
payload['embeds'] = embeds
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
if components:
payload['components'] = components
if stickers:
payload['sticker_ids'] = stickers
return self.request(r, json=payload)
def send_typing(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('POST', '/channels/{channel_id}/typing', channel_id=channel_id))
def send_multipart_helper(
self,
route: Route,
*,
files: Sequence[File],
content: Optional[str] = None,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[Iterable[Optional[embed.Embed]]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
form = []
payload: Dict[str, Any] = {'tts': tts}
if content:
payload['content'] = content
if embed:
payload['embeds'] = [embed]
if embeds:
payload['embeds'] = embeds
if nonce:
payload['nonce'] = nonce
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
if message_reference:
payload['message_reference'] = message_reference
if components:
payload['components'] = components
if stickers:
payload['sticker_ids'] = stickers
form.append({'name': 'payload_json', 'value': utils._to_json(payload)})
if len(files) == 1:
file = files[0]
form.append(
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
else:
for index, file in enumerate(files):
form.append(
{
'name': f'file{index}',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
return self.request(route, form=form, files=files)
def send_files(
self,
channel_id: Snowflake,
*,
files: Sequence[File],
content: Optional[str] = None,
tts: bool = False,
embed: Optional[embed.Embed] = None,
embeds: Optional[List[embed.Embed]] = None,
nonce: Optional[str] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
message_reference: Optional[message.MessageReference] = None,
stickers: Optional[List[sticker.StickerItem]] = None,
components: Optional[List[components.Component]] = None,
) -> Response[message.Message]:
r = Route('POST', '/channels/{channel_id}/messages', channel_id=channel_id)
return self.send_multipart_helper(
r,
files=files,
content=content,
tts=tts,
embed=embed,
embeds=embeds,
nonce=nonce,
allowed_mentions=allowed_mentions,
message_reference=message_reference,
stickers=stickers,
components=components,
)
def delete_message(
self, channel_id: Snowflake, message_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, reason=reason)
def delete_messages(
self, channel_id: Snowflake, message_ids: SnowflakeList, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('POST', '/channels/{channel_id}/messages/bulk-delete', channel_id=channel_id)
payload = {
'messages': message_ids,
}
return self.request(r, json=payload, reason=reason)
def edit_message(self, channel_id: Snowflake, message_id: Snowflake, **fields: Any) -> Response[message.Message]:
r = Route('PATCH', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r, json=fields)
def add_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'PUT',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def remove_reaction(
self, channel_id: Snowflake, message_id: Snowflake, emoji: str, member_id: Snowflake
) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/{member_id}',
channel_id=channel_id,
message_id=message_id,
member_id=member_id,
emoji=emoji,
)
return self.request(r)
def remove_own_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}/@me',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def get_reaction_users(
self,
channel_id: Snowflake,
message_id: Snowflake,
emoji: str,
limit: int,
after: Optional[Snowflake] = None,
) -> Response[List[user.User]]:
r = Route(
'GET',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
return self.request(r, params=params)
def clear_reactions(self, channel_id: Snowflake, message_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r)
def clear_single_reaction(self, channel_id: Snowflake, message_id: Snowflake, emoji: str) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/messages/{message_id}/reactions/{emoji}',
channel_id=channel_id,
message_id=message_id,
emoji=emoji,
)
return self.request(r)
def get_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]:
r = Route('GET', '/channels/{channel_id}/messages/{message_id}', channel_id=channel_id, message_id=message_id)
return self.request(r)
def get_channel(self, channel_id: Snowflake) -> Response[channel.Channel]:
r = Route('GET', '/channels/{channel_id}', channel_id=channel_id)
return self.request(r)
def logs_from(
self,
channel_id: Snowflake,
limit: int,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
around: Optional[Snowflake] = None,
) -> Response[List[message.Message]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before is not None:
params['before'] = before
if after is not None:
params['after'] = after
if around is not None:
params['around'] = around
return self.request(Route('GET', '/channels/{channel_id}/messages', channel_id=channel_id), params=params)
def publish_message(self, channel_id: Snowflake, message_id: Snowflake) -> Response[message.Message]:
return self.request(
Route(
'POST',
'/channels/{channel_id}/messages/{message_id}/crosspost',
channel_id=channel_id,
message_id=message_id,
)
)
def pin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route(
'PUT',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def unpin_message(self, channel_id: Snowflake, message_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route(
'DELETE',
'/channels/{channel_id}/pins/{message_id}',
channel_id=channel_id,
message_id=message_id,
)
return self.request(r, reason=reason)
def pins_from(self, channel_id: Snowflake) -> Response[List[message.Message]]:
return self.request(Route('GET', '/channels/{channel_id}/pins', channel_id=channel_id))
# Member management
def kick(self, user_id: Snowflake, guild_id: Snowflake, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
if reason:
# thanks aiohttp
r.url = f'{r.url}?reason={_uriquote(reason)}'
return self.request(r)
def ban(
self,
user_id: Snowflake,
guild_id: Snowflake,
delete_message_days: int = 1,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('PUT', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
params = {
'delete_message_days': delete_message_days,
}
return self.request(r, params=params, reason=reason)
def unban(self, user_id: Snowflake, guild_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, reason=reason)
def guild_voice_state(
self,
user_id: Snowflake,
guild_id: Snowflake,
*,
mute: Optional[bool] = None,
deafen: Optional[bool] = None,
reason: Optional[str] = None,
) -> Response[member.Member]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {}
if mute is not None:
payload['mute'] = mute
if deafen is not None:
payload['deaf'] = deafen
return self.request(r, json=payload, reason=reason)
def edit_profile(self, payload: Dict[str, Any]) -> Response[user.User]:
return self.request(Route('PATCH', '/users/@me'), json=payload)
def change_my_nickname(
self,
guild_id: Snowflake,
nickname: str,
*,
reason: Optional[str] = None,
) -> Response[member.Nickname]:
r = Route('PATCH', '/guilds/{guild_id}/members/@me/nick', guild_id=guild_id)
payload = {
'nick': nickname,
}
return self.request(r, json=payload, reason=reason)
def change_nickname(
self,
guild_id: Snowflake,
user_id: Snowflake,
nickname: str,
*,
reason: Optional[str] = None,
) -> Response[member.Member]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
payload = {
'nick': nickname,
}
return self.request(r, json=payload, reason=reason)
def edit_my_voice_state(self, guild_id: Snowflake, payload: Dict[str, Any]) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/voice-states/@me', guild_id=guild_id)
return self.request(r, json=payload)
def edit_voice_state(self, guild_id: Snowflake, user_id: Snowflake, payload: Dict[str, Any]) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/voice-states/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=payload)
def edit_member(
self,
guild_id: Snowflake,
user_id: Snowflake,
*,
reason: Optional[str] = None,
**fields: Any,
) -> Response[member.MemberWithUser]:
r = Route('PATCH', '/guilds/{guild_id}/members/{user_id}', guild_id=guild_id, user_id=user_id)
return self.request(r, json=fields, reason=reason)
# Channel management
def edit_channel(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.Channel]:
r = Route('PATCH', '/channels/{channel_id}', channel_id=channel_id)
valid_keys = (
'name',
'parent_id',
'topic',
'bitrate',
'nsfw',
'user_limit',
'position',
'permission_overwrites',
'rate_limit_per_user',
'type',
'rtc_region',
'video_quality_mode',
'archived',
'auto_archive_duration',
'locked',
'invitable',
'default_auto_archive_duration',
)
payload = {k: v for k, v in options.items() if k in valid_keys}
return self.request(r, reason=reason, json=payload)
def bulk_channel_update(
self,
guild_id: Snowflake,
data: List[guild.ChannelPositionUpdate],
*,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('PATCH', '/guilds/{guild_id}/channels', guild_id=guild_id)
return self.request(r, json=data, reason=reason)
def create_channel(
self,
guild_id: Snowflake,
channel_type: channel.ChannelType,
*,
reason: Optional[str] = None,
**options: Any,
) -> Response[channel.GuildChannel]:
payload = {
'type': channel_type,
}
valid_keys = (
'name',
'parent_id',
'topic',
'bitrate',
'nsfw',
'user_limit',
'position',
'permission_overwrites',
'rate_limit_per_user',
'rtc_region',
'video_quality_mode',
'auto_archive_duration',
)
payload.update({k: v for k, v in options.items() if k in valid_keys and v is not None})
return self.request(Route('POST', '/guilds/{guild_id}/channels', guild_id=guild_id), json=payload, reason=reason)
def delete_channel(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}', channel_id=channel_id), reason=reason)
# Thread management
def start_thread_with_message(
self,
channel_id: Snowflake,
message_id: Snowflake,
*,
name: str,
auto_archive_duration: threads.ThreadArchiveDuration,
reason: Optional[str] = None,
) -> Response[threads.Thread]:
payload = {
'name': name,
'auto_archive_duration': auto_archive_duration,
}
route = Route(
'POST', '/channels/{channel_id}/messages/{message_id}/threads', channel_id=channel_id, message_id=message_id
)
return self.request(route, json=payload, reason=reason)
def start_thread_without_message(
self,
channel_id: Snowflake,
*,
name: str,
auto_archive_duration: threads.ThreadArchiveDuration,
type: threads.ThreadType,
invitable: bool = True,
reason: Optional[str] = None,
) -> Response[threads.Thread]:
payload = {
'name': name,
'auto_archive_duration': auto_archive_duration,
'type': type,
'invitable': invitable,
}
route = Route('POST', '/channels/{channel_id}/threads', channel_id=channel_id)
return self.request(route, json=payload, reason=reason)
def join_thread(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('POST', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id))
def add_user_to_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]:
return self.request(
Route('PUT', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id)
)
def leave_thread(self, channel_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/channels/{channel_id}/thread-members/@me', channel_id=channel_id))
def remove_user_from_thread(self, channel_id: Snowflake, user_id: Snowflake) -> Response[None]:
route = Route('DELETE', '/channels/{channel_id}/thread-members/{user_id}', channel_id=channel_id, user_id=user_id)
return self.request(route)
def get_public_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/threads/archived/public', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_private_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/threads/archived/private', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_joined_private_archived_threads(
self, channel_id: Snowflake, before: Optional[Snowflake] = None, limit: int = 50
) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/channels/{channel_id}/users/@me/threads/archived/private', channel_id=channel_id)
params = {}
if before:
params['before'] = before
params['limit'] = limit
return self.request(route, params=params)
def get_active_threads(self, guild_id: Snowflake) -> Response[threads.ThreadPaginationPayload]:
route = Route('GET', '/guilds/{guild_id}/threads/active', guild_id=guild_id)
return self.request(route)
def get_thread_members(self, channel_id: Snowflake) -> Response[List[threads.ThreadMember]]:
route = Route('GET', '/channels/{channel_id}/thread-members', channel_id=channel_id)
return self.request(route)
# Webhook management
def create_webhook(
self,
channel_id: Snowflake,
*,
name: str,
avatar: Optional[bytes] = None,
reason: Optional[str] = None,
) -> Response[webhook.Webhook]:
payload: Dict[str, Any] = {
'name': name,
}
if avatar is not None:
payload['avatar'] = avatar
r = Route('POST', '/channels/{channel_id}/webhooks', channel_id=channel_id)
return self.request(r, json=payload, reason=reason)
def channel_webhooks(self, channel_id: Snowflake) -> Response[List[webhook.Webhook]]:
return self.request(Route('GET', '/channels/{channel_id}/webhooks', channel_id=channel_id))
def guild_webhooks(self, guild_id: Snowflake) -> Response[List[webhook.Webhook]]:
return self.request(Route('GET', '/guilds/{guild_id}/webhooks', guild_id=guild_id))
def get_webhook(self, webhook_id: Snowflake) -> Response[webhook.Webhook]:
return self.request(Route('GET', '/webhooks/{webhook_id}', webhook_id=webhook_id))
def follow_webhook(
self,
channel_id: Snowflake,
webhook_channel_id: Snowflake,
reason: Optional[str] = None,
) -> Response[None]:
payload = {
'webhook_channel_id': str(webhook_channel_id),
}
return self.request(
Route('POST', '/channels/{channel_id}/followers', channel_id=channel_id), json=payload, reason=reason
)
# Guild management
def get_guilds(
self,
limit: int,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
) -> Response[List[guild.Guild]]:
params: Dict[str, Any] = {
'limit': limit,
}
if before:
params['before'] = before
if after:
params['after'] = after
return self.request(Route('GET', '/users/@me/guilds'), params=params)
def leave_guild(self, guild_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/users/@me/guilds/{guild_id}', guild_id=guild_id))
def get_guild(self, guild_id: Snowflake) -> Response[guild.Guild]:
return self.request(Route('GET', '/guilds/{guild_id}', guild_id=guild_id))
def delete_guild(self, guild_id: Snowflake) -> Response[None]:
return self.request(Route('DELETE', '/guilds/{guild_id}', guild_id=guild_id))
def create_guild(self, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]:
payload = {
'name': name,
'region': region,
}
if icon:
payload['icon'] = icon
return self.request(Route('POST', '/guilds'), json=payload)
def edit_guild(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[guild.Guild]:
valid_keys = (
'name',
'region',
'icon',
'afk_timeout',
'owner_id',
'afk_channel_id',
'splash',
'discovery_splash',
'features',
'verification_level',
'system_channel_id',
'default_message_notifications',
'description',
'explicit_content_filter',
'banner',
'system_channel_flags',
'rules_channel_id',
'public_updates_channel_id',
'preferred_locale',
)
payload = {k: v for k, v in fields.items() if k in valid_keys}
return self.request(Route('PATCH', '/guilds/{guild_id}', guild_id=guild_id), json=payload, reason=reason)
def get_template(self, code: str) -> Response[template.Template]:
return self.request(Route('GET', '/guilds/templates/{code}', code=code))
def guild_templates(self, guild_id: Snowflake) -> Response[List[template.Template]]:
return self.request(Route('GET', '/guilds/{guild_id}/templates', guild_id=guild_id))
def create_template(self, guild_id: Snowflake, payload: template.CreateTemplate) -> Response[template.Template]:
return self.request(Route('POST', '/guilds/{guild_id}/templates', guild_id=guild_id), json=payload)
def sync_template(self, guild_id: Snowflake, code: str) -> Response[template.Template]:
return self.request(Route('PUT', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def edit_template(self, guild_id: Snowflake, code: str, payload) -> Response[template.Template]:
valid_keys = (
'name',
'description',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(
Route('PATCH', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code), json=payload
)
def delete_template(self, guild_id: Snowflake, code: str) -> Response[None]:
return self.request(Route('DELETE', '/guilds/{guild_id}/templates/{code}', guild_id=guild_id, code=code))
def create_from_template(self, code: str, name: str, region: str, icon: Optional[str]) -> Response[guild.Guild]:
payload = {
'name': name,
'region': region,
}
if icon:
payload['icon'] = icon
return self.request(Route('POST', '/guilds/templates/{code}', code=code), json=payload)
def get_bans(self, guild_id: Snowflake) -> Response[List[guild.Ban]]:
return self.request(Route('GET', '/guilds/{guild_id}/bans', guild_id=guild_id))
def get_ban(self, user_id: Snowflake, guild_id: Snowflake) -> Response[guild.Ban]:
return self.request(Route('GET', '/guilds/{guild_id}/bans/{user_id}', guild_id=guild_id, user_id=user_id))
def get_vanity_code(self, guild_id: Snowflake) -> Response[invite.VanityInvite]:
return self.request(Route('GET', '/guilds/{guild_id}/vanity-url', guild_id=guild_id))
def change_vanity_code(self, guild_id: Snowflake, code: str, *, reason: Optional[str] = None) -> Response[None]:
payload: Dict[str, Any] = {'code': code}
return self.request(Route('PATCH', '/guilds/{guild_id}/vanity-url', guild_id=guild_id), json=payload, reason=reason)
def get_all_guild_channels(self, guild_id: Snowflake) -> Response[List[guild.GuildChannel]]:
return self.request(Route('GET', '/guilds/{guild_id}/channels', guild_id=guild_id))
def get_members(
self, guild_id: Snowflake, limit: int, after: Optional[Snowflake]
) -> Response[List[member.MemberWithUser]]:
params: Dict[str, Any] = {
'limit': limit,
}
if after:
params['after'] = after
r = Route('GET', '/guilds/{guild_id}/members', guild_id=guild_id)
return self.request(r, params=params)
def get_member(self, guild_id: Snowflake, member_id: Snowflake) -> Response[member.MemberWithUser]:
return self.request(Route('GET', '/guilds/{guild_id}/members/{member_id}', guild_id=guild_id, member_id=member_id))
def prune_members(
self,
guild_id: Snowflake,
days: int,
compute_prune_count: bool,
roles: List[str],
*,
reason: Optional[str] = None,
) -> Response[guild.GuildPrune]:
payload: Dict[str, Any] = {
'days': days,
'compute_prune_count': 'true' if compute_prune_count else 'false',
}
if roles:
payload['include_roles'] = ', '.join(roles)
return self.request(Route('POST', '/guilds/{guild_id}/prune', guild_id=guild_id), json=payload, reason=reason)
def estimate_pruned_members(
self,
guild_id: Snowflake,
days: int,
roles: List[str],
) -> Response[guild.GuildPrune]:
params: Dict[str, Any] = {
'days': days,
}
if roles:
params['include_roles'] = ', '.join(roles)
return self.request(Route('GET', '/guilds/{guild_id}/prune', guild_id=guild_id), params=params)
def get_sticker(self, sticker_id: Snowflake) -> Response[sticker.Sticker]:
return self.request(Route('GET', '/stickers/{sticker_id}', sticker_id=sticker_id))
def list_premium_sticker_packs(self) -> Response[sticker.ListPremiumStickerPacks]:
return self.request(Route('GET', '/sticker-packs'))
def get_all_guild_stickers(self, guild_id: Snowflake) -> Response[List[sticker.GuildSticker]]:
return self.request(Route('GET', '/guilds/{guild_id}/stickers', guild_id=guild_id))
def get_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake) -> Response[sticker.GuildSticker]:
return self.request(
Route('GET', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id)
)
def create_guild_sticker(
self, guild_id: Snowflake, payload: sticker.CreateGuildSticker, file: File, reason: str
) -> Response[sticker.GuildSticker]:
initial_bytes = file.fp.read(16)
try:
mime_type = utils._get_mime_type_for_image(initial_bytes)
except InvalidArgument:
if initial_bytes.startswith(b'{'):
mime_type = 'application/json'
else:
mime_type = 'application/octet-stream'
finally:
file.reset()
form: List[Dict[str, Any]] = [
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': mime_type,
}
]
for k, v in payload.items():
form.append(
{
'name': k,
'value': v,
}
)
return self.request(
Route('POST', '/guilds/{guild_id}/stickers', guild_id=guild_id), form=form, files=[file], reason=reason
)
def modify_guild_sticker(
self, guild_id: Snowflake, sticker_id: Snowflake, payload: sticker.EditGuildSticker, reason: Optional[str],
) -> Response[sticker.GuildSticker]:
return self.request(
Route('PATCH', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id),
json=payload,
reason=reason,
)
def delete_guild_sticker(self, guild_id: Snowflake, sticker_id: Snowflake, reason: Optional[str]) -> Response[None]:
return self.request(
Route('DELETE', '/guilds/{guild_id}/stickers/{sticker_id}', guild_id=guild_id, sticker_id=sticker_id),
reason=reason,
)
def get_all_custom_emojis(self, guild_id: Snowflake) -> Response[List[emoji.Emoji]]:
return self.request(Route('GET', '/guilds/{guild_id}/emojis', guild_id=guild_id))
def get_custom_emoji(self, guild_id: Snowflake, emoji_id: Snowflake) -> Response[emoji.Emoji]:
return self.request(Route('GET', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id))
def create_custom_emoji(
self,
guild_id: Snowflake,
name: str,
image: bytes,
*,
roles: Optional[SnowflakeList] = None,
reason: Optional[str] = None,
) -> Response[emoji.Emoji]:
payload = {
'name': name,
'image': image,
'roles': roles or [],
}
r = Route('POST', '/guilds/{guild_id}/emojis', guild_id=guild_id)
return self.request(r, json=payload, reason=reason)
def delete_custom_emoji(
self,
guild_id: Snowflake,
emoji_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, reason=reason)
def edit_custom_emoji(
self,
guild_id: Snowflake,
emoji_id: Snowflake,
*,
payload: Dict[str, Any],
reason: Optional[str] = None,
) -> Response[emoji.Emoji]:
r = Route('PATCH', '/guilds/{guild_id}/emojis/{emoji_id}', guild_id=guild_id, emoji_id=emoji_id)
return self.request(r, json=payload, reason=reason)
def get_all_integrations(self, guild_id: Snowflake) -> Response[List[integration.Integration]]:
r = Route('GET', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r)
def create_integration(self, guild_id: Snowflake, type: integration.IntegrationType, id: int) -> Response[None]:
payload = {
'type': type,
'id': id,
}
r = Route('POST', '/guilds/{guild_id}/integrations', guild_id=guild_id)
return self.request(r, json=payload)
def edit_integration(self, guild_id: Snowflake, integration_id: Snowflake, **payload: Any) -> Response[None]:
r = Route(
'PATCH', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id
)
return self.request(r, json=payload)
def sync_integration(self, guild_id: Snowflake, integration_id: Snowflake) -> Response[None]:
r = Route(
'POST', '/guilds/{guild_id}/integrations/{integration_id}/sync', guild_id=guild_id, integration_id=integration_id
)
return self.request(r)
def delete_integration(
self, guild_id: Snowflake, integration_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE', '/guilds/{guild_id}/integrations/{integration_id}', guild_id=guild_id, integration_id=integration_id
)
return self.request(r, reason=reason)
def get_audit_logs(
self,
guild_id: Snowflake,
limit: int = 100,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
user_id: Optional[Snowflake] = None,
action_type: Optional[AuditLogAction] = None,
) -> Response[audit_log.AuditLog]:
params: Dict[str, Any] = {'limit': limit}
if before:
params['before'] = before
if after:
params['after'] = after
if user_id:
params['user_id'] = user_id
if action_type:
params['action_type'] = action_type
r = Route('GET', '/guilds/{guild_id}/audit-logs', guild_id=guild_id)
return self.request(r, params=params)
def get_widget(self, guild_id: Snowflake) -> Response[widget.Widget]:
return self.request(Route('GET', '/guilds/{guild_id}/widget.json', guild_id=guild_id))
def edit_widget(self, guild_id: Snowflake, payload) -> Response[widget.WidgetSettings]:
return self.request(Route('PATCH', '/guilds/{guild_id}/widget', guild_id=guild_id), json=payload)
# Invite management
def create_invite(
self,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
max_age: int = 0,
max_uses: int = 0,
temporary: bool = False,
unique: bool = True,
target_type: Optional[invite.InviteTargetType] = None,
target_user_id: Optional[Snowflake] = None,
target_application_id: Optional[Snowflake] = None,
) -> Response[invite.Invite]:
r = Route('POST', '/channels/{channel_id}/invites', channel_id=channel_id)
payload = {
'max_age': max_age,
'max_uses': max_uses,
'temporary': temporary,
'unique': unique,
}
if target_type:
payload['target_type'] = target_type
if target_user_id:
payload['target_user_id'] = target_user_id
if target_application_id:
payload['target_application_id'] = str(target_application_id)
return self.request(r, reason=reason, json=payload)
def get_invite(
self, invite_id: str, *, with_counts: bool = True, with_expiration: bool = True
) -> Response[invite.Invite]:
params = {
'with_counts': int(with_counts),
'with_expiration': int(with_expiration),
}
return self.request(Route('GET', '/invites/{invite_id}', invite_id=invite_id), params=params)
def invites_from(self, guild_id: Snowflake) -> Response[List[invite.Invite]]:
return self.request(Route('GET', '/guilds/{guild_id}/invites', guild_id=guild_id))
def invites_from_channel(self, channel_id: Snowflake) -> Response[List[invite.Invite]]:
return self.request(Route('GET', '/channels/{channel_id}/invites', channel_id=channel_id))
def delete_invite(self, invite_id: str, *, reason: Optional[str] = None) -> Response[None]:
return self.request(Route('DELETE', '/invites/{invite_id}', invite_id=invite_id), reason=reason)
# Role management
def get_roles(self, guild_id: Snowflake) -> Response[List[role.Role]]:
return self.request(Route('GET', '/guilds/{guild_id}/roles', guild_id=guild_id))
def edit_role(
self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None, **fields: Any
) -> Response[role.Role]:
r = Route('PATCH', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
valid_keys = ('name', 'permissions', 'color', 'hoist', 'mentionable')
payload = {k: v for k, v in fields.items() if k in valid_keys}
return self.request(r, json=payload, reason=reason)
def delete_role(self, guild_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
r = Route('DELETE', '/guilds/{guild_id}/roles/{role_id}', guild_id=guild_id, role_id=role_id)
return self.request(r, reason=reason)
def replace_roles(
self,
user_id: Snowflake,
guild_id: Snowflake,
role_ids: List[int],
*,
reason: Optional[str] = None,
) -> Response[member.MemberWithUser]:
return self.edit_member(guild_id=guild_id, user_id=user_id, roles=role_ids, reason=reason)
def create_role(self, guild_id: Snowflake, *, reason: Optional[str] = None, **fields: Any) -> Response[role.Role]:
r = Route('POST', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=fields, reason=reason)
def move_role_position(
self,
guild_id: Snowflake,
positions: List[guild.RolePositionUpdate],
*,
reason: Optional[str] = None,
) -> Response[List[role.Role]]:
r = Route('PATCH', '/guilds/{guild_id}/roles', guild_id=guild_id)
return self.request(r, json=positions, reason=reason)
def add_role(
self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'PUT',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
return self.request(r, reason=reason)
def remove_role(
self, guild_id: Snowflake, user_id: Snowflake, role_id: Snowflake, *, reason: Optional[str] = None
) -> Response[None]:
r = Route(
'DELETE',
'/guilds/{guild_id}/members/{user_id}/roles/{role_id}',
guild_id=guild_id,
user_id=user_id,
role_id=role_id,
)
return self.request(r, reason=reason)
def edit_channel_permissions(
self,
channel_id: Snowflake,
target: Snowflake,
allow: str,
deny: str,
type: channel.OverwriteType,
*,
reason: Optional[str] = None,
) -> Response[None]:
payload = {'id': target, 'allow': allow, 'deny': deny, 'type': type}
r = Route('PUT', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, json=payload, reason=reason)
def delete_channel_permissions(
self, channel_id: Snowflake, target: channel.OverwriteType, *, reason: Optional[str] = None
) -> Response[None]:
r = Route('DELETE', '/channels/{channel_id}/permissions/{target}', channel_id=channel_id, target=target)
return self.request(r, reason=reason)
# Welcome Screen
def get_welcome_screen(self, guild_id: Snowflake) -> Response[welcome_screen.WelcomeScreen]:
return self.request(Route('GET', '/guilds/{guild_id}/welcome-screen', guild_id=guild_id))
def edit_welcome_screen(self, guild_id: Snowflake, payload: Any, *, reason: Optional[str] = None) -> Response[welcome_screen.WelcomeScreen]:
keys = (
'description',
'welcome_channels',
'enabled',
)
payload = {
key: val for key, val in payload.items() if key in keys
}
return self.request(Route('PATCH', '/guilds/{guild_id}/welcome-screen', guild_id=guild_id), json=payload, reason=reason)
# Voice management
def move_member(
self,
user_id: Snowflake,
guild_id: Snowflake,
channel_id: Snowflake,
*,
reason: Optional[str] = None,
) -> Response[member.MemberWithUser]:
return self.edit_member(guild_id=guild_id, user_id=user_id, channel_id=channel_id, reason=reason)
# Stage instance management
def get_stage_instance(self, channel_id: Snowflake) -> Response[channel.StageInstance]:
return self.request(Route('GET', '/stage-instances/{channel_id}', channel_id=channel_id))
def create_stage_instance(self, *, reason: Optional[str], **payload: Any) -> Response[channel.StageInstance]:
valid_keys = (
'channel_id',
'topic',
'privacy_level',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(Route('POST', '/stage-instances'), json=payload, reason=reason)
def edit_stage_instance(self, channel_id: Snowflake, *, reason: Optional[str] = None, **payload: Any) -> Response[None]:
valid_keys = (
'topic',
'privacy_level',
)
payload = {k: v for k, v in payload.items() if k in valid_keys}
return self.request(
Route('PATCH', '/stage-instances/{channel_id}', channel_id=channel_id), json=payload, reason=reason
)
def delete_stage_instance(self, channel_id: Snowflake, *, reason: Optional[str] = None) -> Response[None]:
return self.request(Route('DELETE', '/stage-instances/{channel_id}', channel_id=channel_id), reason=reason)
# Application commands (global)
def get_global_commands(self, application_id: Snowflake) -> Response[List[interactions.ApplicationCommand]]:
return self.request(Route('GET', '/applications/{application_id}/commands', application_id=application_id))
def get_global_command(
self, application_id: Snowflake, command_id: Snowflake
) -> Response[interactions.ApplicationCommand]:
r = Route(
'GET',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r)
def upsert_global_command(self, application_id: Snowflake, payload) -> Response[interactions.ApplicationCommand]:
r = Route('POST', '/applications/{application_id}/commands', application_id=application_id)
return self.request(r, json=payload)
def edit_global_command(
self,
application_id: Snowflake,
command_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
valid_keys = (
'name',
'description',
'options',
)
payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore
r = Route(
'PATCH',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r, json=payload)
def delete_global_command(self, application_id: Snowflake, command_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/applications/{application_id}/commands/{command_id}',
application_id=application_id,
command_id=command_id,
)
return self.request(r)
def bulk_upsert_global_commands(
self, application_id: Snowflake, payload
) -> Response[List[interactions.ApplicationCommand]]:
r = Route('PUT', '/applications/{application_id}/commands', application_id=application_id)
return self.request(r, json=payload)
# Application commands (guild)
def get_guild_commands(
self, application_id: Snowflake, guild_id: Snowflake
) -> Response[List[interactions.ApplicationCommand]]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r)
def get_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[interactions.ApplicationCommand]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def upsert_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
r = Route(
'POST',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
def edit_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
payload: interactions.EditApplicationCommand,
) -> Response[interactions.ApplicationCommand]:
valid_keys = (
'name',
'description',
'options',
)
payload = {k: v for k, v in payload.items() if k in valid_keys} # type: ignore
r = Route(
'PATCH',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r, json=payload)
def delete_guild_command(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[None]:
r = Route(
'DELETE',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def bulk_upsert_guild_commands(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: List[interactions.EditApplicationCommand],
) -> Response[List[interactions.ApplicationCommand]]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
# Interaction responses
def _edit_webhook_helper(
self,
route: Route,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
):
payload: Dict[str, Any] = {}
if content:
payload['content'] = content
if embeds:
payload['embeds'] = embeds
if allowed_mentions:
payload['allowed_mentions'] = allowed_mentions
form: List[Dict[str, Any]] = [
{
'name': 'payload_json',
'value': utils._to_json(payload),
}
]
if file:
form.append(
{
'name': 'file',
'value': file.fp,
'filename': file.filename,
'content_type': 'application/octet-stream',
}
)
return self.request(route, form=form, files=[file] if file else None)
def create_interaction_response(
self,
interaction_id: Snowflake,
token: str,
*,
type: InteractionResponseType,
data: Optional[interactions.InteractionApplicationCommandCallbackData] = None,
) -> Response[None]:
r = Route(
'POST',
'/interactions/{interaction_id}/{interaction_token}/callback',
interaction_id=interaction_id,
interaction_token=token,
)
payload: Dict[str, Any] = {
'type': type,
}
if data is not None:
payload['data'] = data
return self.request(r, json=payload)
def get_original_interaction_response(
self,
application_id: Snowflake,
token: str,
) -> Response[message.Message]:
r = Route(
'GET',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self.request(r)
def edit_original_interaction_response(
self,
application_id: Snowflake,
token: str,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'PATCH',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions)
def delete_original_interaction_response(self, application_id: Snowflake, token: str) -> Response[None]:
r = Route(
'DELETE',
'/webhooks/{application_id}/{interaction_token}/messages/@original',
application_id=application_id,
interaction_token=token,
)
return self.request(r)
def create_followup_message(
self,
application_id: Snowflake,
token: str,
files: List[File] = [],
content: Optional[str] = None,
tts: bool = False,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'POST',
'/webhooks/{application_id}/{interaction_token}',
application_id=application_id,
interaction_token=token,
)
return self.send_multipart_helper(
r,
content=content,
files=files,
tts=tts,
embeds=embeds,
allowed_mentions=allowed_mentions,
)
def edit_followup_message(
self,
application_id: Snowflake,
token: str,
message_id: Snowflake,
file: Optional[File] = None,
content: Optional[str] = None,
embeds: Optional[List[embed.Embed]] = None,
allowed_mentions: Optional[message.AllowedMentions] = None,
) -> Response[message.Message]:
r = Route(
'PATCH',
'/webhooks/{application_id}/{interaction_token}/messages/{message_id}',
application_id=application_id,
interaction_token=token,
message_id=message_id,
)
return self._edit_webhook_helper(r, file=file, content=content, embeds=embeds, allowed_mentions=allowed_mentions)
def delete_followup_message(self, application_id: Snowflake, token: str, message_id: Snowflake) -> Response[None]:
r = Route(
'DELETE',
'/webhooks/{application_id}/{interaction_token}/messages/{message_id}',
application_id=application_id,
interaction_token=token,
message_id=message_id,
)
return self.request(r)
def get_guild_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
) -> Response[List[interactions.GuildApplicationCommandPermissions]]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/permissions',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r)
def get_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
) -> Response[interactions.GuildApplicationCommandPermissions]:
r = Route(
'GET',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r)
def edit_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
command_id: Snowflake,
payload: interactions.BaseGuildApplicationCommandPermissions,
) -> Response[None]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands/{command_id}/permissions',
application_id=application_id,
guild_id=guild_id,
command_id=command_id,
)
return self.request(r, json=payload)
def bulk_edit_guild_application_command_permissions(
self,
application_id: Snowflake,
guild_id: Snowflake,
payload: List[interactions.PartialGuildApplicationCommandPermissions],
) -> Response[None]:
r = Route(
'PUT',
'/applications/{application_id}/guilds/{guild_id}/commands/permissions',
application_id=application_id,
guild_id=guild_id,
)
return self.request(r, json=payload)
# Misc
def application_info(self) -> Response[appinfo.AppInfo]:
return self.request(Route('GET', '/oauth2/applications/@me'))
async def get_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> str:
try:
data = await self.request(Route('GET', '/gateway'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return value.format(data['url'], encoding)
async def get_bot_gateway(self, *, encoding: str = 'json', zlib: bool = True) -> Tuple[int, str]:
try:
data = await self.request(Route('GET', '/gateway/bot'))
except HTTPException as exc:
raise GatewayNotFound() from exc
if zlib:
value = '{0}?encoding={1}&v=9&compress=zlib-stream'
else:
value = '{0}?encoding={1}&v=9'
return data['shards'], value.format(data['url'], encoding)
def get_user(self, user_id: Snowflake) -> Response[user.User]:
return self.request(Route('GET', '/users/{user_id}', user_id=user_id))
|
the-stack_106_19261
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class ResignStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'status': 'string',
'error_code': 'string',
'error_message': 'string'
}
attribute_map = {
'status': 'status',
'error_code': 'error_code',
'error_message': 'error_message'
}
def __init__(self, status=None, error_code=None, error_message=None): # noqa: E501
"""ResignStatus - a model defined in Swagger""" # noqa: E501
self._status = None
self._error_code = None
self._error_message = None
self.discriminator = None
self.status = status
if error_code is not None:
self.error_code = error_code
if error_message is not None:
self.error_message = error_message
@property
def status(self):
"""Gets the status of this ResignStatus. # noqa: E501
The status of the resign # noqa: E501
:return: The status of this ResignStatus. # noqa: E501
:rtype: string
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResignStatus.
The status of the resign # noqa: E501
:param status: The status of this ResignStatus. # noqa: E501
:type: string
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def error_code(self):
"""Gets the error_code of this ResignStatus. # noqa: E501
Error code for any error that occurred during the resigning operation. # noqa: E501
:return: The error_code of this ResignStatus. # noqa: E501
:rtype: string
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this ResignStatus.
Error code for any error that occurred during the resigning operation. # noqa: E501
:param error_code: The error_code of this ResignStatus. # noqa: E501
:type: string
"""
self._error_code = error_code
@property
def error_message(self):
"""Gets the error_message of this ResignStatus. # noqa: E501
Error message for any error that occurred during the resigning operation. # noqa: E501
:return: The error_message of this ResignStatus. # noqa: E501
:rtype: string
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this ResignStatus.
Error message for any error that occurred during the resigning operation. # noqa: E501
:param error_message: The error_message of this ResignStatus. # noqa: E501
:type: string
"""
self._error_message = error_message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResignStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_19262
|
from django.test import TestCase
from scratch_model import *
chain = Blockchain()
transaction1 = Transaction('A', 'B', 100)
block1 = Block(transactions=[transaction1], time='now', index=1)
block1.mine_block()
chain.add_block(block1)
transaction2 = Transaction('c', 'd', 50)
transaction3 = Transaction('a', 'd', 150)
transaction4 = Transaction('a', 'b', 10000000)
block2 = Block(transactions=[transaction2, transaction3, transaction4], time='tomorrow', index=2)
block2.mine_block()
chain.add_block(block2)
chain.generate_keys()
for block in chain.chain:
print(block)
|
the-stack_106_19263
|
# MIT License
# Copyright 2020 Ryan Hausen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
import os
import numpy as np
import pytest
from astropy.io import fits
import morpheus_core.helpers.fits_helper as fh
import morpheus_core.tests.helpers as helper
@pytest.mark.unit
def test_open_file():
"""Tests morpheus_core.helpers.fits_helper.open_file"""
helper.setup()
sample_location = helper.make_sample_file()
expected_array = np.arange(100).reshape([10, 10])
hdul, actual_array = fh.open_file(sample_location)
np.testing.assert_array_equal(expected_array, actual_array)
helper.tear_down()
@pytest.mark.unit
def test_open_files():
"""Tests morpheus_core.helpers.fits_helper.open_file"""
helper.setup()
sample_location = helper.make_sample_file()
sample2_location = helper.make_sample_file2()
expected_array = np.arange(100).reshape([10, 10])
_, actual_arrays = fh.open_files([sample_location, sample2_location])
np.testing.assert_array_equal(expected_array, actual_arrays[0])
np.testing.assert_array_equal(expected_array, actual_arrays[1])
helper.tear_down()
@pytest.mark.unit
def test_dtype_to_bytes_per_value():
"""Tests morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value"""
types = [np.uint8, np.int16, np.int32, np.float32, np.float64]
expected_bytes_per_value = [1, 2, 4, 4, 8]
actual_bytes_per_value = list(map(fh.dtype_to_bytes_per_value, types))
assert actual_bytes_per_value == expected_bytes_per_value
@pytest.mark.unit
def test_dtype_to_bytes_per_value_fails():
"""Tests morpheus_core.helpers.fits_helper.dtype_to_bytes_per_value"""
with pytest.raises(ValueError):
fh.dtype_to_bytes_per_value(np.bool)
@pytest.mark.unit
@pytest.mark.filterwarnings("ignore::UserWarning") # Ignore astropy warning
def test_create_file():
"""Tests morpheus_core.helpers.fits_helper.create_file"""
helper.setup()
shape = (100, 100)
tmp_out = os.path.join(helper.TMP_DIR, "test.fits")
fh.create_file(tmp_out, shape, np.float32)
actual = fits.getdata(tmp_out)
assert actual.shape == shape
helper.tear_down()
|
the-stack_106_19265
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.base import BaseEstimator
from sklearn.cluster import KMeans
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
class RBFClassifier(BaseEstimator):
def __init__(self, k=2, n_neighbors=2, plot=False, n_selection=2):
self.k = k
self.n_neighbors = n_neighbors
self.plot = plot
self.n_selection = n_selection
def euclidean_distance(self, x1, x2):
return np.linalg.norm(x1 - x2)
def rbf_hidden_layer(self, X):
def activation(x, c, s):
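# Gaussian RBF activation, exp(-d / (2 * s**2)); the parentheses keep the bandwidth term in the denominator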
return np.exp(-self.euclidean_distance(x, c) / (2 * (s ** 2)))
return np.array([[activation(x, c, s) for (c, s) in zip(self.cluster_, self.std_list_)] for x in X])
def fit(self, X, y):
def convert_to_one_hot(y, n_classes):
arr = np.zeros((y.size, n_classes))
arr[np.arange(y.size), y.astype(np.uint)] = 1
return arr
kmeans = KMeans(n_clusters=self.k, random_state=0)
kmeans_prediction = kmeans.fit_predict(X)
if self.plot:
plt.scatter(X[:, 0], X[:, 1], c=kmeans_prediction)
plt.savefig('figs/k-means with k=%d.png' % self.k)
plt.clf()
self.cluster_ = kmeans.cluster_centers_
cond = self.k if self.n_neighbors > self.k or self.n_neighbors == 0 else self.n_neighbors
# Select N cluster centroids at "random"
if self.n_selection == 0:
self.std_list_ = np.array([[self.euclidean_distance(c1, c2) for c1 in self.cluster_] for c2 in self.cluster_[: cond]])
else:
self.std_list_ = np.sort(np.array([[self.euclidean_distance(c1, c2) for c1 in self.cluster_] for c2 in self.cluster_]))
# Select N cluster centroids by distance (closest last)
if self.n_selection == 2:
self.std_list_ = self.std_list_[::-1]
self.std_list_ = self.std_list_[:, : cond]
self.std_list_ = np.mean(self.std_list_, axis=1)
RBF_X = self.rbf_hidden_layer(X)
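# Solve for the output weights with the normal equations (pseudo-inverse least squares) against one-hot targets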
self.w_ = np.linalg.pinv(RBF_X.T @ RBF_X) @ RBF_X.T @ convert_to_one_hot(y, np.unique(y).size)
rbs_prediction = np.array([np.argmax(x) for x in self.rbf_hidden_layer(X) @ self.w_])
if self.plot:
plt.scatter(X[:, 0], X[:, 1], c=rbs_prediction)
plt.savefig('figs/rbs train k=%d, n_neighbors=%f.png' % (self.k, self.n_neighbors))
plt.clf()
def predict(self, X):
rbs_prediction = np.array([np.argmax(x) for x in self.rbf_hidden_layer(X) @ self.w_])
if self.plot:
plt.scatter(X[:, 0], X[:, 1], c=rbs_prediction)
plt.savefig('figs/rbs predict k=%d, n_neighbors=%f.png' % (self.k, self.n_neighbors))
plt.clf()
return rbs_prediction
def get_params(self, deep=True):
return {"k": self.k, "n_neighbors": self.n_neighbors, "plot": self.plot, "n_selection": self.n_selection}
data = np.loadtxt(open("dataset.csv", "rb"), delimiter=",", skiprows=1)
for i in range(2):
x = data[:, i]
hist, bins = np.histogram(x)
plt.plot(bins[:hist.size], hist / np.sum(hist))
print(i, 'min %.2f max %.2f mean %.2f std %.2f' %(np.min(x), np.max(x), np.mean(x), np.std(x)))
plt.xlabel('Values')
plt.ylabel('Proportions')
plt.savefig('Histogram before normalization.png')
plt.clf()
scaler = MinMaxScaler()
scaler.fit(data[:, 0:2])
X = scaler.transform(data[:, 0:2])
xTrain, xTest, yTrain, yTest = train_test_split(X, data[:, 2], test_size = 0.2, random_state = 0)
for i in range(2):
x = xTrain[:, i]
hist, bins = np.histogram(x)
plt.plot(bins[:hist.size], hist / np.sum(hist))
print(i, 'min %.2f max %.2f mean %.2f std %.2f' %(np.min(x), np.max(x), np.mean(x), np.std(x)))
plt.xlabel('Values')
plt.ylabel('Proportions')
plt.savefig('Histogram after normalization.png')
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=data[:, 2])
plt.savefig('correct result.png')
plt.clf()
# See how f1-score goes for k=50, trying different N selection methods
k = 50
for n_selection in range(3):
results = []
for n in range(2, k + 1):
clf = RBFClassifier(k, n, False, n_selection)
clf.fit(xTrain, yTrain)
results.append(classification_report(yTest, clf.predict(xTest), output_dict=True)['weighted avg']['f1-score'])
plt.plot(results)
plt.ylabel('f1-score')
plt.xlabel('N')
plt.savefig('f1-score for k = %d, N from 2 to %d selected at %s.png' % (k, k, ('random' if n_selection == 0 else ('sorted' if n_selection == 1 else 'sorted backwards'))))
plt.clf()
# Now that we know the best N selection method, let's take a look at how f1-score goes for different numbers of neurons in the hidden layer
results = []
for k in range(2, 51):
clf = RBFClassifier(k, 2, True)
clf.fit(xTrain, yTrain)
results.append(classification_report(yTest, clf.predict(xTest), output_dict=True)['weighted avg']['f1-score'])
#print(confusion_matrix(yTest, clf.predict(xTest)))
plt.plot(results)
plt.ylabel('f1-score')
plt.xlabel('k')
plt.savefig('f1-score for k = 2...50, N = 2 (furthest neighbor).png')
plt.clf()
|
the-stack_106_19267
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
sys.path.insert(0, os.path.abspath("../CYLGame"))
# -- Project information -----------------------------------------------------
project = "CYLGame"
copyright = "2019, UMD LARS Lab"
author = "UMD LARS Lab"
try:
import CYLGame.version
version = CYLGame.version
except:
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = ""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.viewcode", "sphinx.ext.githubpages", "sphinx.ext.napoleon", "sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "CYLGamedoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "CYLGame.tex", "CYLGame Documentation", "UMD LARS Lab", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "cylgame", "CYLGame Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CYLGame",
"CYLGame Documentation",
author,
"CYLGame",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
|
the-stack_106_19268
|
# Artificial Neural Network
# Installing Theano
# pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
# Installing Tensorflow
# pip install tensorflow
# Installing Keras
# pip install --upgrade keras
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
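# Drop the first dummy column to avoid the dummy variable trap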
X = X[:, 1:]
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu', input_dim = 11))
# Adding the second hidden layer
classifier.add(Dense(units = 6, kernel_initializer = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Part 3 - Making predictions and evaluating the model
# Predicting the Test set results
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Homework
#Geography: France
#Credit Score: 600
#Gender: Male
#Age: 40 years old
#Tenure: 3 years
#Balance: $60000
#Number of Products: 2
#Does this customer have a credit card ? Yes
#Is this customer an Active Member: Yes
#Estimated Salary: $50000
D_homework = np.array([600, 'France', 'Male', 40, 3, 60000, 2, 1, 1, 50000])
X_homework = D_homework.reshape((1,10))
X_homework[:, 1] = labelencoder_X_1.transform(X_homework[:, 1])
X_homework[:, 2] = labelencoder_X_2.transform(X_homework[:, 2])
X_homework = onehotencoder.transform(X_homework).toarray()
X_homework = X_homework[:, 1:]
X_homework = sc.transform(X_homework)
y_homework = classifier.predict(X_homework)
pred_homework = y_homework > 0.5 # False so, the answer is NO
from keras.models import load_model
classifier.save('ann_homework.h5')
|
the-stack_106_19269
|
'''
Elie Yen
Python version: 3.6
Conway's Game of life
'''
import numpy
import math
def get_generation(cells, generations):
#_ the direction of adjacent cells
adj = ((-2, -2), (-2, -1), (-2, 0), (-1, -2), (-1, 0),
(0, -2), (0, -1), (0, 0))
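#_ offsets are relative to the padded next_cells index: cells[row + r, col + c]
#_ covers the 8 neighbors of cells[row - 1, col - 1] (so (0, 0) here is a neighbor, not the cell itself)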
def status(cells, cur):
print("\ngeneration{0}\n".format(cur), cells)
if not generations or len(cells) < 1 or cur == generations:
return cells
#_ expand 1 cells in each border
#_ 1 for live cells, -1 for dead cells
h, w = len(cells), len(cells[0])
next_cells = numpy.full((h + 2, w + 2), -1, dtype = numpy.int8)
next_cells[1: -1, 1: -1] = cells[:]
#_ new height, width of next generation
nh, nw = -math.inf, -math.inf
min_h, min_w = math.inf, math.inf
for row in range(len(next_cells)):
for col in range(len(next_cells[0])):
#_ calculate how many adj live cells
#_ next_cells[i + 1][j + 1] = cells[i][j]
for r, c in adj:
if (-1 < row + r < h and -1 < col + c < w and
cells[row + r, col + c]):
next_cells[row, col] *= 2
#_ cells that have more than 3 live neighbors will die
if next_cells[row, col] in (16, -16):
next_cells[row, col] = 0
break
#_ check next status of cell by its value
#_ update range of width, height after trim empty row/ col
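#_ 4 or 8 = live cell (starts at 1) with 2 or 3 live neighbors -> survives; -8 = dead cell (starts at -1) with exactly 3 -> born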
if next_cells[row, col] in (4, 8, -8):
nh, min_h = max(nh, row), min(min_h, row)
nw, min_w = max(nw, col), min(min_w, col)
next_cells[row, col] = 1
else:
next_cells[row, col] = 0
#_ if no live cells, cells = []
#_ else trim the empty rows/ cols of next generation
cells = ([] if min_h == min_w == -nh == -nw == math.inf
else next_cells[min_h: nh + 1, min_w: nw + 1])
return status(cells, cur + 1)
return status(cells, 0)
#_ test
cells = numpy.random.randint(2, size=(3, 5))
get_generation(cells, 5)
|
the-stack_106_19271
|
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
import time
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
version = release = tornado.version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [
"tornado.platform.asyncio",
"tornado.platform.caresresolver",
"tornado.platform.twisted",
]
# I wish this could go in a per-module file...
coverage_ignore_classes = [
# tornado.concurrent
"TracebackFuture",
# tornado.gen
"Runner",
# tornado.ioloop
"PollIOLoop",
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
"WebSocketProtocol76",
]
coverage_ignore_functions = [
# various modules
"doctests",
"main",
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
]
html_favicon = 'favicon.ico'
latex_documents = [
('documentation', 'tornado.tex', 'Tornado Documentation', 'Facebook', 'manual', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {
'current_tarball': (
'https://pypi.python.org/packages/source/t/tornado/tornado-%s.tar.g%%s' % version,
'tornado-%s.tar.g' % version),
}
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
the-stack_106_19272
|
import json
import urllib
import urllib2
import tempfile
import subprocess
def set_image_cmd(filename):
return ['/usr/bin/feh', '--bg-scale', filename]
API_URL = 'http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US'
def get_current_image_data():
"""return the latest bing image data: A dictionary
containing the keys 'url' and 'description' at least.
url is relative to http(s)://bing.com/"""
req = urllib2.urlopen(API_URL)
data = req.read()
req.close()
# returned data is a list of images, choose the first one
data = json.loads(data)['images'][0]
return data
def fetch_image(url):
"""fetches the image to a temporary location.
Returns that location."""
target = tempfile.mkstemp(prefix='bingbg')[1]
urllib.urlretrieve(url, target)
return target
def set_image(filename):
cmd = set_image_cmd(filename)
ret = subprocess.call(cmd)
if ret:
raise RuntimeError("Something went wrong when executing %r", cmd)
def main():
data = get_current_image_data()
url = 'http://bing.com/' + data['url']
filename = fetch_image(url)
set_image(filename)
def loop():
"""calls main(), waits for 24 hours to have passed,
then calls main() again.
The function actually wakes up more often than that,
since the PC might spend some time in suspend/hibernate
(standby), and I'm not sure how time.sleep() is affected
by that."""
while True:
cur_time = time.time()
wakeup_at = cur_time + (60*60*24)
while time.time() < wakeup_at:
time.sleep(300) # sleep for five minutes
main()
if __name__ == "__main__":
import sys, time
main()
if '-l' in sys.argv:
loop()
|
the-stack_106_19273
|
"""Solve a random maze with
Markovian Decision Process"""
# -----------------------------------------------------------------------------
# Copyright 2019 (C) Nicolas P. Rougier & Anthony Strock
# Released under a BSD two-clauses license
#
# References: Bellman, Richard (1957), A Markovian Decision Process.
# Journal of Mathematics and Mechanics. Vol. 6, No. 5.
# -----------------------------------------------------------------------------
#https://github.com/rougier/ML-Recipes/blob/master/recipes/MDP/value-iteration.py
#https://en.wikipedia.org/wiki/Markov_decision_process
import numpy as np
from scipy.ndimage import generic_filter
def maze(shape=(30, 50), complexity=0.8, density=0.8):
shape = (np.array(shape)//2)*2 + 1
n_complexity = int(complexity*(shape[0]+shape[1]))
n_density = int(density*(shape[0]*shape[1]))
Z = np.ones(shape, dtype=bool)
Z[1:-1, 1:-1] = 0
P = (np.dstack([np.random.randint(0, shape[0]+1, n_density),
np.random.randint(0, shape[1]+1, n_density)])//2)*2
for (y,x) in P.squeeze():
Z[y, x] = 1
for j in range(n_complexity):
neighbours = []
if x > 1: neighbours.append([(y, x-1), (y, x-2)])
if x < shape[1]-2: neighbours.append([(y, x+1), (y, x+2)])
if y > 1: neighbours.append([(y-1, x), (y-2, x)])
if y < shape[0]-2: neighbours.append([(y+1, x), (y+2, x)])
if len(neighbours):
next_1, next_2 = neighbours[np.random.randint(len(neighbours))]
if Z[next_2] == 0:
Z[next_1] = Z[next_2] = 1
y, x = next_2
else:
break
return Z
def solve(Z, start, goal):
Z = 1 - Z
G = np.zeros(Z.shape)
G[start] = 1
# We iterate until value at exit is > 0. This requires the maze
# to have a solution or it will be stuck in the loop.
def diffuse(Z, gamma=0.99):
return max(gamma*Z[0], gamma*Z[1], Z[2], gamma*Z[3], gamma*Z[4])
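# diffuse() is the Bellman value-iteration update on the cross-shaped footprint:
# Z[2] is the current cell, Z[0]/Z[1]/Z[3]/Z[4] are its N/W/E/S neighbours discounted by gamma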
while G[goal] == 0.0:
G = Z * generic_filter(G, diffuse, footprint=[[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
# Descent gradient to find shortest path from entrance to exit
y, x = goal
dirs = (0,-1), (0,+1), (-1,0), (+1,0)
P = []
while (x, y) != start:
P.append((y,x))
neighbours = [-1, -1, -1, -1]
if x > 0: neighbours[0] = G[y, x-1]
if x < G.shape[1]-1: neighbours[1] = G[y, x+1]
if y > 0: neighbours[2] = G[y-1, x]
if y < G.shape[0]-1: neighbours[3] = G[y+1, x]
a = np.argmax(neighbours)
x, y = x + dirs[a][1], y + dirs[a][0]
P.append((y,x))
return P, G
def printSolution(S, start, goal):
for y,line in enumerate(Z):
for x,c in enumerate(line):
if (y,x) == start: print("[]", end='')
elif (y,x) == goal: print("[]", end='')
elif (y,x) in S[0]: print("..", end='')
elif c: print("██", end='')
else: print(" ", end='')
print()
def showSolution3D(S, start, goal):
from vedo import Text3D, Cube, Line, Grid, merge, show
pts, cubes, txts = [], [], []
pts = [(x,-y) for y,x in S[0]]
for y,line in enumerate(Z):
for x,c in enumerate(line):
if c: cubes.append(Cube([x,-y,0]))
path = Line(pts).lw(6).c('red5')
walls = merge(cubes).flat().c('orange1')
sy, sx = S[1].shape
gradient = np.flip(S[1], axis=0).ravel()
grd = Grid(pos=((sx-1)/2, -(sy-1)/2, -0.49), s=[sx,sy], res=[sx,sy])
grd.lw(0).wireframe(False).cmap('gist_earth_r', gradient, on='cells')
grd.addScalarBar(title='Gradient', horizontal=True, c='k', nlabels=2)
txts.append(__doc__)
txts.append(Text3D('Start', pos=[start[1]-1,-start[0]+1.5,1], c='k'))
txts.append(Text3D('Goal!', pos=[goal[1] -2,-goal[0] -2.7,1], c='k'))
return show(path, walls, grd, txts, axes=0, zoom=1.2)
##########################################################################
if __name__ == '__main__':
np.random.seed(4)
Z = maze(shape=(50, 70))
start, goal = (1,1), (Z.shape[0]-2, Z.shape[1]-2)
print("Please wait..")
S = solve(Z, start, goal)
#printSolution(S, start, goal)
showSolution3D(S, start, goal).close()
|
the-stack_106_19275
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Garnet Chan <[email protected]>
# Qiming Sun <[email protected]>
#
import unittest
import numpy as np
from pyscf import lib
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pscf
from pyscf.pbc.scf import khf
from pyscf.pbc.scf import kuhf
from pyscf.pbc import df
import pyscf.pbc.tools
def make_primitive_cell(mesh):
cell = pbcgto.Cell()
cell.unit = 'A'
cell.atom = 'C 0., 0., 0.; C 0.8917, 0.8917, 0.8917'
cell.a = '''0. 1.7834 1.7834
1.7834 0. 1.7834
1.7834 1.7834 0. '''
cell.basis = 'gth-szv'
cell.pseudo = 'gth-pade'
cell.mesh = mesh
cell.verbose = 7
cell.output = '/dev/null'
cell.build()
return cell
cell = make_primitive_cell([9]*3)
kpts = cell.make_kpts([3,1,1])
kmf = khf.KRHF(cell, kpts, exxdiv='vcut_sph').run(conv_tol=1e-9)
kumf = kuhf.KUHF(cell, kpts, exxdiv='vcut_sph').run(conv_tol=1e-9)
def tearDownModule():
global cell, kmf, kumf
cell.stdout.close()
del cell, kmf, kumf
class KnownValues(unittest.TestCase):
def test_analyze(self):
rpop, rchg = kmf.analyze() # pop at gamma point
upop, uchg = kumf.analyze()
self.assertTrue(isinstance(rpop, np.ndarray) and rpop.ndim == 1)
self.assertAlmostEqual(abs(upop[0]+upop[1]-rpop).max(), 0, 7)
self.assertAlmostEqual(lib.fp(rpop), 1.697446, 5)
def test_kpt_vs_supercell_high_cost(self):
# For large n, agreement is always achieved
# n = 17
# For small n, agreement only achieved if "wrapping" k-k'+G in get_coulG
n = 9
nk = (3, 1, 1)
cell = make_primitive_cell([n]*3)
abs_kpts = cell.make_kpts(nk, wrap_around=True)
kmf = khf.KRHF(cell, abs_kpts, exxdiv='vcut_sph')
ekpt = kmf.scf()
self.assertAlmostEqual(ekpt, -11.221426249047617, 8)
# nk = (5, 1, 1)
# abs_kpts = cell.make_kpts(nk, wrap_around=True)
# kmf = khf.KRHF(cell, abs_kpts, exxdiv='vcut_sph')
# ekpt = kmf.scf()
# self.assertAlmostEqual(ekpt, -12.337299166550796, 8)
supcell = pyscf.pbc.tools.super_cell(cell, nk)
mf = pscf.RHF(supcell, exxdiv='vcut_sph')
esup = mf.scf()/np.prod(nk)
self.assertAlmostEqual(ekpt, esup, 8)
def test_init_guess_by_chkfile(self):
n = 9
nk = (1, 1, 1)
cell = make_primitive_cell([n]*3)
kpts = cell.make_kpts(nk)
kmf = khf.KRHF(cell, kpts, exxdiv='vcut_sph')
kmf.conv_tol = 1e-9
ekpt = kmf.scf()
dm1 = kmf.make_rdm1()
dm2 = kmf.from_chk(kmf.chkfile)
self.assertTrue(dm2.dtype == np.double)
self.assertTrue(np.allclose(dm1, dm2))
mf = pscf.RHF(cell, exxdiv='vcut_sph')
mf.chkfile = kmf.chkfile
mf.init_guess = 'chkfile'
mf.max_cycle = 1
e1 = mf.kernel()
mf.conv_check = False
self.assertAlmostEqual(e1, ekpt, 9)
nk = (3, 1, 1)
kpts = cell.make_kpts(nk)
kmf1 = khf.KRHF(cell, kpts, exxdiv='vcut_sph')
kmf1.conv_tol = 1e-9
kmf1.chkfile = mf.chkfile
kmf1.init_guess = 'chkfile'
kmf1.max_cycle = 2
ekpt = kmf1.scf()
kmf1.conv_check = False
self.assertAlmostEqual(ekpt, -11.215259853108822, 8)
def test_krhf(self):
self.assertAlmostEqual(kmf.e_tot, -11.218735269838586, 8)
def test_kuhf(self):
self.assertAlmostEqual(kumf.e_tot, -11.218735269838586, 8)
np.random.seed(1)
kpts_bands = np.random.random((2,3))
e = kumf.get_bands(kpts_bands)[0]
self.assertAlmostEqual(lib.fp(np.array(e)), -0.0455444, 6)
def test_krhf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = np.eye(3) * 4,
mesh = [8,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = khf.KRHF(cell)
mf.with_df = df.AFTDF(cell)
mf.with_df.eta = 0.2
mf.init_guess = 'hcore'
mf.kpts = cell.make_kpts([2,1,1])
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.5112358424228809, 5)
def test_krhf_2d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = np.eye(3) * 4,
mesh = [10,10,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 2,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = khf.KRHF(cell)
mf.with_df = df.AFTDF(cell)
mf.with_df.eta = 0.2
mf.with_df.mesh = cell.mesh
mf.kpts = cell.make_kpts([2,1,1])
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.5376801775171911, 5)
def test_kuhf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = np.eye(3) * 4,
mesh = [8,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = kuhf.KUHF(cell)
mf.with_df = df.AFTDF(cell)
mf.with_df.eta = 0.2
mf.init_guess = 'hcore'
mf.kpts = cell.make_kpts([2,1,1])
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.5112358424228809, 5)
def test_kghf_1d(self):
L = 4
cell = pbcgto.Cell()
cell.build(unit = 'B',
a = np.eye(3) * 4,
mesh = [8,20,20],
atom = '''He 2 0 0; He 3 0 0''',
dimension = 1,
low_dim_ft_type = 'inf_vacuum',
verbose = 0,
basis = { 'He': [[0, (0.8, 1.0)],
#[0, (1.0, 1.0)],
[0, (1.2, 1.0)]
]})
mf = pscf.KGHF(cell)
mf.with_df = df.AFTDF(cell)
mf.with_df.eta = 0.2
mf.init_guess = 'hcore'
mf.kpts = cell.make_kpts([2,1,1])
e1 = mf.kernel()
self.assertAlmostEqual(e1, -3.5112358424228809, 4)
def test_get_fermi(self):
self.assertAlmostEqual(kmf.get_fermi(), 0.33154831914017424, 6)
def occ_vir(nocc, nvir):
occ = np.zeros(nocc+nvir)
occ[:nocc] = 1
return occ
mo_e_kpts = [np.arange(5), np.arange(2, 6)]
mo_occ_kpts = [occ_vir(2, 3)*2, occ_vir(2, 2)*2]
f = kmf.get_fermi(mo_e_kpts, mo_occ_kpts)
self.assertAlmostEqual(f, 2, 9)
# Smearing with error
mo_occ_kpts[0][1:3] = 1.000001
f = kmf.get_fermi(mo_e_kpts, mo_occ_kpts)
self.assertAlmostEqual(f, 2, 9)
mo_e_kpts = [mo_e_kpts, [x-.5 for x in mo_e_kpts]]
mo_occ_kpts = [[occ_vir(3, 2), occ_vir(2, 2)],
[occ_vir(2, 3), occ_vir(1, 3)]]
f = kumf.get_fermi(mo_e_kpts, mo_occ_kpts)
self.assertAlmostEqual(f[0], 3, 9)
self.assertAlmostEqual(f[1], 1.5, 9)
# Smearing with error
mo_occ_kpts[0][0][2:4] = 0.500001
mo_occ_kpts[1][1][0] -= 0.0000001
f = kumf.get_fermi(mo_e_kpts, mo_occ_kpts)
self.assertAlmostEqual(f[0], 3, 9)
self.assertAlmostEqual(f[1], 1.5, 9)
def test_dipole_moment(self):
dip = kmf.dip_moment()
self.assertAlmostEqual(lib.fp(dip), 0.729751581497479, 5)
def test_krhf_vs_rhf(self):
np.random.seed(1)
k = np.random.random(3)
mf = pscf.RHF(cell, k, exxdiv='vcut_sph')
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel()
kmf = pscf.KRHF(cell, [k], exxdiv='vcut_sph')
kmf.max_cycle = 1
kmf.diis = None
e2 = kmf.kernel()
self.assertAlmostEqual(e1, e2, 9)
self.assertAlmostEqual(e1, -11.451118801956275, 9)
def test_small_system(self):
mol = pbcgto.Cell(
atom='He 0 0 0;',
a=[[3, 0, 0], [0, 3, 0], [0, 0, 3]],
basis=[[0, [1, 1]]],
verbose=7,
output='/dev/null'
)
mf = pscf.KRHF(mol,kpts=[[0., 0., 0.]])
mf.run()
self.assertAlmostEqual(mf.e_tot, -2.2719576422665635, 8)
def test_damping(self):
nao = cell.nao
np.random.seed(1)
s = kmf.get_ovlp()
d = np.random.random((len(kpts),nao,nao))
d = (d + d.transpose(0,2,1)) * 2
vhf = 0
f = khf.get_fock(kmf, kmf.get_hcore(), s, vhf, d, cycle=0,
diis_start_cycle=2, damp_factor=0.5)
self.assertAlmostEqual(np.linalg.norm(f[0]), 95.32749551722966, 9)
self.assertAlmostEqual(np.linalg.norm(f[1]), 73.9231303798864, 9)
self.assertAlmostEqual(np.linalg.norm(f[2]), 58.973290554565196, 9)
vhf = np.zeros((2,len(kpts),nao,nao))
d1 = np.asarray([d/2, d/2])
f1 = kuhf.get_fock(kumf, kumf.get_hcore(), s, vhf, d1, cycle=0,
diis_start_cycle=2, damp_factor=0.5)
for k in range(len(kpts)):
self.assertAlmostEqual(np.linalg.norm(f[k]), np.linalg.norm(f1[0,k]),9)
self.assertAlmostEqual(np.linalg.norm(f[k]), np.linalg.norm(f1[1,k]),9)
if __name__ == '__main__':
print("Full Tests for pbc.scf.khf")
unittest.main()
|
the-stack_106_19277
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import getpass
import hashlib
import logging
import math
import os
import signal
import time
import warnings
from datetime import datetime, timedelta
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
from urllib.parse import quote
import dill
import lazy_object_proxy
import pendulum
from jinja2 import TemplateAssertionError, UndefinedError
from sqlalchemy import Column, Float, Index, Integer, PickleType, String, and_, func, or_
from sqlalchemy.orm import reconstructor
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.elements import BooleanClauseList
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException, AirflowRescheduleException, AirflowSkipException, AirflowTaskTimeout,
)
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.models.log import Log
from airflow.models.taskfail import TaskFail
from airflow.models.taskreschedule import TaskReschedule
from airflow.models.variable import Variable
from airflow.models.xcom import XCOM_RETURN_KEY, XCom
from airflow.sentry import Sentry
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.stats import Stats
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import REQUEUEABLE_DEPS, RUNNING_DEPS
from airflow.utils import timezone
from airflow.utils.email import send_email
from airflow.utils.helpers import is_container
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.operator_helpers import context_to_airflow_vars
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
from airflow.utils.timeout import timeout
def clear_task_instances(tis,
session,
activate_dag_runs=True,
dag=None,
):
"""
Clears a set of task instances, but makes sure the running ones
get killed.
:param tis: a list of task instances
:param session: current session
:param activate_dag_runs: flag to check for active dag run
:param dag: DAG object
"""
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
if dag and dag.has_task(task_id):
task = dag.get_task(task_id)
ti.refresh_from_task(task)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if dag is None or
# task not found in dag since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the last attempted try number.
ti.max_tries = max(ti.max_tries, ti.prev_attempted_tries)
ti.state = State.NONE
session.merge(ti)
# Clear all reschedules related to the ti to clear
TR = TaskReschedule
session.query(TR).filter(
TR.dag_id == ti.dag_id,
TR.task_id == ti.task_id,
TR.execution_date == ti.execution_date,
TR.try_number == ti.try_number
).delete()
if job_ids:
from airflow.jobs.base_job import BaseJob as BJ
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
if activate_dag_runs and tis:
from airflow.models.dagrun import DagRun # Avoid circular import
drs = session.query(DagRun).filter(
DagRun.dag_id.in_({ti.dag_id for ti in tis}),
DagRun.execution_date.in_({ti.execution_date for ti in tis}),
).all()
for dr in drs:
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
# Key used to identify task instance
# Tuple of: dag_id, task_id, execution_date, try_number
TaskInstanceKeyType = Tuple[str, str, datetime, int]
class TaskInstance(Base, LoggingMixin):
"""
Task instances store the state of a task instance. This table is the
authority and single source of truth around what tasks have run and the
state they are in.
The SqlAlchemy model doesn't have a SqlAlchemy foreign key to the task or
dag model deliberately to have more control over transactions.
Database transactions on this table should guard against double triggers and
any confusion around which task instances are or aren't ready to run,
even while multiple schedulers may be firing task instances.
"""
__tablename__ = "task_instance"
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
execution_date = Column(UtcDateTime, primary_key=True)
start_date = Column(UtcDateTime)
end_date = Column(UtcDateTime)
duration = Column(Float)
state = Column(String(20))
_try_number = Column('try_number', Integer, default=0)
max_tries = Column(Integer)
hostname = Column(String(1000))
unixname = Column(String(1000))
job_id = Column(Integer)
pool = Column(String(50), nullable=False)
pool_slots = Column(Integer, default=1)
queue = Column(String(256))
priority_weight = Column(Integer)
operator = Column(String(1000))
queued_dttm = Column(UtcDateTime)
pid = Column(Integer)
executor_config = Column(PickleType(pickler=dill))
# If adding new fields here then remember to add them to
# refresh_from_db() or they won't display in the UI correctly
__table_args__ = (
Index('ti_dag_state', dag_id, state),
Index('ti_dag_date', dag_id, execution_date),
Index('ti_state', state),
Index('ti_state_lkp', dag_id, task_id, execution_date, state),
Index('ti_pool', pool, state, priority_weight),
Index('ti_job_id', job_id),
)
def __init__(self, task, execution_date: datetime, state: Optional[str] = None):
self.dag_id = task.dag_id
self.task_id = task.task_id
self.task = task
self.refresh_from_task(task)
self._log = logging.getLogger("airflow.task")
# make sure we have a localized execution_date stored in UTC
if execution_date and not timezone.is_localized(execution_date):
self.log.warning("execution date %s has no timezone information. Using "
"default from dag or system", execution_date)
if self.task.has_dag():
execution_date = timezone.make_aware(execution_date,
self.task.dag.timezone)
else:
execution_date = timezone.make_aware(execution_date)
execution_date = timezone.convert_to_utc(execution_date)
self.execution_date = execution_date
self.try_number = 0
self.unixname = getpass.getuser()
if state:
self.state = state
self.hostname = ''
self.init_on_load()
# Is this TaskInstance being currently running within `airflow tasks run --raw`.
# Not persisted to the database so only valid for the current process
self.raw = False
@reconstructor
def init_on_load(self):
""" Initialize the attributes that aren't stored in the DB. """
self.test_mode = False # can be changed when calling 'run'
@property
def try_number(self):
"""
Return the try number that this task number will be when it is actually
run.
If the TaskInstance is currently running, this will match the column in the
database, in all other cases this will be incremented.
"""
# This is designed so that task logs end up in the right file.
if self.state == State.RUNNING:
return self._try_number
return self._try_number + 1
@try_number.setter
def try_number(self, value):
self._try_number = value
@property
def prev_attempted_tries(self):
"""
Based on this instance's try_number, this will calculate
the number of previously attempted tries, defaulting to 0.
"""
# Expose this for the Task Tries and Gantt graph views.
# Using `try_number` throws off the counts for non-running tasks.
# Also useful in error logging contexts to get
# the try number for the last try that was attempted.
# https://issues.apache.org/jira/browse/AIRFLOW-2143
return self._try_number
@property
def next_try_number(self):
return self._try_number + 1
def command_as_list(
self,
mark_success=False,
ignore_all_deps=False,
ignore_task_deps=False,
ignore_depends_on_past=False,
ignore_ti_state=False,
local=False,
pickle_id=None,
raw=False,
job_id=None,
pool=None,
cfg_path=None):
"""
Returns a command that can be executed anywhere where airflow is
installed. This command is part of the message sent to executors by
the orchestrator.
"""
dag = self.task.dag
should_pass_filepath = not pickle_id and dag
if should_pass_filepath and dag.full_filepath != dag.filepath:
path = "DAGS_FOLDER/{}".format(dag.filepath)
elif should_pass_filepath and dag.full_filepath:
path = dag.full_filepath
else:
path = None
return TaskInstance.generate_command(
self.dag_id,
self.task_id,
self.execution_date,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_ti_state=ignore_ti_state,
local=local,
pickle_id=pickle_id,
file_path=path,
raw=raw,
job_id=job_id,
pool=pool,
cfg_path=cfg_path)
@staticmethod
def generate_command(dag_id: str,
task_id: str,
execution_date: datetime,
mark_success: Optional[bool] = False,
ignore_all_deps: Optional[bool] = False,
ignore_depends_on_past: Optional[bool] = False,
ignore_task_deps: Optional[bool] = False,
ignore_ti_state: Optional[bool] = False,
local: Optional[bool] = False,
pickle_id: Optional[str] = None,
file_path: Optional[str] = None,
raw: Optional[bool] = False,
job_id: Optional[str] = None,
pool: Optional[str] = None,
cfg_path: Optional[str] = None
) -> List[str]:
"""
Generates the shell command required to execute this task instance.
:param dag_id: DAG ID
:type dag_id: str
:param task_id: Task ID
:type task_id: str
:param execution_date: Execution date for the task
:type execution_date: datetime
:param mark_success: Whether to mark the task as successful
:type mark_success: Optional[bool]
:param ignore_all_deps: Ignore all ignorable dependencies.
Overrides the other ignore_* parameters.
:type ignore_all_deps: Optional[bool]
:param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
(e.g. for Backfills)
:type ignore_depends_on_past: Optional[bool]
:param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
and trigger rule
:type ignore_task_deps: Optional[bool]
:param ignore_ti_state: Ignore the task instance's previous failure/success
:type ignore_ti_state: Optional[bool]
:param local: Whether to run the task locally
:type local: Optional[bool]
:param pickle_id: If the DAG was serialized to the DB, the ID
associated with the pickled DAG
:type pickle_id: Optional[str]
:param file_path: path to the file containing the DAG definition
:type file_path: Optional[str]
:param raw: raw mode (needs more details)
:type raw: Optional[bool]
:param job_id: job ID (needs more details)
:type job_id: Optional[str]
:param pool: the Airflow pool that the task should run in
:type pool: Optional[str]
:param cfg_path: the Path to the configuration file
:type cfg_path: Optional[str]
:return: shell command that can be used to run the task instance
:rtype: list[str]
"""
iso = execution_date.isoformat()
cmd = ["airflow", "tasks", "run", dag_id, task_id, iso]
if mark_success:
cmd.extend(["--mark-success"])
if pickle_id:
cmd.extend(["--pickle", pickle_id])
if job_id:
cmd.extend(["--job-id", str(job_id)])
if ignore_all_deps:
cmd.extend(["--ignore-all-dependencies"])
if ignore_task_deps:
cmd.extend(["--ignore-dependencies"])
if ignore_depends_on_past:
cmd.extend(["--ignore-depends-on-past"])
if ignore_ti_state:
cmd.extend(["--force"])
if local:
cmd.extend(["--local"])
if pool:
cmd.extend(["--pool", pool])
if raw:
cmd.extend(["--raw"])
if file_path:
cmd.extend(["--subdir", file_path])
if cfg_path:
cmd.extend(["--cfg-path", cfg_path])
return cmd
@property
def log_filepath(self):
iso = self.execution_date.isoformat()
log = os.path.expanduser(conf.get('logging', 'BASE_LOG_FOLDER'))
return ("{log}/{dag_id}/{task_id}/{iso}.log".format(
log=log, dag_id=self.dag_id, task_id=self.task_id, iso=iso))
@property
def log_url(self):
iso = quote(self.execution_date.isoformat())
base_url = conf.get('webserver', 'BASE_URL')
return base_url + (
"/log?"
"execution_date={iso}"
"&task_id={task_id}"
"&dag_id={dag_id}"
).format(iso=iso, task_id=self.task_id, dag_id=self.dag_id)
@property
def mark_success_url(self):
iso = quote(self.execution_date.isoformat())
base_url = conf.get('webserver', 'BASE_URL')
return base_url + (
"/success"
"?task_id={task_id}"
"&dag_id={dag_id}"
"&execution_date={iso}"
"&upstream=false"
"&downstream=false"
).format(task_id=self.task_id, dag_id=self.dag_id, iso=iso)
@provide_session
def current_state(self, session=None) -> str:
"""
Get the very latest state from the database. If a session is passed,
we use it and looking up the state becomes part of the session;
otherwise a new session is used.
"""
ti = session.query(TaskInstance).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.execution_date == self.execution_date,
).all()
if ti:
state = ti[0].state
else:
state = None
return state
@provide_session
def error(self, session=None):
"""
Forces the task instance's state to FAILED in the database.
"""
self.log.error("Recording the task instance as FAILED")
self.state = State.FAILED
session.merge(self)
session.commit()
@provide_session
def refresh_from_db(self, session=None, lock_for_update=False) -> None:
"""
Refreshes the task instance from the database based on the primary key
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
"""
qry = session.query(TaskInstance).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.execution_date == self.execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
if ti:
# Fields ordered per model definition
self.start_date = ti.start_date
self.end_date = ti.end_date
self.duration = ti.duration
self.state = ti.state
# Get the raw value of try_number column, don't read through the
# accessor here otherwise it will be incremented by one already.
self.try_number = ti._try_number
self.max_tries = ti.max_tries
self.hostname = ti.hostname
self.unixname = ti.unixname
self.job_id = ti.job_id
self.pool = ti.pool
self.pool_slots = ti.pool_slots or 1
self.queue = ti.queue
self.priority_weight = ti.priority_weight
self.operator = ti.operator
self.queued_dttm = ti.queued_dttm
self.pid = ti.pid
else:
self.state = None
def refresh_from_task(self, task, pool_override=None):
"""
Copy common attributes from the given task.
:param task: The task object to copy from
:type task: airflow.models.BaseOperator
:param pool_override: Use the pool_override instead of task's pool
:type pool_override: str
"""
self.queue = task.queue
self.pool = pool_override or task.pool
self.pool_slots = task.pool_slots
self.priority_weight = task.priority_weight_total
self.run_as_user = task.run_as_user
self.max_tries = task.retries
self.executor_config = task.executor_config
self.operator = task.__class__.__name__
@provide_session
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit()
@property
def key(self) -> TaskInstanceKeyType:
"""
Returns a tuple that identifies the task instance uniquely
"""
return self.dag_id, self.task_id, self.execution_date, self.try_number
@provide_session
def set_state(self, state, session=None, commit=True):
self.state = state
self.start_date = timezone.utcnow()
self.end_date = timezone.utcnow()
session.merge(self)
if commit:
session.commit()
@property
def is_premature(self):
"""
        Returns whether a task is in UP_FOR_RETRY state and its retry interval
        has not yet elapsed (i.e. the retry is still waiting).
"""
# is the task still in the retry waiting period?
return self.state == State.UP_FOR_RETRY and not self.ready_for_retry()
@provide_session
def are_dependents_done(self, session=None):
"""
Checks whether the dependents of this task instance have all succeeded.
This is meant to be used by wait_for_downstream.
This is useful when you do not want to start processing the next
schedule of a task until the dependents are done. For instance,
if the task DROPs and recreates a table.
"""
task = self.task
if not task.downstream_task_ids:
return True
ti = session.query(func.count(TaskInstance.task_id)).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id.in_(task.downstream_task_ids),
TaskInstance.execution_date == self.execution_date,
TaskInstance.state == State.SUCCESS,
)
count = ti[0][0]
return count == len(task.downstream_task_ids)
@provide_session
def get_previous_ti(
self,
state: Optional[str] = None,
session: Session = None
) -> Optional['TaskInstance']:
"""
The task instance for the task that ran before this task instance.
        :param state: If passed, it only takes into account instances of a specific state.
"""
dag = self.task.dag
if dag:
dr = self.get_dagrun(session=session)
# LEGACY: most likely running from unit tests
if not dr:
# Means that this TaskInstance is NOT being run from a DR, but from a catchup
previous_scheduled_date = dag.previous_schedule(self.execution_date)
if not previous_scheduled_date:
return None
return TaskInstance(task=self.task, execution_date=previous_scheduled_date)
dr.dag = dag
            # We always ignore schedule in dagrun lookup when `state` is given or `schedule_interval` is None.
# For legacy reasons, when `catchup=True`, we use `get_previous_scheduled_dagrun` unless
# `ignore_schedule` is `True`.
ignore_schedule = state is not None or dag.schedule_interval is None
if dag.catchup is True and not ignore_schedule:
last_dagrun = dr.get_previous_scheduled_dagrun(session=session)
else:
last_dagrun = dr.get_previous_dagrun(session=session, state=state)
if last_dagrun:
return last_dagrun.get_task_instance(self.task_id, session=session)
return None
@property
def previous_ti(self):
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
""",
DeprecationWarning,
stacklevel=2,
)
return self.get_previous_ti()
@property
def previous_ti_success(self) -> Optional['TaskInstance']:
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_ti` method.
""",
DeprecationWarning,
stacklevel=2,
)
return self.get_previous_ti(state=State.SUCCESS)
@provide_session
def get_previous_execution_date(
self,
state: Optional[str] = None,
session: Session = None,
) -> Optional[pendulum.datetime]:
"""
        The execution date from the previous task instance, if one exists.
        :param state: If passed, it only takes into account instances of a specific state.
"""
self.log.debug("previous_execution_date was called")
prev_ti = self.get_previous_ti(state=state, session=session)
return prev_ti and prev_ti.execution_date
@provide_session
def get_previous_start_date(
self,
state: Optional[str] = None,
session: Session = None
) -> Optional[pendulum.datetime]:
"""
        The start date from the previous task instance, if one exists.
        :param state: If passed, it only takes into account instances of a specific state.
"""
self.log.debug("previous_start_date was called")
prev_ti = self.get_previous_ti(state=state, session=session)
return prev_ti and prev_ti.start_date
@property
def previous_start_date_success(self) -> Optional[pendulum.datetime]:
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_start_date` method.
"""
warnings.warn(
"""
This attribute is deprecated.
Please use `airflow.models.taskinstance.TaskInstance.get_previous_start_date` method.
""",
DeprecationWarning,
stacklevel=2,
)
return self.get_previous_start_date(state=State.SUCCESS)
@provide_session
def are_dependencies_met(
self,
dep_context=None,
session=None,
verbose=False):
"""
Returns whether or not all the conditions are met for this task instance to be run
given the context for the dependencies (e.g. a task instance being force run from
the UI will ignore some dependencies).
:param dep_context: The execution context that determines the dependencies that
should be evaluated.
:type dep_context: DepContext
:param session: database session
:type session: sqlalchemy.orm.session.Session
        :param verbose: whether to log details of failed dependencies at
            info or debug log level
:type verbose: bool
"""
dep_context = dep_context or DepContext()
failed = False
verbose_aware_logger = self.log.info if verbose else self.log.debug
for dep_status in self.get_failed_dep_statuses(
dep_context=dep_context,
session=session):
failed = True
verbose_aware_logger(
"Dependencies not met for %s, dependency '%s' FAILED: %s",
self, dep_status.dep_name, dep_status.reason
)
if failed:
return False
verbose_aware_logger("Dependencies all met for %s", self)
return True
@provide_session
def get_failed_dep_statuses(
self,
dep_context=None,
session=None):
dep_context = dep_context or DepContext()
for dep in dep_context.deps | self.task.deps:
for dep_status in dep.get_dep_statuses(
self,
session,
dep_context):
self.log.debug(
"%s dependency '%s' PASSED: %s, %s",
self, dep_status.dep_name, dep_status.passed, dep_status.reason
)
if not dep_status.passed:
yield dep_status
def __repr__(self):
return (
"<TaskInstance: {ti.dag_id}.{ti.task_id} "
"{ti.execution_date} [{ti.state}]>"
).format(ti=self)
def next_retry_datetime(self):
"""
Get datetime of the next retry if the task instance fails. For exponential
backoff, retry_delay is used as base and will be converted to seconds.
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
# If the min_backoff calculation is below 1, it will be converted to 0 via int. Thus,
# we must round up prior to converting to an int, otherwise a divide by zero error
            # will occur in the modded_hash calculation.
min_backoff = int(math.ceil(delay.total_seconds() * (2 ** (self.try_number - 2))))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
            # deterministic jitter: between min_backoff and 2 * min_backoff - 1 seconds
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
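    # The sketch below is an editorial illustration and not part of Airflow's
    # API: it re-derives the jittered exponential backoff used in
    # next_retry_datetime() without touching the ORM, so it can run on its own.
    # The helper name and the example inputs are assumptions.
    @staticmethod
    def _exponential_backoff_sketch(base_delay_seconds=300, try_number=3, ti_hash=123456789):
        # The base delay doubles with every completed try.
        min_backoff = int(math.ceil(base_delay_seconds * (2 ** (try_number - 2))))
        # Deterministic jitter keeps the delay in [min_backoff, 2 * min_backoff - 1] seconds.
        modded_hash = min_backoff + ti_hash % min_backoff
        # Cap before building the timedelta so it cannot overflow.
        return timedelta(seconds=min(modded_hash, timedelta.max.total_seconds() - 1))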
def ready_for_retry(self):
"""
        Checks whether the task instance is in the right state and timeframe
to be retried.
"""
return (self.state == State.UP_FOR_RETRY and
self.next_retry_datetime() < timezone.utcnow())
@provide_session
def get_dagrun(self, session=None):
"""
Returns the DagRun for this TaskInstance
:param session:
:return: DagRun
"""
from airflow.models.dagrun import DagRun # Avoid circular import
dr = session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date == self.execution_date
).first()
return dr
@provide_session
def check_and_change_state_before_execution(
self,
verbose: bool = True,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
job_id: Optional[str] = None,
pool: Optional[str] = None,
session=None) -> bool:
"""
Checks dependencies and then sets state to RUNNING if they are met. Returns
True if and only if state is set to RUNNING, which implies that task should be
executed, in preparation for _run_raw_task
:param verbose: whether to turn on more verbose logging
:type verbose: bool
:param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
:type ignore_all_deps: bool
:param ignore_depends_on_past: Ignore depends_on_past DAG attribute
:type ignore_depends_on_past: bool
:param ignore_task_deps: Don't check the dependencies of this TaskInstance's task
:type ignore_task_deps: bool
:param ignore_ti_state: Disregards previous task instance state
:type ignore_ti_state: bool
:param mark_success: Don't run the task, mark its state as success
:type mark_success: bool
:param test_mode: Doesn't record success or failure in the DB
:type test_mode: bool
:param pool: specifies the pool to use to run the task instance
:type pool: str
:return: whether the state was changed to running or not
:rtype: bool
"""
task = self.task
self.refresh_from_task(task, pool_override=pool)
self.test_mode = test_mode
self.refresh_from_db(session=session, lock_for_update=True)
self.job_id = job_id
self.hostname = get_hostname()
if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS:
Stats.incr('previously_succeeded', 1, 1)
# TODO: Logging needs cleanup, not clear what is being printed
hr = "\n" + ("-" * 80) # Line break
if not mark_success:
# Firstly find non-runnable and non-requeueable tis.
            # Since mark_success is not set, we actually evaluate these dependencies.
non_requeueable_dep_context = DepContext(
deps=RUNNING_DEPS - REQUEUEABLE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_ti_state=ignore_ti_state,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps)
if not self.are_dependencies_met(
dep_context=non_requeueable_dep_context,
session=session,
verbose=True):
session.commit()
return False
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Attempt 1 instead of
# Attempt 0 for the first attempt).
# Set the task start date. In case it was re-scheduled use the initial
# start date that is recorded in task_reschedule table
self.start_date = timezone.utcnow()
task_reschedules = TaskReschedule.find_for_task_instance(self, session)
if task_reschedules:
self.start_date = task_reschedules[0].start_date
# Secondly we find non-runnable but requeueable tis. We reset its state.
# This is because we might have hit concurrency limits,
# e.g. because of backfilling.
dep_context = DepContext(
deps=REQUEUEABLE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
if not self.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
self.state = State.NONE
self.log.warning(hr)
self.log.warning(
"Rescheduling due to concurrency limits reached "
"at task runtime. Attempt %s of "
"%s. State set to NONE.", self.try_number, self.max_tries + 1
)
self.log.warning(hr)
self.queued_dttm = timezone.utcnow()
session.merge(self)
session.commit()
return False
# print status message
self.log.info(hr)
self.log.info("Starting attempt %s of %s", self.try_number, self.max_tries + 1)
self.log.info(hr)
self._try_number += 1
if not test_mode:
session.add(Log(State.RUNNING, self))
self.state = State.RUNNING
self.pid = os.getpid()
self.end_date = None
if not test_mode:
session.merge(self)
session.commit()
# Closing all pooled connections to prevent
# "max number of connections reached"
settings.engine.dispose() # type: ignore
if verbose:
if mark_success:
self.log.info("Marking success for %s on %s", self.task, self.execution_date)
else:
self.log.info("Executing %s on %s", self.task, self.execution_date)
return True
@provide_session
@Sentry.enrich_errors
def _run_raw_task(
self,
mark_success: bool = False,
test_mode: bool = False,
job_id: Optional[str] = None,
pool: Optional[str] = None,
session=None) -> None:
"""
Immediately runs the task (without checking or changing db state
before execution) and then sets the appropriate final state after
completion and runs any post-execute callbacks. Meant to be called
only after another function changes the state to running.
:param mark_success: Don't run the task, mark its state as success
:type mark_success: bool
:param test_mode: Doesn't record success or failure in the DB
:type test_mode: bool
:param pool: specifies the pool to use to run the task instance
:type pool: str
"""
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.models.renderedtifields import RenderedTaskInstanceFields as RTIF
task = self.task
self.test_mode = test_mode
self.refresh_from_task(task, pool_override=pool)
self.refresh_from_db(session=session)
self.job_id = job_id
self.hostname = get_hostname()
context = {} # type: Dict
actual_start_date = timezone.utcnow()
try:
if not mark_success:
context = self.get_template_context()
task_copy = copy.copy(task)
# Sensors in `poke` mode can block execution of DAGs when running
                # with a single-process executor, thus we change the mode to `reschedule`
                # to allow parallel tasks to be scheduled and executed
if isinstance(task_copy, BaseSensorOperator) and \
conf.get('core', 'executor') == "DebugExecutor":
self.log.warning("DebugExecutor changes sensor mode to 'reschedule'.")
task_copy.mode = 'reschedule'
self.task = task_copy
def signal_handler(signum, frame):
self.log.error("Received SIGTERM. Terminating subprocesses.")
task_copy.on_kill()
raise AirflowException("Task received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
# Don't clear Xcom until the task is certain to execute
self.clear_xcom_data()
start_time = time.time()
self.render_templates(context=context)
if STORE_SERIALIZED_DAGS:
RTIF.write(RTIF(ti=self, render_templates=False), session=session)
RTIF.delete_old_records(self.task_id, self.dag_id, session=session)
# Export context to make it available for operators to use.
airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
self.log.info("Exporting the following env vars:\n%s",
'\n'.join(["{}={}".format(k, v)
for k, v in airflow_context_vars.items()]))
os.environ.update(airflow_context_vars)
task_copy.pre_execute(context=context)
try:
if task.on_execute_callback:
task.on_execute_callback(context)
except Exception as e3:
self.log.error("Failed when executing execute callback")
self.log.exception(e3)
# If a timeout is specified for the task, make it fail
# if it goes beyond
result = None
if task_copy.execution_timeout:
try:
with timeout(int(
task_copy.execution_timeout.total_seconds())):
result = task_copy.execute(context=context)
except AirflowTaskTimeout:
task_copy.on_kill()
raise
else:
result = task_copy.execute(context=context)
# If the task returns a result, push an XCom containing it
if task_copy.do_xcom_push and result is not None:
self.xcom_push(key=XCOM_RETURN_KEY, value=result)
task_copy.post_execute(context=context, result=result)
end_time = time.time()
duration = end_time - start_time
Stats.timing(
'dag.{dag_id}.{task_id}.duration'.format(
dag_id=task_copy.dag_id,
task_id=task_copy.task_id),
duration)
Stats.incr('operator_successes_{}'.format(
self.task.__class__.__name__), 1, 1)
Stats.incr('ti_successes')
self.refresh_from_db(lock_for_update=True)
self.state = State.SUCCESS
except AirflowSkipException as e:
# Recording SKIP
# log only if exception has any arguments to prevent log flooding
if e.args:
self.log.info(e)
self.refresh_from_db(lock_for_update=True)
self.state = State.SKIPPED
self.log.info(
'Marking task as SKIPPED.'
                ' dag_id=%s, task_id=%s, execution_date=%s, start_date=%s, end_date=%s',
self.dag_id,
self.task_id,
self.execution_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'execution_date') and self.execution_date else '',
self.start_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'start_date') and self.start_date else '',
self.end_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'end_date') and self.end_date else '')
except AirflowRescheduleException as reschedule_exception:
self.refresh_from_db()
self._handle_reschedule(actual_start_date, reschedule_exception, test_mode, context)
return
except AirflowException as e:
self.refresh_from_db()
# for case when task is marked as success/failed externally
# current behavior doesn't hit the success callback
if self.state in {State.SUCCESS, State.FAILED}:
return
else:
self.handle_failure(e, test_mode, context)
raise
except (Exception, KeyboardInterrupt) as e:
self.handle_failure(e, test_mode, context)
raise
# Success callback
try:
if task.on_success_callback:
task.on_success_callback(context)
except Exception as e3:
self.log.error("Failed when executing success callback")
self.log.exception(e3)
# Recording SUCCESS
self.end_date = timezone.utcnow()
self.log.info(
'Marking task as SUCCESS.'
            ' dag_id=%s, task_id=%s, execution_date=%s, start_date=%s, end_date=%s',
self.dag_id,
self.task_id,
self.execution_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'execution_date') and self.execution_date else '',
self.start_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'start_date') and self.start_date else '',
self.end_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'end_date') and self.end_date else '')
self.set_duration()
if not test_mode:
session.add(Log(self.state, self))
session.merge(self)
session.commit()
@provide_session
def run(
self,
verbose: bool = True,
ignore_all_deps: bool = False,
ignore_depends_on_past: bool = False,
ignore_task_deps: bool = False,
ignore_ti_state: bool = False,
mark_success: bool = False,
test_mode: bool = False,
job_id: Optional[str] = None,
pool: Optional[str] = None,
session=None) -> None:
res = self.check_and_change_state_before_execution(
verbose=verbose,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
if res:
self._run_raw_task(
mark_success=mark_success,
test_mode=test_mode,
job_id=job_id,
pool=pool,
session=session)
def dry_run(self):
task = self.task
task_copy = copy.copy(task)
self.task = task_copy
self.render_templates()
task_copy.dry_run()
@provide_session
def _handle_reschedule(self, actual_start_date, reschedule_exception, test_mode=False, context=None,
session=None):
# Don't record reschedule request in test mode
if test_mode:
return
self.end_date = timezone.utcnow()
self.set_duration()
# Log reschedule request
session.add(TaskReschedule(self.task, self.execution_date, self._try_number,
actual_start_date, self.end_date,
reschedule_exception.reschedule_date))
# set state
self.state = State.UP_FOR_RESCHEDULE
# Decrement try_number so subsequent runs will use the same try number and write
# to same log file.
self._try_number -= 1
session.merge(self)
session.commit()
self.log.info('Rescheduling task, marking task as UP_FOR_RESCHEDULE')
@provide_session
def handle_failure(self, error, test_mode=None, context=None, session=None):
if test_mode is None:
test_mode = self.test_mode
if context is None:
context = self.get_template_context()
self.log.exception(error)
task = self.task
self.end_date = timezone.utcnow()
self.set_duration()
Stats.incr('operator_failures_{}'.format(task.__class__.__name__), 1, 1)
Stats.incr('ti_failures')
if not test_mode:
session.add(Log(State.FAILED, self))
# Log failure duration
session.add(TaskFail(task, self.execution_date, self.start_date, self.end_date))
if context is not None:
context['exception'] = error
# Let's go deeper
try:
# Since this function is called only when the TaskInstance state is running,
# try_number contains the current try_number (not the next). We
# only mark task instance as FAILED if the next task instance
# try_number exceeds the max_tries.
if self.is_eligible_to_retry():
self.state = State.UP_FOR_RETRY
self.log.info('Marking task as UP_FOR_RETRY')
if task.email_on_retry and task.email:
self.email_alert(error)
else:
self.state = State.FAILED
if task.retries:
self.log.info(
'All retries failed; marking task as FAILED.'
                        ' dag_id=%s, task_id=%s, execution_date=%s, start_date=%s, end_date=%s',
self.dag_id,
self.task_id,
self.execution_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'execution_date') and self.execution_date else '',
self.start_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'start_date') and self.start_date else '',
self.end_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'end_date') and self.end_date else '')
else:
self.log.info(
'Marking task as FAILED.'
                        ' dag_id=%s, task_id=%s, execution_date=%s, start_date=%s, end_date=%s',
self.dag_id,
self.task_id,
self.execution_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'execution_date') and self.execution_date else '',
self.start_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'start_date') and self.start_date else '',
self.end_date.strftime('%Y%m%dT%H%M%S') if hasattr(
self,
'end_date') and self.end_date else '')
if task.email_on_failure and task.email:
self.email_alert(error)
except Exception as e2:
self.log.error('Failed to send email to: %s', task.email)
self.log.exception(e2)
# Handling callbacks pessimistically
try:
if self.state == State.UP_FOR_RETRY and task.on_retry_callback:
task.on_retry_callback(context)
if self.state == State.FAILED and task.on_failure_callback:
task.on_failure_callback(context)
except Exception as e3:
self.log.error("Failed at executing callback")
self.log.exception(e3)
if not test_mode:
session.merge(self)
session.commit()
def is_eligible_to_retry(self):
"""Is task instance is eligible for retry"""
return self.task.retries and self.try_number <= self.max_tries
@provide_session
def get_template_context(self, session=None) -> Dict[str, Any]:
task = self.task
from airflow import macros
params = {} # type: Dict[str, Any]
run_id = ''
dag_run = None
if hasattr(task, 'dag'):
if task.dag.params:
params.update(task.dag.params)
from airflow.models.dagrun import DagRun # Avoid circular import
dag_run = (
session.query(DagRun)
.filter_by(
dag_id=task.dag.dag_id,
execution_date=self.execution_date)
.first()
)
run_id = dag_run.run_id if dag_run else None
session.expunge_all()
session.commit()
ds = self.execution_date.strftime('%Y-%m-%d')
ts = self.execution_date.isoformat()
yesterday_ds = (self.execution_date - timedelta(1)).strftime('%Y-%m-%d')
tomorrow_ds = (self.execution_date + timedelta(1)).strftime('%Y-%m-%d')
# For manually triggered dagruns that aren't run on a schedule, next/previous
# schedule dates don't make sense, and should be set to execution date for
# consistency with how execution_date is set for manually triggered tasks, i.e.
# triggered_date == execution_date.
if dag_run and dag_run.external_trigger:
prev_execution_date = self.execution_date
next_execution_date = self.execution_date
else:
prev_execution_date = task.dag.previous_schedule(self.execution_date)
next_execution_date = task.dag.following_schedule(self.execution_date)
next_ds = None
next_ds_nodash = None
if next_execution_date:
next_ds = next_execution_date.strftime('%Y-%m-%d')
next_ds_nodash = next_ds.replace('-', '')
next_execution_date = pendulum.instance(next_execution_date)
prev_ds = None
prev_ds_nodash = None
if prev_execution_date:
prev_ds = prev_execution_date.strftime('%Y-%m-%d')
prev_ds_nodash = prev_ds.replace('-', '')
prev_execution_date = pendulum.instance(prev_execution_date)
ds_nodash = ds.replace('-', '')
ts_nodash = self.execution_date.strftime('%Y%m%dT%H%M%S')
ts_nodash_with_tz = ts.replace('-', '').replace(':', '')
yesterday_ds_nodash = yesterday_ds.replace('-', '')
tomorrow_ds_nodash = tomorrow_ds.replace('-', '')
ti_key_str = "{dag_id}__{task_id}__{ds_nodash}".format(
dag_id=task.dag_id, task_id=task.task_id, ds_nodash=ds_nodash)
if task.params:
params.update(task.params)
if conf.getboolean('core', 'dag_run_conf_overrides_params'):
self.overwrite_params_with_dag_run_conf(params=params, dag_run=dag_run)
class VariableAccessor:
"""
Wrapper around Variable. This way you can get variables in
templates by using ``{{ var.value.variable_name }}`` or
``{{ var.value.get('variable_name', 'fallback') }}``.
"""
def __init__(self):
self.var = None
def __getattr__(
self,
item: str,
):
self.var = Variable.get(item)
return self.var
def __repr__(self):
return str(self.var)
@staticmethod
def get(
item: str,
default_var: Any = Variable._Variable__NO_DEFAULT_SENTINEL,
):
return Variable.get(item, default_var=default_var)
class VariableJsonAccessor:
"""
Wrapper around Variable. This way you can get variables in
templates by using ``{{ var.json.variable_name }}`` or
``{{ var.json.get('variable_name', {'fall': 'back'}) }}``.
"""
def __init__(self):
self.var = None
def __getattr__(
self,
item: str,
):
self.var = Variable.get(item, deserialize_json=True)
return self.var
def __repr__(self):
return str(self.var)
@staticmethod
def get(
item: str,
default_var: Any = Variable._Variable__NO_DEFAULT_SENTINEL,
):
return Variable.get(item, default_var=default_var, deserialize_json=True)
return {
'conf': conf,
'dag': task.dag,
'dag_run': dag_run,
'ds': ds,
'ds_nodash': ds_nodash,
'execution_date': pendulum.instance(self.execution_date),
'inlets': task.inlets,
'macros': macros,
'next_ds': next_ds,
'next_ds_nodash': next_ds_nodash,
'next_execution_date': next_execution_date,
'outlets': task.outlets,
'params': params,
'prev_ds': prev_ds,
'prev_ds_nodash': prev_ds_nodash,
'prev_execution_date': prev_execution_date,
'prev_execution_date_success': lazy_object_proxy.Proxy(
lambda: self.get_previous_execution_date(state=State.SUCCESS)),
'prev_start_date_success': lazy_object_proxy.Proxy(
lambda: self.get_previous_start_date(state=State.SUCCESS)),
'run_id': run_id,
'task': task,
'task_instance': self,
'task_instance_key_str': ti_key_str,
'test_mode': self.test_mode,
'ti': self,
'tomorrow_ds': tomorrow_ds,
'tomorrow_ds_nodash': tomorrow_ds_nodash,
'ts': ts,
'ts_nodash': ts_nodash,
'ts_nodash_with_tz': ts_nodash_with_tz,
'var': {
'json': VariableJsonAccessor(),
'value': VariableAccessor(),
},
'yesterday_ds': yesterday_ds,
'yesterday_ds_nodash': yesterday_ds_nodash,
}
def get_rendered_template_fields(self):
"""
Fetch rendered template fields from DB if Serialization is enabled.
Else just render the templates
"""
from airflow.models.renderedtifields import RenderedTaskInstanceFields
if STORE_SERIALIZED_DAGS:
rtif = RenderedTaskInstanceFields.get_templated_fields(self)
if rtif:
for field_name, rendered_value in rtif.items():
setattr(self.task, field_name, rendered_value)
else:
try:
self.render_templates()
except (TemplateAssertionError, UndefinedError) as e:
raise AirflowException(
"Webserver does not have access to User-defined Macros or Filters "
"when Dag Serialization is enabled. Hence for the task that have not yet "
"started running, please use 'airflow tasks render' for debugging the "
"rendering of template_fields."
) from e
else:
self.render_templates()
def overwrite_params_with_dag_run_conf(self, params, dag_run):
if dag_run and dag_run.conf:
params.update(dag_run.conf)
def render_templates(self, context: Optional[Dict] = None) -> None:
"""Render templates in the operator fields."""
if not context:
context = self.get_template_context()
self.task.render_template_fields(context)
def email_alert(self, exception):
exception_html = str(exception).replace('\n', '<br>')
jinja_context = self.get_template_context()
# This function is called after changing the state
# from State.RUNNING so use prev_attempted_tries.
jinja_context.update(dict(
exception=exception,
exception_html=exception_html,
try_number=self.prev_attempted_tries,
max_tries=self.max_tries))
jinja_env = self.task.get_template_env()
default_subject = 'Airflow alert: {{ti}}'
# For reporting purposes, we report based on 1-indexed,
# not 0-indexed lists (i.e. Try 1 instead of
# Try 0 for the first attempt).
default_html_content = (
'Try {{try_number}} out of {{max_tries + 1}}<br>'
'Exception:<br>{{exception_html}}<br>'
'Log: <a href="{{ti.log_url}}">Link</a><br>'
'Host: {{ti.hostname}}<br>'
'Log file: {{ti.log_filepath}}<br>'
'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
def render(key, content):
if conf.has_option('email', key):
path = conf.get('email', key)
with open(path) as file:
content = file.read()
return jinja_env.from_string(content).render(**jinja_context)
subject = render('subject_template', default_subject)
html_content = render('html_content_template', default_html_content)
try:
send_email(self.task.email, subject, html_content)
except Exception:
default_html_content_err = (
'Try {{try_number}} out of {{max_tries + 1}}<br>'
'Exception:<br>Failed attempt to attach error logs<br>'
'Log: <a href="{{ti.log_url}}">Link</a><br>'
'Host: {{ti.hostname}}<br>'
'Log file: {{ti.log_filepath}}<br>'
'Mark success: <a href="{{ti.mark_success_url}}">Link</a><br>'
)
html_content_err = render('html_content_template', default_html_content_err)
send_email(self.task.email, subject, html_content_err)
def set_duration(self) -> None:
if self.end_date and self.start_date:
self.duration = (self.end_date - self.start_date).total_seconds()
else:
self.duration = None
def xcom_push(
self,
key: str,
value: Any,
execution_date: Optional[datetime] = None) -> None:
"""
Make an XCom available for tasks to pull.
:param key: A key for the XCom
:type key: str
:param value: A value for the XCom. The value is pickled and stored
in the database.
:type value: any pickleable object
:param execution_date: if provided, the XCom will not be visible until
this date. This can be used, for example, to send a message to a
task on a future date without it being immediately visible.
:type execution_date: datetime
"""
if execution_date and execution_date < self.execution_date:
raise ValueError(
'execution_date can not be in the past (current '
'execution_date is {}; received {})'.format(
self.execution_date, execution_date))
XCom.set(
key=key,
value=value,
task_id=self.task_id,
dag_id=self.dag_id,
execution_date=execution_date or self.execution_date)
def xcom_pull(
self,
task_ids: Optional[Union[str, Iterable[str]]] = None,
dag_id: Optional[str] = None,
key: str = XCOM_RETURN_KEY,
include_prior_dates: bool = False) -> Any:
"""
Pull XComs that optionally meet certain criteria.
The default value for `key` limits the search to XComs
that were returned by other tasks (as opposed to those that were pushed
manually). To remove this filter, pass key=None (or any desired value).
If a single task_id string is provided, the result is the value of the
most recent matching XCom from that task_id. If multiple task_ids are
provided, a tuple of matching values is returned. None is returned
whenever no matches are found.
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. The default key is 'return_value', also
available as a constant XCOM_RETURN_KEY. This key is automatically
given to XComs returned by tasks (as opposed to being pushed
manually). To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_id: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
"""
if dag_id is None:
dag_id = self.dag_id
query = XCom.get_many(
execution_date=self.execution_date,
key=key,
dag_ids=dag_id,
task_ids=task_ids,
include_prior_dates=include_prior_dates
).with_entities(XCom.value)
# Since we're only fetching the values field, and not the
# whole class, the @recreate annotation does not kick in.
# Therefore we need to deserialize the fields by ourselves.
if is_container(task_ids):
return [XCom.deserialize_value(xcom) for xcom in query]
else:
xcom = query.first()
if xcom:
return XCom.deserialize_value(xcom)
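    # Editorial usage sketch (not found in this module): inside an operator's
    # execute(context), the two methods above are typically combined as
    #
    #     ti = context['ti']
    #     ti.xcom_push(key='rows_loaded', value=42)
    #     rows = ti.xcom_pull(task_ids='load_task', key='rows_loaded')
    #
    # 'rows_loaded', 'load_task' and the pushed value are illustrative
    # assumptions, not identifiers defined anywhere in Airflow.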
@provide_session
def get_num_running_task_instances(self, session):
# .count() is inefficient
return session.query(func.count()).filter(
TaskInstance.dag_id == self.dag_id,
TaskInstance.task_id == self.task_id,
TaskInstance.state == State.RUNNING
).scalar()
def init_run_context(self, raw=False):
"""
Sets the log context.
"""
self.raw = raw
self._set_context(self)
@staticmethod
def filter_for_tis(
tis: Iterable[Union["TaskInstance", TaskInstanceKeyType]]
) -> Optional[BooleanClauseList]:
"""Returns SQLAlchemy filter to query selected task instances"""
TI = TaskInstance
if not tis:
return None
if all(isinstance(t, tuple) for t in tis):
filter_for_tis = ([and_(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
for dag_id, task_id, execution_date, _ in tis])
return or_(*filter_for_tis)
if all(isinstance(t, TaskInstance) for t in tis):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id, # type: ignore
TI.task_id == ti.task_id, # type: ignore
TI.execution_date == ti.execution_date) # type: ignore
for ti in tis])
return or_(*filter_for_tis)
raise TypeError("All elements must have the same type: `TaskInstance` or `TaskInstanceKey`.")
# State of the task instance.
# Stores string version of the task state.
TaskInstanceStateType = Tuple[TaskInstanceKeyType, str]
class SimpleTaskInstance:
"""
Simplified Task Instance.
Used to send data between processes via Queues.
"""
def __init__(self, ti: TaskInstance):
self._dag_id: str = ti.dag_id
self._task_id: str = ti.task_id
self._execution_date: datetime = ti.execution_date
self._start_date: datetime = ti.start_date
self._end_date: datetime = ti.end_date
self._try_number: int = ti.try_number
self._state: str = ti.state
self._executor_config: Any = ti.executor_config
self._run_as_user: Optional[str] = None
if hasattr(ti, 'run_as_user'):
self._run_as_user = ti.run_as_user
self._pool: Optional[str] = None
if hasattr(ti, 'pool'):
self._pool = ti.pool
self._priority_weight: Optional[int] = None
if hasattr(ti, 'priority_weight'):
self._priority_weight = ti.priority_weight
self._queue: str = ti.queue
self._key = ti.key
# pylint: disable=missing-docstring
@property
def dag_id(self) -> str:
return self._dag_id
@property
def task_id(self) -> str:
return self._task_id
@property
def execution_date(self) -> datetime:
return self._execution_date
@property
def start_date(self) -> datetime:
return self._start_date
@property
def end_date(self) -> datetime:
return self._end_date
@property
def try_number(self) -> int:
return self._try_number
@property
def state(self) -> str:
return self._state
@property
def pool(self) -> Any:
return self._pool
@property
def priority_weight(self) -> Optional[int]:
return self._priority_weight
@property
def queue(self) -> str:
return self._queue
@property
def key(self) -> TaskInstanceKeyType:
return self._key
@property
def executor_config(self):
return self._executor_config
@provide_session
def construct_task_instance(self, session=None, lock_for_update=False) -> TaskInstance:
"""
Construct a TaskInstance from the database based on the primary key
:param session: DB session.
:param lock_for_update: if True, indicates that the database should
lock the TaskInstance (issuing a FOR UPDATE clause) until the
session is committed.
:return: the task instance constructed
"""
qry = session.query(TaskInstance).filter(
TaskInstance.dag_id == self._dag_id,
TaskInstance.task_id == self._task_id,
TaskInstance.execution_date == self._execution_date)
if lock_for_update:
ti = qry.with_for_update().first()
else:
ti = qry.first()
return ti
|
the-stack_106_19280
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Shifted Gompertz CDF bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'ShiftedGompertzCDF',
]
class ShiftedGompertzCDF(bijector.Bijector):
"""Compute `Y = g(X) = (1 - exp(-rate * X)) * exp(-c * exp(-rate * X))`.
This bijector maps inputs from `[-inf, inf]` to `[0, inf]`. The inverse of the
bijector applied to a uniform random variable `X ~ U(0, 1)` gives back a
random variable with the
[Shifted Gompertz distribution](
https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution):
```none
Y ~ ShiftedGompertzCDF(concentration, rate)
pdf(y; c, r) = r * exp(-r * y - exp(-r * y) / c) * (1 + (1 - exp(-r * y)) / c)
```
Note: Even though this is called `ShiftedGompertzCDF`, when applied to the
`Uniform` distribution, this is not the same as applying a `GompertzCDF` with
a `Shift` bijector (i.e. the Shifted Gompertz distribution is not the same as
a Gompertz distribution with a location parameter).
Note: Because the Shifted Gompertz distribution concentrates its mass close
to zero, for larger rates or larger concentrations, `bijector.forward` will
quickly saturate to 1.
"""
def __init__(self,
concentration,
rate,
validate_args=False,
name='shifted_gompertz_cdf'):
"""Instantiates the `ShiftedGompertzCDF` bijector.
Args:
concentration: Positive Float-like `Tensor` that is the same dtype and is
        broadcastable with `rate`.
This is `c` in
`Y = g(X) = (1 - exp(-rate * X)) * exp(-exp(-rate * X) / c)`.
rate: Positive Float-like `Tensor` that is the same dtype and is
broadcastable with `concentration`.
This is `rate` in
`Y = g(X) = (1 - exp(-rate * X)) * exp(-exp(-rate * X) / c)`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[concentration, rate], dtype_hint=tf.float32)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, dtype=dtype, name='concentration')
self._rate = tensor_util.convert_nonref_to_tensor(
rate, dtype=dtype, name='rate')
super(ShiftedGompertzCDF, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
concentration=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
rate=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
@property
def concentration(self):
"""The `c` in `Y = g(X) = (1 - exp(-r * X)) * exp(-exp(-r * X) / c)`."""
return self._concentration
@property
def rate(self):
"""The `r` in `Y = g(X) = (1 - exp(-r * X)) * exp(-exp(-r * X) / c)`."""
return self._rate
@classmethod
def _is_increasing(cls):
return True
def _forward(self, x):
with tf.control_dependencies(self._maybe_assert_valid_x(x)):
rate = tf.convert_to_tensor(self.rate)
log1mexpx = tfp_math.log1mexp(-rate * x)
return tf.math.exp(
log1mexpx - tf.math.exp(-rate * x) / self.concentration)
def _inverse(self, y):
with tf.control_dependencies(self._maybe_assert_valid_y(y)):
concentration = tf.convert_to_tensor(self.concentration)
reciprocal_concentration = tf.math.reciprocal(concentration)
z = -tfp_math.lambertw(
reciprocal_concentration * tf.math.exp(
reciprocal_concentration + tf.math.log(y))) * concentration
# Due to numerical instability, when y approaches 1, this expression
# can be less than -1. We clip the value to prevent that.
z = tf.clip_by_value(z, -1., np.inf)
return -tf.math.log1p(z) / self.rate
def _forward_log_det_jacobian(self, x):
with tf.control_dependencies(self._maybe_assert_valid_x(x)):
rate = tf.convert_to_tensor(self.rate)
concentration = tf.convert_to_tensor(self.concentration)
z = rate * x
return (-z - tf.math.exp(-z) / concentration + tf.math.log1p(
-tf.math.expm1(-z) / concentration) + tf.math.log(rate))
def _maybe_assert_valid_x(self, x):
if not self.validate_args:
return []
return [assert_util.assert_non_negative(
x, message='Forward transformation input must be non-negative.')]
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return []
is_positive = assert_util.assert_non_negative(
y, message='Inverse transformation input must be greater than 0.')
less_than_one = assert_util.assert_less_equal(
y,
tf.constant(1., y.dtype),
message='Inverse transformation input must be less than or equal to 1.')
return [is_positive, less_than_one]
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.rate):
assertions.append(assert_util.assert_positive(
self.rate,
message='Argument `rate` must be positive.'))
if is_init != tensor_util.is_ref(self.concentration):
assertions.append(assert_util.assert_positive(
self.concentration,
message='Argument `concentration` must be positive.'))
return assertions
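def _example_usage():
  """Editorial sketch, not part of the TFP API.

  Demonstrates the forward/inverse round trip implied by the formulas above.
  The parameter values are arbitrary assumptions chosen for illustration.
  """
  bij = ShiftedGompertzCDF(concentration=0.5, rate=2.)
  x = tf.constant([0.1, 1., 3.])
  y = bij.forward(x)     # CDF values in (0, 1)
  return bij.inverse(y)  # approximately recovers x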
|
the-stack_106_19281
|
from __future__ import division
import numpy as np
import pandas
import math
import os
import types
import h5py
from six.moves import cPickle as pickle
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("white")
from ML_Tools.Plotting_And_Evaluation.Plotters import *
from ML_Tools.General.Misc_Functions import *
from ML_Tools.General.Ensemble_Functions import ensemblePredict, loadModel
from ML_Tools.General.Batch_Train import getFeature, batchEnsemblePredict
from keras.models import Sequential,model_from_json, load_model
from sklearn.model_selection import StratifiedKFold
dirLoc = "../Data/"
wFactor = 250000/50000
def AMS(s, b):
""" Approximate Median Significance defined as:
AMS = sqrt(
2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s}
)
where b_r = 10, b = background, s = signal, log is natural logarithm """
br = 10.0
    radicand = 2 * ((s + b + br) * math.log(1.0 + s / (b + br)) - s)
if radicand < 0:
print('radicand is negative. Exiting')
exit()
else:
return math.sqrt(radicand)
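def amsExample():
    """Editorial sanity check of the AMS formula above (not in the original script).

    With an assumed s=10 signal and b=100 background events:
    AMS = sqrt(2*((10+100+10)*ln(1+10/110) - 10)) which is roughly 0.94.
    """
    return AMS(10.0, 100.0)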
def amsScan(inData, scale=False):
best = [0,-1]
ams = []
for index, row in inData.iterrows():
s = scale[0]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 1), 'gen_weight'])
b = scale[1]*np.sum(inData.loc[(inData['pred_class'] >= row['pred_class']) & (inData['gen_target'] == 0), 'gen_weight'])
ams.append(AMS(s, b))
if ams[-1] > best[1]:
best = [row['pred_class'], ams[-1]]
print(best)
return ams, best
def foldAMSScan(inData, N=10):
kf = StratifiedKFold(n_splits=N, shuffle=True)
folds = kf.split(inData, inData['gen_target'])
bests = []
for i, (train, test) in enumerate(folds):
bests.append(amsScan(inData.iloc[test], (np.sum(inData[(inData['gen_target'] == 1)]['gen_weight']), np.sum(inData[(inData['gen_target'] == 0)]['gen_weight'])))[1])
print("Fold {}, best AMS {} at cut of {}. Total weights Signal:Bkg. {}:{}".format(i, bests[-1][1], bests[-1][0],
np.sum(inData.iloc[test][inData.gen_target == 1]['gen_weight']),
np.sum(inData.iloc[test][inData.gen_target == 0]['gen_weight'])))
print("Mean cut", np.average([x[0] for x in bests], weights=[1/x[1] for x in bests]), "mean AMS", np.average([x[1] for x in bests], weights=[1/x[1] for x in bests]))
return bests
def amsScanQuick(inData, wFactor=250000./50000.):
s = np.sum(inData.loc[inData['gen_target'] == 1, 'gen_weight'])
b = np.sum(inData.loc[inData['gen_target'] == 0, 'gen_weight'])
tIIs = inData['pred_class'].argsort()
amss = np.empty([len(tIIs)])
amsMax = 0
threshold = 0.0
for tI in range(len(tIIs)):
# don't forget to renormalize the weights to the same sum
# as in the complete training set
amss[tI] = AMS(max(0,s * wFactor),max(0,b * wFactor))
if amss[tI] > amsMax:
amsMax = amss[tI]
threshold = inData['pred_class'].values[tIIs[tI]]
#print tI,threshold
if inData.loc[:, 'gen_target'].values[tIIs[tI]]:
s -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
else:
b -= inData.loc[:, 'gen_weight'].values[tIIs[tI]]
print (amsMax, threshold)
return amsMax, threshold
def scoreTest(ensemble, weights):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
batchEnsemblePredict(ensemble, weights, testData, ensembleSize=10, verbose=1)
def saveTest(cut, name):
testData = h5py.File(dirLoc + 'testing.hdf5', "r+")
data = pandas.DataFrame()
data['EventId'] = getFeature('EventId', testData)
data['pred_class'] = getFeature('pred', testData)
data['Class'] = 'b'
data.loc[data.pred_class >= cut, 'Class'] = 's'
data.sort_values(by=['pred_class'], inplace=True)
data['RankOrder']=range(1, len(data)+1)
data.sort_values(by=['EventId'], inplace=True)
print (dirLoc + name + '_test.csv')
data.to_csv(dirLoc + name + '_test.csv', columns=['EventId', 'RankOrder', 'Class'], index=False)
def convertToDF(datafile, columns={'gen_target', 'gen_weight', 'pred_class'}, nLoad=-1, setFold=-1):
data = pandas.DataFrame()
data['gen_target'] = getFeature('targets', datafile, nLoad, setFold=setFold)
data['gen_weight'] = getFeature('weights', datafile, nLoad, setFold=setFold)
data['pred_class'] = getFeature('pred', datafile, nLoad, setFold=setFold)
print (len(data), "candidates loaded")
return data
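def evaluationSketch():
    """Editorial sketch of how the helpers above fit together (not in the original script).

    The 'val.hdf5' file name is an assumption for illustration; any HDF5 file
    holding 'targets', 'weights' and 'pred' datasets readable by getFeature works.
    """
    valData = h5py.File(dirLoc + 'val.hdf5', "r+")
    valDF = convertToDF(valData)
    return amsScanQuick(valDF, wFactor=wFactor)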
|
the-stack_106_19282
|
import gym
import sys; import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import argparse
import imageio
import maml_rl.envs
from tqdm import tqdm
from maml_rl.policies.conv_lstm_policy import ConvLSTMPolicy
from scipy.ndimage.filters import gaussian_filter
from scipy.misc import imresize
searchlight = lambda I, mask: I * mask + gaussian_filter(I, sigma=3) * (1 - mask) # choose an area NOT to blur
occlude = lambda I, mask: I * (1 - mask) + gaussian_filter(I, sigma=3) * mask # choose an area to blur
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--env", type=str, default='CustomGame-v0')
parser.add_argument("--checkpoint", type=str)
parser.add_argument("--D", type=int, default=1)
parser.add_argument("--N", type=int, default=1)
parser.add_argument("--cnn_type", type=str, default='nature')
parser.add_argument("--render", action="store_true")
return parser.parse_args()
def get_mask(center, size, r):
y,x = np.ogrid[-center[0]:size[0]-center[0], -center[1]:size[1]-center[1]]
keep = x*x + y*y <= 1
mask = np.zeros(size) ; mask[keep] = 1 # select a circle of pixels
mask = gaussian_filter(mask, sigma=r) # blur the circle of pixels. this is a 2D Gaussian for r=r^2=1
return mask / mask.max()
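def perturbation_example():
    # Editorial sketch (not in the original script): occlude a dummy 84x84x2
    # frame around pixel (40, 40) using the mask and interpolation defined above.
    frame = np.random.rand(84, 84, 2).astype(np.float32)
    mask = get_mask(center=[40, 40], size=[84, 84, 2], r=5)
    return occlude(frame, mask)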
def rollout(env, policy, device, render=False):
history = {'ins': [], 'logits': [], 'values': [], 'outs': [], 'hx': [], 'cx': [], 'embed': []}
A = env.action_space.n
obs = env.reset(); done = False
embed_tensor = torch.zeros(1, A + 2).to(device=device)
embed_tensor[:, 0] = 1.
hx = torch.zeros(policy.D, 1, 256).to(device=device)
cx = torch.zeros(policy.D, 1, 256).to(device=device)
while not done:
if render: env.render()
obs_tensor = torch.from_numpy(np.array(obs)[None]).to(device=device)
action_dist, value_tensor, hx, cx = policy(obs_tensor, hx, cx, embed_tensor)
action = action_dist.sample().cpu().numpy()
        obs, rew, done, info = env.step(action[0])
if 'v0' in env.spec.id:
term_flag = float(done)
else:
term_flag = np.sign(info['done']) if 'done' in info else 0.0
embed_arr = np.zeros(A + 2)
embed_arr[action[0]] = 1.
embed_arr[-2] = rew
embed_arr[-1] = term_flag
embed_tensor = torch.from_numpy(embed_arr[None]).float().to(device=device)
history['ins'].append(np.array(obs)[None])
history['hx'].append(hx.data.numpy())
history['cx'].append(cx.data.numpy())
history['logits'].append(action_dist.logits.data.numpy())
history['values'].append(value_tensor.data.numpy())
history['outs'].append(action_dist.probs.data.numpy())
history['embed'].append(embed_arr[None])
return history
def run_through_model(policy, history, idx, interp_func, mask=None, mode='actor'):
if mask is None:
im = history['ins'][idx]
else:
im = interp_func(history['ins'][idx], mask).astype(np.float32) # perturb input
obs_tensor = torch.from_numpy(im)
embed_tensor = torch.from_numpy(history['embed'][idx]).float()
hx = torch.from_numpy(history['hx'][idx])
cx = torch.from_numpy(history['cx'][idx])
a_dist, v_tensor, hx, cx = policy(obs_tensor, hx, cx, embed_tensor)
return a_dist.logits.data.numpy() if mode == 'actor' else v_tensor.data.numpy()
def score_frame(policy, history, idx, radius=5, density=5, interp_func=occlude, mode='actor'):
"""
@radius: radius of blur
@density: density of scores (if d==1, then get a score for every pixel...
if d==2 then every other, which is 25% of total pixels for a 2D image)
"""
assert mode in ['actor', 'critic'], 'mode must be either "actor" or "critic"'
L = run_through_model(policy, history, idx, interp_func, mask=None, mode=mode)
scores = np.zeros((int(84 / density) + 1, int(84 / density) + 1)) # saliency scores S(t,i,j)
for i in range(0, 84, density):
for j in range(0, 84, density):
mask = get_mask(center=[i, j], size=[84, 84, 2], r=radius)
l = run_through_model(policy, history, idx, interp_func, mask=mask, mode=mode)
scores[int(i / density), int(j / density)] = 0.5 * np.power((L - l), 2).sum()
pmax = scores.max()
scores = imresize(scores, size=[84, 84], interp='bilinear').astype(np.float32)
return pmax * scores / scores.max()
def saliency_frame(saliency, frame, fudge_factor=100, channel=2, sigma=0):
"""
sometimes saliency maps are a bit clearer if you blur them
slightly...sigma adjusts the radius of that blur
"""
pmax = saliency.max()
S = saliency if sigma == 0 else gaussian_filter(saliency, sigma=sigma)
S -= S.min(); S = fudge_factor * pmax * S / S.max()
S = S[:,:,np.newaxis].astype('uint16')
I = (frame * 255.).astype('uint16')
if channel == 0:
I = np.concatenate((S, I), axis=2)
else:
I = np.concatenate((I, S), axis=2)
return I.clip(1, 255).astype('uint8')
if __name__=='__main__':
args = parse_args()
env = gym.make(args.env)
obs_shape = env.observation_space.shape
act_dim = env.action_space.n
# load model
model = ConvLSTMPolicy(input_size=obs_shape, output_size=act_dim, cnn_type=args.cnn_type, D=args.D, N=args.N)
model.load_state_dict(torch.load(args.checkpoint, map_location='cpu'))
# rollout and get saliency maps
history = rollout(env, model, 'cpu', render=args.render)
amap_frames = []; cmap_frames = []
for frame_idx in tqdm(range(len(history['ins']))):
actor_saliency = score_frame(model, history, frame_idx, mode='actor')
critic_saliency = score_frame(model, history, frame_idx, mode='critic')
# display visualization
frame = history['ins'][frame_idx].squeeze().copy()
actor_map = saliency_frame(actor_saliency, frame, fudge_factor=100, channel=2) # blue vis; yellow bg
critic_map = saliency_frame(critic_saliency, frame, fudge_factor=int(3e5), channel=0) # red vis; blueish background
amap_frames.append(actor_map)
cmap_frames.append(critic_map)
imageio.mimsave('base_actor.gif', amap_frames)
imageio.mimsave('base_critic.gif', cmap_frames)
|
the-stack_106_19283
|
from django.shortcuts import render, get_object_or_404
from django.utils import timezone
from .models import Engine, RefreshBatch
from .collections import (
general_table, stats_daterange_table, players_chart,
wads_popularity_table, iwad_popularity_chart,
servers_popularity_table)
from datetime import timedelta, datetime
from doomstats.timestuff import day_range
import json
def front_page(request):
data = {}
daterange = _daterange_for_query(request)
data["tables"] = [
general_table(),
stats_daterange_table(daterange)
]
return render(request, "stats/front_page.html", data)
def engine_players(request, name):
return _Engine(request, name).players()
def engine_wads(request, name):
return _Engine(request, name).wads()
def engine_servers(request, name):
return _Engine(request, name).servers()
class _Engine(object):
def __init__(self, request, name):
self._request = request
self._name = name
self._game_engine = get_object_or_404(Engine, name__iexact=name)
self._daterange = _daterange_for_query(request)
def players(self):
data = {
"cur_engine": self._game_engine,
"stats": [
stats_daterange_table(self._daterange, self._game_engine),
players_chart(self._daterange, self._game_engine)
],
}
return self._render(data)
def wads(self):
data = {
"cur_engine": self._game_engine,
"stats": [
wads_popularity_table(self._daterange, self._game_engine),
iwad_popularity_chart(self._daterange, self._game_engine)
],
}
return self._render(data)
def servers(self):
data = {
"cur_engine": self._game_engine,
"stats": [
servers_popularity_table(self._daterange, self._game_engine)
],
}
return self._render(data)
def _render(self, data):
return render(self._request, "stats/engine.html", data)
def about(request):
return render(request, "stats/about.html")
def load_site_global_context(request):
datefrom, dateto = _daterange_from_request(request)
return {
"engines": Engine.objects.all().order_by("name"),
"json": json.dumps({
"date-from": _dateformat(datefrom),
"date-to": _dateformat(dateto),
}),
"dates": {
"today": _dateformat(timezone.now()),
"yesterday": _dateformat(timezone.now() - timedelta(days=1)),
"7days": _dateformat(timezone.now() - timedelta(days=7))
}
}
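# Editorial note (assumption, not verified against this project's settings):
# load_site_global_context follows the Django context-processor signature
# (request -> dict), so it would normally be registered in settings.py, e.g.
#
#     TEMPLATES[0]['OPTIONS']['context_processors'].append(
#         'stats.views.load_site_global_context')
#
# where 'stats.views' is the assumed dotted path of this module.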
def _daterange_for_query(request):
datefrom, dateto = _daterange_from_request(request)
return day_range(datefrom, dateto)
def _daterange_from_request(request):
datefrom = _date_from_request(request, "datefrom") or (timezone.now() - timedelta(days=1))
dateto = _date_from_request(request, "dateto") or timezone.now()
if dateto < datefrom:
datefrom, dateto = dateto, datefrom # Obviously, it's that simple.
return _stddate(datefrom), _stddate(dateto)
def _date_from_request(request, fieldname):
stamp = request.GET.get(fieldname)
if stamp is None:
stamp = request.session.get(fieldname)
if stamp is None:
return None
request.session[fieldname] = stamp
try:
date = datetime.strptime(stamp, "%Y-%m-%d")
except ValueError:
return timezone.now()
date = date.replace(tzinfo=timezone.utc)
return date
def _dateformat(date):
return date.strftime("%Y-%m-%d")
def _stddate(date):
try:
earliest = RefreshBatch.objects.earliest('date').date
latest = RefreshBatch.objects.latest('date').date
except RefreshBatch.DoesNotExist:
earliest = latest = date
date = max(earliest, min(latest, date))
return date.replace(hour=0, minute=0, second=0)
|
the-stack_106_19284
|
#!/usr/bin/python3
# (c) 2014, WasHere Consulting, Inc
import struct
f = open("mbr.dd", "rb")
mbr = bytearray()
try:
mbr = f.read(512)
finally:
f.close()
sig = struct.unpack("<I", mbr[0x1B8:0x1BC])
print("Disk signature: ", sig[0])
active = mbr[0x1BE]
if active == 0x80:
print("Active flag: Active")
else:
print("Active flag: Not active")
lbastart = struct.unpack("<I", mbr[0x1C6:0x1CA])
print("Partition Start (LBA): ", lbastart[0])
# The four bytes at 0x1CA hold the partition's sector count; the last LBA is start + count - 1
numsectors = struct.unpack("<I", mbr[0x1CA:0x1CE])
print("Partition End (LBA): ", lbastart[0] + numsectors[0] - 1)
|
the-stack_106_19285
|
import nextcord, random, json
from nextcord.ext import commands
from Functions.Response import embed
from Functions.Permission import permissionCheck
from Functions.Integration import DiscordInteraction
class Voice(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
@nextcord.slash_command(
name = "song",
description = "🎶 Pick out a song from the database",
guild_ids = [ 904958479574401064 ]
)
async def song(
self, interaction: nextcord.Interaction,
filter: str = nextcord.SlashOption(name = "filter", description = "Name of an artist / genre [ TODO ]", required = False)
):
if not permissionCheck(interaction.user, 'use_song'):
await interaction.response.send_message(f"You can't run this command", ephemeral = True)
return
        # The artist/genre filter is not applied yet (marked TODO in the option
        # description), so the full song database is loaded either way.
        with open("Data/Songs.json") as raw_data: song_data: dict = json.load(raw_data)
song: str = random.choice(list(song_data.keys()))
await embed(3, f"Artist: **{song_data[song]['Artist'].capitalize()}**\n\nAdded By: {song_data[song]['Added By']}\nLink: [Click Here]({song_data[song]['Link']})", title = song.capitalize(), target = interaction)
@nextcord.slash_command(
name = "add_song",
description = "📝 Add a new song to the bot",
guild_ids = [ 904958479574401064 ]
)
async def addsong(
self, interaction: nextcord.Interaction,
name: str = nextcord.SlashOption(name = "name", description = "Name of the song", required = True),
artist: str = nextcord.SlashOption(name = "artist", description = "Name of the artist", required = True),
link: str = nextcord.SlashOption(name = "link", description = "A spotify / youtube link to the song", required = True),
):
if not permissionCheck(interaction.user, 'can_add-song'):
await interaction.response.send_message(f"You can't run this command", ephemeral = True)
return
with open("Data/Songs.json") as raw_data: song_data: dict = json.load(raw_data)
if name.lower() in song_data.keys():
await embed(2, description = "This song is already in the database", target = interaction)
return
song_data[name.lower()] = {
"Link": link,
"Added By": str(interaction.user.name),
"Artist": artist.lower()
}
with open("Data/Songs.json", "w") as raw_writable: json.dump(song_data, raw_writable, indent = 4)
await embed(1, f"Added **{name}** to the database!", target = interaction)
@nextcord.slash_command(
name = "remove_song",
description = "🔐 Remove a song from the database",
guild_ids = [ 904958479574401064 ]
)
async def delsong(
self, interaction: nextcord.Interaction,
filter: str = nextcord.SlashOption(name = "filter", description = "Name of the song", required = True)
):
if not permissionCheck(interaction.user, 'can_remove-song'):
await interaction.response.send_message(f"You can't run this command", ephemeral = True)
return
with open("Data/Songs.json") as raw_data: song_data: dict = json.load(raw_data)
if filter.lower() in song_data.keys():
song_data.pop(filter.lower())
with open("Data/Songs.json", "w") as raw_writable: json.dump(song_data, raw_writable, indent = 4)
await embed(1, description = f"Removed **{filter.capitalize()}** from the database", target = interaction)
return
emb = await embed(2, description = "I couldn't find that song in the database... Try to check for spelling errors and try again!")
await interaction.response.send_message(embed = emb, ephemeral = True)
@nextcord.slash_command(
name = "game",
description = "⚡ Start a game activity!",
guild_ids = [ 904958479574401064 ]
)
async def game(
self, interaction: nextcord.Interaction,
game: str = nextcord.SlashOption(
name = "game",
description = "The game you want to start",
required = True,
choices = {
"Poker": "poker",
"Chess": "chess",
"Fishing": "fishing",
"Betrayal": "betrayal",
"Letter Tile": "letter-tile",
"Word Snack": "word-snack",
"Doodle Crew": "doodle-crew",
"Spellcast": "spellcast",
"Awkword": "awkword",
"Checkers": "checkers"
}
)
):
"""Start a game activity"""
if not interaction.user.voice: return await interaction.response.send_message("You aren't in a voice channel!", ephemeral = True)
else: channel = interaction.user.voice.channel
Control = DiscordInteraction(self.bot)
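        # DiscordInteraction.new is assumed to return an invite code for the
        # embedded activity; it is appended to discord.gg/ below to form the join link.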
Link = await Control.new(channel.id, game)
embed = nextcord.Embed(
title = f"{game.capitalize()}",
description = f"Created a new {game.capitalize()} party!\n\nTo get started or join, click **[here](https://discord.gg/{Link})**",
color = nextcord.Colour.random()
)
await interaction.response.send_message(f"https://discord.gg/{Link}", embed = embed)
@nextcord.slash_command(
name = "youtube",
description = "▶️ Start a youtube activity in your channel!",
guild_ids = [ 904958479574401064 ]
)
async def youtube(
self, interaction: nextcord.Interaction
):
"""Start a youtube activity"""
if not interaction.user.voice: return await interaction.response.send_message("You aren't in a voice channel!", ephemeral = True)
else: channel = interaction.user.voice.channel
Control = DiscordInteraction(self.bot)
Link = await Control.new(channel.id, 'youtube')
embed = nextcord.Embed(
title = f"YouTube",
description = f"Created a new YouTube party!\n\nTo get started or join, click **[here](https://discord.gg/{Link})**",
color = nextcord.Colour.random()
)
await interaction.response.send_message(f"https://discord.gg/{Link}", embed = embed)
def setup(bot: commands.Bot): bot.add_cog(Voice(bot))
|
the-stack_106_19286
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
PyCOMPSs Testbench
========================
"""
# Imports
import unittest
from pycompss.api.api import compss_wait_on
from pycompss.api.task import task
class testMultiReturnInstanceMethods(unittest.TestCase):
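    # Every task below declares multiple returns, so each call yields one future
    # per return value; the tests synchronize them individually with
    # compss_wait_on before asserting.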
@task(returns=(int, int))
def argTask(self, *args):
print("ARG: ", args)
secondReturn = 12345
if len(args) != 0:
secondReturn = args[0]
return sum(args), secondReturn
@task(returns=(int, list))
def varargTask(self, v, w, *args):
print("V: ", v)
print("W: ", w)
print("ARG: ", args)
return (v * w) + sum(args), [v, w]
@task(returns=(int, list))
def kwargTask(self, **kwargs):
print("KARG: ", kwargs)
return len(kwargs), sorted(list(kwargs.keys()))
@task(returns=(int, list))
def varkwargTask(self, v, w, **kwargs):
print("V: ", v)
print("W: ", w)
print("KARG: ", kwargs)
return (v * w) + len(kwargs), sorted(list(kwargs.values()))
@task(returns=(int, tuple, dict))
def argkwargTask(self, *args, **kwargs):
print("ARG: ", args)
print("KARG: ", kwargs)
return sum(args) + len(kwargs), args, kwargs
@task(returns=(int, tuple, dict))
def varargkwargTask(self, v, w, *args, **kwargs):
print("V: ", v)
print("W: ", w)
print("ARG: ", args)
print("KARG: ", kwargs)
return (v * w) + sum(args) + len(kwargs), args, kwargs
@task(returns=(int, int, tuple, dict))
def varargdefaultkwargTask(self, v, w, s=2, *args, **kwargs):
print("V: ", v)
print("W: ", w)
print("S: ", s)
print("ARGS: ", args)
print("KWARG: ", kwargs)
return (v * w) + sum(args) + len(kwargs) + s, s, args, kwargs
@task(returns=(int, dict))
def taskUnrollDict(self, a, b, **kwargs):
print("a: ", a)
print("b: ", b)
print("kwargs: ", kwargs)
return a + b, kwargs
@task(returns=(int, dict))
def taskUnrollDictWithDefaults(self, a=1, b=2, **kwargs):
print("a: ", a)
print("b: ", b)
print("kwargs: ", kwargs)
return a + b, kwargs
'''
FUNCTION WITH *ARGS
'''
# we have arguments
def testArgTask1(self):
pending1, pending2 = self.argTask(1, 2)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (3, 1))
def testArgTask2(self):
pending1, pending2 = self.argTask(1, 2, 3, 4)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (10, 1))
# args is empty
def testArgTask3(self):
pending1, pending2 = self.argTask()
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (0, 12345))
# args is not empty but args are an unpacked tuple
def testArgTask4(self):
my_tup = (1, 2, 3, 4)
pending1, pending2 = self.argTask(*my_tup)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (10, 1))
'''
FUNCTION WITH ARGS + *ARGS
'''
def testVarArgTask1(self):
pending1, pending2 = self.varargTask(10, 20, 1, 2, 3, 4)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (210, [10, 20]))
def testVarArgTask2(self):
pending1, pending2 = self.varargTask(4, 50, 5, 4, 3, 2, 1)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (215, [4, 50]))
def testVarArgTask3(self):
pending1, pending2 = self.varargTask(4, 50)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (200, [4, 50]))
'''
FUNCTION WITH **KWARGS
'''
def testKwargTask1(self):
pending1, pending2 = self.kwargTask(hello='world')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (1, ['hello']))
def testKwargTask2(self):
pending1, pending2 = self.kwargTask(this='is', a='test')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (2, sorted(['this', 'a'])))
def testKwargTask3(self):
pending1, pending2 = self.kwargTask()
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (0, []))
'''
FUNCTION WITH ARGS + **KWARGS
'''
def testVarKwargTask1(self):
pending1, pending2 = self.varkwargTask(1, 2, hello='world')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (3, ['world']))
    def testVarKwargTask2(self):
pending1, pending2 = self.varkwargTask(2, 3, this='is', a='test')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (8, sorted(['is', 'test'])))
    def testVarKwargTask3(self):
pending1, pending2 = self.varkwargTask(2, 3)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (6, []))
'''
FUNCTION WITH *ARGS + **KWARGS
'''
def testArgKwargTask1(self):
pending1, pending2, pending3 = self.argkwargTask(1, 2, hello='world')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (4, (1, 2), {'hello': 'world'}))
def testArgKwargTask2(self):
pending1, pending2, pending3 = self.argkwargTask(1, 2, 3, 4, this='is', a='test')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (12, (1, 2, 3, 4), {'this': 'is', 'a': 'test'}))
def testArgKwargTask3(self):
pending1, pending2, pending3 = self.argkwargTask(1, 2, 3, 4)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (10, (1, 2, 3, 4), {}))
def testArgKwargTask4(self):
pending1, pending2, pending3 = self.argkwargTask()
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (0, (), {}))
'''
FUNCTION WITH ARGS, *ARGS AND **KWARGS
'''
def testVarArgKwargTask1(self):
pending1, pending2, pending3 = self.varargkwargTask(1, 2, 3, 4, hello='world')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (10, (3, 4), {'hello': 'world'}))
def testVarArgKwargTask2(self):
pending1, pending2, pending3 = self.varargkwargTask(1, 2, 3, 4, 5, 6, this='is', a='test')
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
self.assertEqual((result1, result2, result3), (22, (3, 4, 5, 6), {'this': 'is', 'a': 'test'}))
'''
FUNCTION WITH ARGS, DEFAULTED ARGS, *ARGS AND **KWARGS
'''
def testVarArgDefaultKwargTask1(self):
pending1, pending2, pending3, pending4 = self.varargdefaultkwargTask(1, 1)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
result4 = compss_wait_on(pending4)
self.assertEqual((result1, result2, result3, result4), (3, 2, (), {}))
def testVarArgDefaultKwargTask2(self):
pending1, pending2, pending3, pending4 = self.varargdefaultkwargTask(1, 2, 3)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
result4 = compss_wait_on(pending4)
self.assertEqual((result1, result2, result3, result4), (5, 3, (), {}))
def testVarArgDefaultKwargTask3(self):
pending1, pending2, pending3, pending4 = self.varargdefaultkwargTask(1, 2, 3, 4)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
result4 = compss_wait_on(pending4)
self.assertEqual((result1, result2, result3, result4), (9, 3, (4,), {}))
def testVarArgDefaultKwargTask4(self):
pending1, pending2, pending3, pending4 = self.varargdefaultkwargTask(1, 2, 3, 4, five=5)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
result3 = compss_wait_on(pending3)
result4 = compss_wait_on(pending4)
self.assertEqual((result1, result2, result3, result4), (10, 3, (4,), {'five': 5}))
'''
FUNCTION WITH **KWARGS AND DICT UNROLLING
'''
def testKwargsDictUnrolling(self):
z = {'a': 10, 'b': 20, 'c': 30}
pending1, pending2 = self.taskUnrollDict(**z)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (30, {'c': 30}))
def testKwargsDictUnrollingControl(self):
pending1, pending2 = self.taskUnrollDict(10, 20, c=30)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (30, {'c': 30}))
def testKwargsDictUnrollingDefaults(self):
z = {'a': 10, 'b': 20, 'c': 30}
pending1, pending2 = self.taskUnrollDictWithDefaults(**z)
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (30, {'c': 30}))
def testKwargsDictUnrollingDefaultsControl(self):
pending1, pending2 = self.taskUnrollDictWithDefaults()
result1 = compss_wait_on(pending1)
result2 = compss_wait_on(pending2)
self.assertEqual((result1, result2), (3, {}))
|
the-stack_106_19290
|
from Instruments.devGlobalFunctions import devGlobal
import numpy as np
import struct
import wx
import math
import pyte16 as pyte
from wx.lib.pubsub import pub
#Simple panel
class graphPanel(wx.Panel):
def __init__(self, parent, device):
wx.Panel.__init__(self,parent)
button_sizer = wx.BoxSizer(wx.VERTICAL)
btnWidth = 90
btnHeight = 40
blankBtnLabel = "&Blank"
self.device = device
#==============================================================================================================================================================
# Buttons are created with labels, sizes, and positions. The names of the buttons are arbitrary but
# it is a good practice to name them according to their function.
##Creating Buttons
self.button = []
rowSizer = wx.BoxSizer(wx.HORIZONTAL)
rowCounter = 0
buttonsInARow = 5
buttonsInACol = 3
for i in range(buttonsInARow*buttonsInACol):
btnIndexCol = i%buttonsInARow + 1
btnIndexRow = int(math.floor(i/buttonsInARow)) + 1
self.button.append(wx.Button(self, label = blankBtnLabel + "\n(" + str(btnIndexRow) + "," + str(btnIndexCol) + ")", size = (btnWidth, btnHeight)))
self.button[i].SetBackgroundColour("RED")
rowSizer.Add(self.button[i], 0, wx.ALL, 1)
if btnIndexCol == buttonsInARow:
button_sizer.Add(wx.StaticLine(self), 0, wx.ALL, 1)
button_sizer.Add(rowSizer, 0, wx.ALL, 0)
rowSizer = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizerAndFit(button_sizer)
class SuperFastClass(devGlobal):
# Human readable name for the gui, note: Needs to be unique
name = "Super Fast"
def __init__(self, *args):
devGlobal.__init__(self, *args)
self.devComm.write("*CLS; *RST")
self.devComm.write(":INST:SEL 1")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 2")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 3")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 4")
self.devComm.write(":OUTPut:STATe OFF")
self.devComm.write(":INST:SEL 1")
self.ch1OnOff = 0
self.ch2OnOff = 0
self.ch3OnOff = 0
self.ch4OnOff = 0
self.numTabs = 2
self.additionalPanels = [(graphPanel, 'Graph Tab')]
self.register(self.GetModelOption, 'Gets Model\nOption')
self.register(self.SegmentLength, 'Segment\nLength')
self.register(self.Tests, 'Tests')
        self.register(self.SetOperatingChannel, 'Sets Operating\nChannel')  # note: this function uses a channel input, but we don't handle that yet
self.register(self.SetStandardSquareWave, 'Standard\nSquare Wave', parameters = ["Frequency"])
self.register(self.OutputOnOff, 'Sets Operating\nChannel On/Off', "")
self.register(self.DeleteAllTraces, 'Deletes\nAll Traces')
self.register(self.TracePoints, 'Queries\nTrace Points')
self.register(self.Ch1OnOff, 'Ch1 Output\nOn/Off')
self.register(self.Ch2OnOff, 'Ch2 Output\nOn/Off')
self.register(self.SyncChannels, 'Sync\nCh1 and Ch2')
self.register(self.Ch1TrigIntExt, 'Ch1 Trigger\nInt/Ext', parameters = ["Ch1 Trigger"])
        self.register(self.Ch2TrigIntExt, 'Ch2 Trigger\nInt/Ext', parameters = ["Ch2 Trigger"])
self.register(self.Ch1setWaitTime, 'Ch1 Set\nWait Time', parameters = ["CH1 Wait Time"])
def GetModelOption(self, msg):
cmdString = "Queries memory option on "
self.answer = str(int(self.devComm.query("*OPT?")[2:4])*1e6)
self.printOut(cmdString)
done = 0
while done != 1:
print(self.devComm.query("*OPC?"))
done = int(self.devComm.query("*OPC?"))
def SegmentLength(self, msg):
cmdString = "Gets Segment Length on "
self.answer = self.devComm.query(":TRACE:DEFine?")
self.printOut(cmdString)
def Tests(self, msg):
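        # Downloads a sine segment (1) and a square segment (2), then programs a
        # four-entry sequencer table that alternates between them and switches
        # the source to sequence mode at a 1 GS/s sample rate.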
cmdString = "Arbitrary waveform tests on "
cycle_len = 1024
num_cycles = 1
seg_len = cycle_len * num_cycles
wave1 = self.build_sine_wave(cycle_len,num_cycles, low_level=4000,
high_level=2 ** 14 - 4000)
wave2 = self.build_square_wave(cycle_len, num_cycles)
self.devComm.write(":INST:SEL 1")
self.devComm.write(":TRAC:MODE SING")
seg_nb = 1
self.devComm.write(':TRAC:DEF {0:d},{1:d}'.format(seg_nb, seg_len))
self.devComm.write(':TRAC:SEL {0:d}'.format(seg_nb))
self.send_binary_data(pref=':TRAC:DATA', bin_dat=wave1)
seg_nb = 2
self.devComm.write(':TRAC:DEF {0:d},{1:d}'.format(seg_nb, seg_len))
self.devComm.write(':TRAC:SEL {0:d}'.format(seg_nb))
self.send_binary_data(pref=':TRAC:DATA', bin_dat=wave2)
seg_num = [2, 1, 2, 1]
repeats = [1, 5, 1, 4]
jump = [0, 0, 0, 0]
seq_table = list(zip(repeats, seg_num, jump))
self.devComm.write(':SEQ:SELect 1')
self.download_sequencer_table(seq_table)
self.devComm.write(':SOURce:FUNCtion:MODE SEQ')
self.devComm.write(':SOUR:FREQ:RAST 1.0e9')
self.printOut(cmdString)
yNP = np.concatenate((wave1,wave2))
pub.sendMessage('PrintData', msg=[self.panelId,yNP])
def SetOperatingChannel(self, msg):
param = self.GetParamVector()
cmdString = "Sets the operating channel to " + param[0] + " on "
visaCmd = ":INST:SEL " + param[0]
self.devComm.write(visaCmd)
self.printOut(cmdString)
def SetStandardSquareWave(self, msg):
param = self.GetParamVector()
cmdString = "Sets a standard square wave with frequency " + param[2] + " on the operating channel on "
self.devComm.write(':SOURce:FUNCtion:MODE FIX')
self.devComm.write(":SOURce:FREQuency:CW " + param[2])
self.devComm.write(":SOURce:FUNCtion:SHAPe SQU")
self.devComm.write(":SOURce:SQUare:DCYC 50.0")
self.printOut(cmdString)
def OutputOnOff(self, msg):
cmdString = "Sets active channel output to on/off on "
if self.ch1OnOff == 0:
self.ch1OnOff = 1
self.devComm.write(":OUTPut:STATe ON")
else:
self.ch1OnOff = 0
self.devComm.write(":OUTPut:STATe OFF")
self.printOut(cmdString)
def DeleteAllTraces(self, msg):
cmdString = "Deletes all traces on "
self.devComm.write(":TRACE:DELETE:ALL")
self.printOut(cmdString)
def TracePoints(self, msg):
cmdString = "Queries trace points on "
self.answer = self.devComm.query(":TRAC:POINts?")
self.printOut(cmdString)
def Ch1OnOff(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch1 Output to " + param[2] + " on "
cmdString = cmdString + self.address + "\n"
        self.printOut(cmdString)
def Ch2OnOff(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch2 Output to " + param[2] + " on "
cmdString = cmdString + self.address + "\n"
        print(cmdString)
def SyncChannels(self, msg):
StringInit = "Sync Ch1 and Ch2 on "
self.cmdString = StringInit + self.address
print(self.cmdString)
def Ch1TrigIntExt(self, msg):
param = self.GetParamVector()
cmdString = "Sets Ch1 Trigger to " + param[2] + " on "
cmdString = cmdString + self.com_type + self.address + "\n"
        print(cmdString)
def Ch2TrigIntExt(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch2 Trigger to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch1setWaitTime(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch1 Wait Time to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch2setWaitTime(self, event):
param = self.GetParamVector()
StringInit = "Sets Ch2 Wait Time to " + param[2] + " on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch1Ena10MExtRef(self, event):
StringInit = "Enable Ch1 10 MHz External Reference on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def Ch2Ena10MExtRef(self, event):
StringInit = "Enable Ch2 10 MHz External Reference on "
self.cmdString = StringInit + self.com_type + self.address
print(self.cmdString)
def build_sine_wave(self, cycle_len, num_cycles=1, phase_degree=0, low_level=0, high_level=2**14-1):
cycle_len = int(cycle_len)
num_cycles = int(num_cycles)
if cycle_len <= 0 or num_cycles <= 0:
return None
dac_min = 0
dac_max = 2**14-1
wav_len = cycle_len * num_cycles
phase = float(phase_degree) * np.pi / 180.0
x = np.linspace(start=phase, stop=phase+2*np.pi, num=cycle_len, endpoint=False)
zero_val = (low_level + high_level) / 2.0
amplitude = (high_level - low_level) / 2.0
y = np.sin(x) * amplitude + zero_val
y = np.round(y)
y = np.clip(y, dac_min, dac_max)
y = y.astype(np.uint16)
wav = np.empty(wav_len, dtype=np.uint16)
for n in range(num_cycles):
wav[n * cycle_len : (n + 1) * cycle_len] = y
return wav
def build_square_wave(self, cycle_len, num_cycles=1, duty_cycle=50.0, phase_degree=0, low_level=0,
high_level=2 ** 14 - 1):
cycle_len = int(cycle_len)
num_cycles = int(num_cycles)
if cycle_len <= 0 or num_cycles <= 0:
return None
dac_min = 0
dac_max = 2 ** 14 - 1
wav_len = cycle_len * num_cycles
duty_cycle = np.clip(duty_cycle, 0.0, 100.0)
low_level = np.clip(low_level, dac_min, dac_max)
high_level = np.clip(high_level, dac_min, dac_max)
low_level = np.uint16(low_level)
high_level = np.uint16(high_level)
phase = float(phase_degree) * np.pi / 180.0
x = np.linspace(start=phase, stop=phase + 2 * np.pi, num=cycle_len, endpoint=False)
x = x <= 2 * np.pi * duty_cycle / 100.0
y = np.full(x.shape, low_level, dtype=np.uint16)
y[x] = high_level
y = y.astype(np.uint16)
wav = np.empty(wav_len, dtype=np.uint16)
for n in range(num_cycles):
wav[n * cycle_len: (n + 1) * cycle_len] = y
return wav
def download_sequencer_table(self, seq_table, pref=':SEQ:DATA'):
tbl_len = len(seq_table)
s = struct.Struct('< L H B x')
s_size = s.size
m = np.empty(s_size * tbl_len, dtype='uint8')
for n in range(tbl_len):
repeats, seg_nb, jump_flag = seq_table[n]
s.pack_into(m, n * s_size, np.uint32(repeats), np.uint16(seg_nb), np.uint8(jump_flag))
self.send_binary_data(pref, m)
def send_binary_data(self, pref, bin_dat):
pyte.download_binary_data(self.devComm, pref, bin_dat, bin_dat.nbytes)
# IMPORTANT Don't forget this line (and remember to use the class name above)
instrument = SuperFastClass
|
the-stack_106_19292
|
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans as SKMeans
from sklearn.datasets import make_blobs
import dislib as ds
from dislib.cluster import KMeans
import dislib.data.util.model as utilmodel
class KMeansTest(unittest.TestCase):
def test_init_params(self):
""" Tests that KMeans object correctly sets the initialization
parameters """
n_clusters = 2
max_iter = 1
tol = 1e-4
seed = 666
arity = 2
init = "random"
km = KMeans(n_clusters=n_clusters, max_iter=max_iter, tol=tol,
arity=arity, random_state=seed)
expected = (n_clusters, init, max_iter, tol, arity)
real = (km.n_clusters, km.init, km.max_iter, km.tol, km.arity)
self.assertEqual(expected, real)
def test_fit(self):
""" Tests that the fit method returns the expected centers using toy
data.
"""
arr = np.array([[1, 2], [2, 1], [-1, -2], [-2, -1]])
x = ds.array(arr, block_size=(2, 2))
km = KMeans(n_clusters=2, random_state=666, verbose=False)
km.fit(x)
expected_centers = np.array([[1.5, 1.5], [-1.5, -1.5]])
self.assertTrue((km.centers == expected_centers).all())
def test_predict(self):
""" Tests that labels are correctly predicted using toy data. """
p1, p2, p3, p4 = [1, 2], [2, 1], [-1, -2], [-2, -1]
arr1 = np.array([p1, p2, p3, p4])
x = ds.array(arr1, block_size=(2, 2))
km = KMeans(n_clusters=2, random_state=666)
km.fit(x)
p5, p6 = [10, 10], [-10, -10]
arr2 = np.array([p1, p2, p3, p4, p5, p6])
x_test = ds.array(arr2, block_size=(2, 2))
labels = km.predict(x_test).collect()
expected_labels = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(labels, expected_labels))
def test_fit_predict(self):
""" Tests fit_predict."""
x, y = make_blobs(n_samples=1500, random_state=170)
x_filtered = np.vstack(
(x[y == 0][:500], x[y == 1][:100], x[y == 2][:10]))
x_train = ds.array(x_filtered, block_size=(300, 2))
kmeans = KMeans(n_clusters=3, random_state=170)
labels = kmeans.fit_predict(x_train).collect()
skmeans = SKMeans(n_clusters=3, random_state=170)
sklabels = skmeans.fit_predict(x_filtered)
centers = np.array([[-8.941375656533449, -5.481371322614891],
[-4.524023204953875, 0.06235042593214654],
[2.332994701667008, 0.37681003933082696]])
self.assertTrue(np.allclose(centers, kmeans.centers))
self.assertTrue(np.allclose(labels, sklabels))
def test_sparse(self):
""" Tests K-means produces the same results using dense and sparse
data structures. """
file_ = "tests/datasets/libsvm/2"
x_sp, _ = ds.load_svmlight_file(file_, (10, 300), 780, True)
x_ds, _ = ds.load_svmlight_file(file_, (10, 300), 780, False)
kmeans = KMeans(random_state=170)
y_sparse = kmeans.fit_predict(x_sp).collect()
sparse_c = kmeans.centers.toarray()
kmeans = KMeans(random_state=170)
y_dense = kmeans.fit_predict(x_ds).collect()
dense_c = kmeans.centers
self.assertTrue(np.allclose(sparse_c, dense_c))
self.assertTrue(np.array_equal(y_sparse, y_dense))
def test_init(self):
# With dense data
x, y = make_blobs(n_samples=1500, random_state=170)
x_filtered = np.vstack(
(x[y == 0][:500], x[y == 1][:100], x[y == 2][:10]))
x_train = ds.array(x_filtered, block_size=(300, 2))
init = np.random.random((5, 2))
km = KMeans(n_clusters=5, init=init)
km.fit(x_train)
self.assertTrue(np.array_equal(km.init, init))
self.assertFalse(np.array_equal(km.centers, init))
# With sparse data
x_sp = ds.array(csr_matrix(x_filtered), block_size=(300, 2))
init = csr_matrix(np.random.random((5, 2)))
km = KMeans(n_clusters=5, init=init)
km.fit(x_sp)
self.assertTrue(np.array_equal(km.init.toarray(), init.toarray()))
self.assertFalse(np.array_equal(km.centers.toarray(), init.toarray()))
def test_load_save(self):
"""
Tests that the save and load methods work properly with the three
expected formats and that an exception is raised when a non-supported
format is provided.
"""
p1, p2, p3, p4 = [1, 2], [2, 1], [-1, -2], [-2, -1]
arr1 = np.array([p1, p2, p3, p4])
x = ds.array(arr1, block_size=(2, 2))
km = KMeans(n_clusters=2, random_state=666)
km.fit(x)
km.save_model("./model_saved_kmeans")
p5, p6 = [10, 10], [-10, -10]
arr2 = np.array([p1, p2, p3, p4, p5, p6])
x_test = ds.array(arr2, block_size=(2, 2))
km2 = KMeans()
km2.load_model("./model_saved_kmeans")
labels = km2.predict(x_test).collect()
expected_labels = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(labels, expected_labels))
km.save_model("./model_saved_kmeans", save_format="cbor")
km2 = KMeans()
km2.load_model("./model_saved_kmeans", load_format="cbor")
labels = km2.predict(x_test).collect()
self.assertTrue(np.array_equal(labels, expected_labels))
km.save_model("./model_saved_kmeans", save_format="pickle")
km2 = KMeans()
km2.load_model("./model_saved_kmeans", load_format="pickle")
labels = km2.predict(x_test).collect()
self.assertTrue(np.array_equal(labels, expected_labels))
with self.assertRaises(ValueError):
km.save_model("./model_saved_kmeans", save_format="txt")
with self.assertRaises(ValueError):
km2 = KMeans()
km2.load_model("./model_saved_kmeans", load_format="txt")
p1, p2, p3, p4 = [14, 15], [15, 14], [7, 8], [8, 7]
arr1 = np.array([p1, p2, p3, p4])
x = ds.array(arr1, block_size=(2, 2))
km = KMeans(n_clusters=2, random_state=666)
km.fit(x)
km.save_model("./model_saved_kmeans", overwrite=False)
km2 = KMeans()
km2.load_model("./model_saved_kmeans", load_format="pickle")
labels = km2.predict(x_test).collect()
expected_labels = np.array([0, 0, 1, 1, 0, 1])
self.assertTrue(np.array_equal(labels, expected_labels))
cbor2_module = utilmodel.cbor2
utilmodel.cbor2 = None
with self.assertRaises(ModuleNotFoundError):
km.save_model("./model_saved_kmeans", save_format="cbor")
with self.assertRaises(ModuleNotFoundError):
km2.load_model("./model_saved_kmeans", load_format="cbor")
utilmodel.cbor2 = cbor2_module
def main():
unittest.main()
if __name__ == '__main__':
main()
|
the-stack_106_19295
|
import sys
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
from dpu_utils.utils import RichPath
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
'''
# Copy Span Visualization
'''
model_path = sys.argv[1]
@st.cache
def get_model(filename):
path = RichPath.create(filename)
model = BaseComponent.restore_model(path, device='cpu')
model.eval()
return model
st.markdown(f'> Using model from {model_path}')
before_tokens = st.text_area('Input (space) tokenized before version.').strip().split()
after_tokens = st.text_area('Input (space) tokenized after version.').strip().split()
'''
#### Input Data
'''
edit = Edit(input_sequence=before_tokens, output_sequence=after_tokens, provenance='', edit_type='')
st.write(edit)
model = get_model(model_path)
tensorized_data = [model.load_data_from_sample(edit)]
mb_data, is_full, num_elements = model.create_minibatch(tensorized_data, max_num_items=10)
assert num_elements == 1
ground_input_sequence = [edit.input_sequence]
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=ground_input_sequence)[0]
ll, debug_info = model.compute_likelihood(**mb_data, return_debug_info=True)
ll = ll.cpu().numpy()
st.markdown(f' > Likelihood of target edit {ll[0]:.2f}')
copy_span_logprobs = debug_info['copy_span_logprobs'][0]
gen_logprobs = debug_info['generation_logprobs'][0]
vocabulary = debug_info['vocabulary']
before_tokens = ['<s>'] + before_tokens + ['</s>']
after_tokens = after_tokens + ['</s>']
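# For each position of the target ("after") sequence we visualize the model's
# probability of copying every (start, end) span of the source sequence, along
# with its single best copy and generation suggestions.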
for i in range(copy_span_logprobs.shape[0]):
st.markdown(f'### At position {i}: "{after_tokens[i]}"')
st.markdown(f'Current context `{["<s>"] + after_tokens[:i]}`')
plt.figure(figsize=[1, 1])
current_copy_span_probs = np.exp(copy_span_logprobs[i])
plt.matshow(current_copy_span_probs, cmap='Greys')
plt.xticks(range(copy_span_logprobs.shape[1]), before_tokens, fontsize=8, rotation=90)
plt.xlabel('Start Span Pos')
plt.yticks(range(copy_span_logprobs.shape[2]), before_tokens, fontsize=8, rotation=0)
plt.ylabel('End Span Pos')
plt.colorbar()
st.pyplot()
max_idx = np.argmax(current_copy_span_probs)
from_idx, to_idx = max_idx // current_copy_span_probs.shape[1], max_idx % current_copy_span_probs.shape[1],
st.markdown(f'* Best copy suggestion: `Copy({from_idx}:{to_idx+1})` with prob {np.max(current_copy_span_probs)*100:.1f}%, _i.e._ `Copy({before_tokens[from_idx: to_idx+1]})`.')
st.markdown(f'* Best generation suggestion: `Gen("{vocabulary.get_name_for_id(np.argmax(gen_logprobs[i]))}")` with prob {np.exp(np.max(gen_logprobs[i]))*100:.1f}%')
'''### Beam decoding results '''
for i, (prediction, logprob) in enumerate(zip(predicted_outputs[0], predicted_outputs[1])):
if i > 2:
break
st.markdown(f'* {" ".join(prediction)} ({np.exp(logprob)*100:.1f}%)')
|
the-stack_106_19296
|
#!/usr/bin/python3
import argparse
import http.client
import os
import csv
import json
import dateutil.parser
def main(file_path, delimiter, max_rows, elastic_index, json_struct, datetime_field, elastic_type, elastic_address, id_column):
endpoint = '/_bulk'
if max_rows is None:
max_rows_disp = "all"
else:
max_rows_disp = max_rows
print("")
print(" ----- CSV to ElasticSearch ----- ")
print("Importing %s rows into `%s` from '%s'" % (max_rows_disp, elastic_index, file_path))
print("")
count = 0
headers = []
headers_position = {}
to_elastic_string = ""
with open(file_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=delimiter, quotechar='"')
for row in reader:
if count == 0:
for iterator, col in enumerate(row):
headers.append(col)
headers_position[col] = iterator
elif max_rows is not None and count >= max_rows:
print('Max rows imported - exit')
break
            elif len(row[0]) == 0:  # empty rows at the end of the document
print("Found empty rows at the end of document")
break
else:
pos = 0
if os.name == 'nt':
_data = json_struct.replace("^", '"')
else:
_data = json_struct.replace("'", '"')
_data = _data.replace('\n','').replace('\r','')
for header in headers:
if header == datetime_field:
datetime_type = dateutil.parser.parse(row[pos])
_data = _data.replace('"%' + header + '%"', '"{:%Y-%m-%dT%H:%M}"'.format(datetime_type))
else:
try:
int(row[pos])
_data = _data.replace('"%' + header + '%"', row[pos])
except ValueError:
_data = _data.replace('%' + header + '%', row[pos])
pos += 1
# Send the request
if id_column is not None:
index_row = {"index": {"_index": elastic_index,
"_type": elastic_type,
'_id': row[headers_position[id_column]]}}
else:
index_row = {"index": {"_index": elastic_index, "_type": elastic_type}}
json_string = json.dumps(index_row) + "\n" + _data + "\n"
to_elastic_string += json_string
count += 1
print('Reached end of CSV - creating file...')
file = open("body.json","w")
file.write(to_elastic_string)
file.close()
return
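
# Illustrative sketch (not called by this script): the generated bulk body could
# be posted to Elasticsearch's /_bulk endpoint with the already-imported
# http.client module. The address format ("host:port") matches --elastic-address.
def post_bulk_body(elastic_address, body_path="body.json"):
    connection = http.client.HTTPConnection(elastic_address)
    with open(body_path, "r") as body_file:
        connection.request("POST", "/_bulk", body_file.read(),
                           {"Content-Type": "application/x-ndjson"})
    response = connection.getresponse()
    print(response.status, response.reason)
    connection.close()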
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='CSV to ElasticSearch.')
parser.add_argument('--elastic-address',
required=False,
type=str,
default='localhost:9200',
help='Your elasticsearch endpoint address')
parser.add_argument('--csv-file',
required=True,
type=str,
help='path to csv to import')
parser.add_argument('--json-struct',
required=True,
type=str,
help='json to be inserted')
parser.add_argument('--elastic-index',
required=True,
type=str,
help='elastic index you want to put data in')
parser.add_argument('--elastic-type',
required=False,
type=str,
default='stop',
help='Your entry type for elastic')
parser.add_argument('--max-rows',
type=int,
default=None,
help='max rows to import')
parser.add_argument('--datetime-field',
type=str,
help='datetime field for elastic')
parser.add_argument('--id-column',
type=str,
default=None,
help='If you want to have index and you have it in csv, this the argument to point to it')
parser.add_argument('--delimiter',
type=str,
default=",",
                        help='Delimiter to use instead of the default ","')
parsed_args = parser.parse_args()
main(file_path=parsed_args.csv_file, delimiter = parsed_args.delimiter, json_struct=parsed_args.json_struct,
elastic_index=parsed_args.elastic_index, elastic_type=parsed_args.elastic_type,
datetime_field=parsed_args.datetime_field, max_rows=parsed_args.max_rows,
elastic_address=parsed_args.elastic_address, id_column=parsed_args.id_column)
|
the-stack_106_19297
|
import numpy as np
# Set the random seed
np.random.seed(666)
from tensorflow import set_random_seed
set_random_seed(2)
from read_data import readData, readEmbeddings, readDataTest
from general import prepareData, writeOutput, prepareDataTest
from keras.layers import Dense, Dropout, LSTM, Embedding, Bidirectional
from keras.models import Model
from keras.layers.core import Activation
from keras.layers import Embedding
from keras.layers import Input, Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.callbacks import EarlyStopping
from keras.initializers import glorot_normal, glorot_uniform
from gensim.models.keyedvectors import KeyedVectors
from keras import optimizers
from keras.utils import to_categorical
from keras import regularizers
# Read the data
input_size = 20
print("Leyendo datos de entrenamiento...")
data_train, label_train = readData('tass_2018_task_4_subtask2_train_dev/SANSE_train-2.tsv',input_size)
print(data_train.shape)
print(label_train.shape)
print("Leyendo datos de desarrollo...")
data_dev, label_dev = readData('tass_2018_task_4_subtask2_train_dev/SANSE_dev-2.tsv',input_size)
print(data_dev.shape)
print(label_dev.shape)
print("Leyendo datos de test...")
data_test_1, id_test_1 = readDataTest('/Users/nuria/SEPLN/test-s2.tsv',input_size)
print("Leyendo los word embeddings...")
embeddings = KeyedVectors.load_word2vec_format('SBW-vectors-300-min5.bin', binary=True)
print("Transformamos las frases con los embeddings...")
data_train_idx, data_dev_idx, matrix_embeddings, vocab = prepareData(data_train, data_dev, embeddings)
data_test_1 = prepareDataTest(data_test_1, vocab)
data_train_idx = np.array(data_train_idx)
data_dev_idx = np.array(data_dev_idx)
matrix_embeddings = np.array(matrix_embeddings)
print(data_train_idx.shape)
print(data_dev_idx.shape)
print(matrix_embeddings.shape)
###################### Parameter configuration
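# Model: frozen embedding layer -> LSTM(512, return_sequences) -> regularized
# dense blocks (256, 128) with dropout -> flatten -> dense(32) -> softmax over
# the two classes.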
sequence_input = Input(shape = (input_size, ), dtype = 'float64')
embedding_layer = Embedding(matrix_embeddings.shape[0], matrix_embeddings.shape[1], weights=[matrix_embeddings],trainable=False, input_length = input_size) #Trainable false
embedded_sequence = embedding_layer(sequence_input)
x = LSTM(units = 512, return_sequences=True)(embedded_sequence)
x = Dense(256, activation = "relu", kernel_initializer=glorot_uniform(seed=2), activity_regularizer=regularizers.l2(0.0001))(x)
x = Dropout(0.35)(x)
x = Dense(128, activation = "relu", kernel_initializer=glorot_uniform(seed=2), activity_regularizer=regularizers.l2(0.001))(x)
x = Dropout(0.5)(x)
x = Flatten()(x)
x = Dense(32, activation = "relu", kernel_initializer=glorot_uniform(seed=2), activity_regularizer=regularizers.l2(0.01))(x)
x = Dropout(0.5)(x)
preds = Dense(2, activation = "softmax")(x)
model = Model(sequence_input, preds)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer = 'adam', metrics=['accuracy'])
earlyStopping = EarlyStopping('loss', patience=5, mode='min')
modelo = model.fit(x = data_train_idx, y = to_categorical(label_train,2), batch_size = 25, epochs = 40, validation_data=(data_dev_idx, to_categorical(label_dev,2)), shuffle = False, callbacks=[earlyStopping])
loss, acc = model.evaluate(x=data_dev_idx, y=to_categorical(label_dev,2), batch_size=25)
print(loss)
print(acc)
y_pred_1 = model.predict(data_test_1, batch_size=25)
writeOutput(y_pred_1, id_test_1, "model3_s2.txt")
|
the-stack_106_19299
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
import tvm.relay.testing
import pytest
from numpy import isclose
from typing import Union
SEMVER = '#[version = "0.0.5"]\n'
BINARY_OPS = {
"*": relay.multiply,
"/": relay.divide,
"+": relay.add,
"-": relay.subtract,
"<": relay.less,
">": relay.greater,
"<=": relay.less_equal,
">=": relay.greater_equal,
"==": relay.equal,
"!=": relay.not_equal,
}
TYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
"int8x4",
"uint1x4",
"float16x4",
}
LIST_DEFN = """
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
def assert_graph_equal(lhs, rhs):
tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True)
def graph_equal(lhs, rhs):
return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True)
def roundtrip_expr(expr):
text = tvm.relay.Expr.astext(expr, show_meta_data=False)
x = tvm.parser.parse_expr(text)
assert_graph_equal(x, expr)
# Testing Utilities for expressions.
def roundtrip(expr):
x = tvm.parser.fromtext(expr.astext())
assert_graph_equal(x, expr)
def parse_text(code):
expr = tvm.parser.parse_expr(code)
roundtrip_expr(expr)
return expr
def parses_as(code, expr):
# type: (str, relay.Expr) -> bool
parsed = parse_text(code)
result = graph_equal(parsed, expr)
return result
# Testing Utilities for full modules.
def parse_module(code):
mod = tvm.parser.parse(SEMVER + code)
roundtrip(mod)
return mod
def assert_parses_as(code, expr):
parsed = parse_text(code)
assert_graph_equal(parsed, expr)
def assert_parse_module_as(code, mod):
mod = tvm.relay.transform.InferType()(mod)
parsed = parse_module(code)
assert_graph_equal(parsed, mod)
def get_scalar(x):
# type: (relay.Constant) -> (Union[float, int, bool])
return x.data.asnumpy().item()
int32 = relay.scalar_type("int32")
_ = relay.Var("_")
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)
Y_ANNO = relay.Var("y", int32)
UNIT = relay.Tuple([])
def test_comments():
assert_parses_as(
"""
// This is a line comment!
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
This is still a block comment!
*/
()
""",
UNIT,
)
assert_parses_as(
"""
/* This is a block comment!
/*Block comment is recursive!*/
*/
()
""",
UNIT,
)
def test_int_literal():
assert isinstance(parse_text("1"), relay.Constant)
assert isinstance(parse_text("1").data, tvm.nd.NDArray)
assert get_scalar(parse_text("1")) == 1
assert get_scalar(parse_text("10")) == 10
assert get_scalar(parse_text("0")) == 0
assert get_scalar(parse_text("-100")) == -100
assert get_scalar(parse_text("-05")) == -5
def test_float_literal():
assert get_scalar(parse_text("1.0f")) == 1.0
assert isclose(get_scalar(parse_text("1.56667f")), 1.56667)
assert get_scalar(parse_text("0.0f")) == 0.0
assert get_scalar(parse_text("-10.0f")) == -10.0
# scientific notation
assert isclose(get_scalar(parse_text("1e-1f")), 1e-1)
assert get_scalar(parse_text("1e+1f")) == 1e1
assert isclose(get_scalar(parse_text("1E-1f")), 1e-1)
assert get_scalar(parse_text("1E+1f")) == 1e1
assert isclose(get_scalar(parse_text("1.0e-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0e+1f")) == 1.0e1
assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0E+1f")) == 1.0e1
def test_bool_literal():
assert get_scalar(parse_text("True")) == True
assert get_scalar(parse_text("False")) == False
def test_negative():
# need to handle parsing non-literal operations
# assert isinstance(parse_text("let %x = 1; -%x").body, relay.Call)
assert get_scalar(parse_text("--10")) == 10
assert get_scalar(parse_text("---10")) == -10
def test_bin_op():
for bin_op in BINARY_OPS.keys():
assert_parses_as(
"1 {} 1".format(bin_op), BINARY_OPS.get(bin_op)(relay.const(1), relay.const(1))
)
def test_parens():
assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1"))
assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)"))
def test_op_assoc():
assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1"))
assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))"))
def test_vars():
# var
var = parse_text("let %foo = (); %foo")
assert isinstance(var.body, relay.Var)
assert var.body.name_hint == "foo"
# global var
global_var = parse_text("@foo")
assert isinstance(global_var, relay.GlobalVar)
assert global_var.name_hint == "foo"
# operator id
op = parse_text("add")
assert isinstance(op, tvm.ir.Op)
assert op.name == "add"
# operator id with prefix
op = parse_text("nn.global_avg_pool2d")
assert isinstance(op, tvm.ir.Op)
assert op.name == "nn.global_avg_pool2d"
def test_meta_ref():
with pytest.raises(tvm.error.DiagnosticError):
meta_op = parse_text("meta[type_key][1337]")
assert meta_op.attrs.node_type_key == "type_key"
assert meta_op.attrs.node_index == 1337
def test_let():
assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT))
assert_parses_as(
"""
let %x = 1;
let %y = 2;
()
""",
relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)),
)
def test_seq():
assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT))
assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT))
def test_graph():
code = "%0 = (); %1 = 1; (%0, %0, %1)"
assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)]))
def test_graph_single():
assert_parses_as("%1 = (); %1", relay.Tuple([]))
def test_let_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let @x = 1; ()")
def test_let_op():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let x = 1; ()")
def test_tuple():
assert_parses_as("()", relay.Tuple([]))
assert_parses_as("(0,)", relay.Tuple([relay.const(0)]))
assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), relay.const(1)]))
assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)]))
def test_tuple_proj():
x = relay.var("x", shape=())
assert_parses_as(
"free_var %x: float32; %x((%x,).0, %x)",
relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]),
)
def test_func():
# 0 args
assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, []))
# 1 arg
assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, []))
# 2 args
assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, []))
# annotations
assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, []))
# Refactor the attribute syntax and printing.
#
# # attributes
# assert_parses_as(
# "fn (n=5) { () }",
# relay.Function([], UNIT, None, None, tvm.ir.make_node("DictAttrs", n=relay.const(5)))
# )
# TODO(@jmp): Crashes if %x isn't annotated.
def test_defn():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
%x
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_recursive_call():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
@id(%x)
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_ifelse():
assert_parses_as(
"""
if (True) {
0
} else {
1
}
""",
relay.If(relay.const(True), relay.const(0), relay.const(1)),
)
def test_ifelse_scope():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
if (True) {
let %x = ();
()
} else {
%x
}
"""
)
def test_ref():
program = """
#[version = "0.0.5"]
def @main(%x: float32) {
%0 = ref(%x);
ref_write(%0, 1f);
ref_read(%0)
}
"""
tvm.parser.parse(program)
def test_call():
# select right function to call: simple ident case
id_func = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
10 * %id(10)
""",
relay.Let(
id_func,
relay.Function([X], X, None, []),
relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])),
),
)
# 0 args
constant = relay.Var("constant")
assert_parses_as(
"""
let %constant = fn () { 0 };
%constant()
""",
relay.Let(
constant,
relay.Function([], relay.const(0), None, []),
relay.Call(constant, [], None, None),
),
)
# 1 arg
id_var = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
%id(1)
""",
relay.Let(
id_var,
relay.Function([X], X, None, []),
relay.Call(id_var, [relay.const(1)], None, None),
),
)
# 2 args
multiply = relay.Var("multiply")
assert_parses_as(
"""
let %multiply = fn (%x, %y) { %x * %y };
%multiply(0, 0)
""",
relay.Let(
multiply,
relay.Function([X, Y], relay.multiply(X, Y), None, []),
relay.Call(multiply, [relay.const(0), relay.const(0)], None, None),
),
)
# anonymous function
assert_parses_as(
"""
(fn (%x) { %x })(0)
""",
relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None),
)
# curried function
curried_mult = relay.Var("curried_mult")
assert_parses_as(
"""
let %curried_mult =
fn (%x) {
fn (%y) {
%x * %y
}
};
%curried_mult(0);
%curried_mult(0)(0)
""",
relay.Let(
curried_mult,
relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []),
relay.Let(
_,
relay.Call(curried_mult, [relay.const(0)], None, None),
relay.Call(
relay.Call(curried_mult, [relay.const(0)], None, None),
[relay.const(0)],
None,
None,
),
),
),
)
# op
assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None))
# Types
def test_incomplete_type():
assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT))
def test_builtin_types():
for builtin_type in TYPES:
parse_text("let %_ : {} = (); ()".format(builtin_type))
def test_tensor_type():
assert_parses_as(
"let %_ : Tensor[(), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(?, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT),
)
def test_function_type():
assert_parses_as(
"""
let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([], int32, [], [])),
relay.Function([], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32], int32, [], [])),
relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
relay.Function(
[relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []
),
UNIT,
),
)
def test_tuple_type():
assert_parses_as(
"""
let %_: () = (); ()
""",
relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT),
)
assert_parses_as(
"""
let %_: (int32,) = (0,); ()
""",
relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT),
)
assert_parses_as(
"""
let %_: (int32, int32) = (0, 1); ()
""",
relay.Let(
relay.Var("_", relay.TupleType([int32, int32])),
relay.Tuple([relay.const(0), relay.const(1)]),
UNIT,
),
)
def test_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { Nil }
""",
mod,
)
def test_adt_any():
code = """
type my_dtype {
my_cons(Tensor[(?, 1), uint16]),
}
"""
mod = parse_module(code)
items = mod.type_definitions.items()
global_type_var, type_data = items[0]
assert global_type_var.name_hint == "my_dtype"
ctors = type_data.constructors
assert len(ctors) == 1
my_cons = ctors[0]
assert my_cons.name_hint == "my_cons"
ty_shape = my_cons.inputs[0].shape
assert isinstance(ty_shape[0], tvm.tir.Any)
assert ty_shape[1] == 1
def test_empty_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { }
""",
mod,
)
def test_multiple_cons_defn():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
prog = relay.TypeData(
list_var,
[typ_var],
[
relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var),
relay.Constructor("Nil", [], list_var),
],
)
mod[list_var] = prog
assert_parse_module_as(LIST_DEFN, mod)
def test_multiple_type_param_defn():
glob_typ_var = relay.GlobalTypeVar("Either")
typ_var_a = relay.TypeVar("A")
typ_var_b = relay.TypeVar("B")
prog = relay.TypeData(
glob_typ_var,
[typ_var_a, typ_var_b],
[
relay.Constructor("Left", [typ_var_a], glob_typ_var),
relay.Constructor("Right", [typ_var_b], glob_typ_var),
],
)
mod = tvm.IRModule()
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Either[A, B] {
Left(A),
Right(B),
}
""",
mod,
)
def test_match():
# pair each match keyword with whether it specifies a complete match or not
match_keywords = [("match", True), ("match?", False)]
for (match_keyword, is_complete) in match_keywords:
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
length_var = relay.GlobalVar("length")
typ_var = relay.TypeVar("A")
input_type = list_var(typ_var)
input_var = relay.Var("xs", input_type)
rest_var = relay.Var("rest")
cons_case = relay.Let(
relay.var("", type_annotation=None),
UNIT,
relay.add(relay.const(1), relay.Call(length_var, [rest_var])),
)
body = relay.Match(
input_var,
[
relay.Clause(
relay.PatternConstructor(
cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)]
),
cons_case,
),
relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)),
],
complete=is_complete,
)
length_func = relay.Function([input_var], body, int32, [typ_var])
mod[length_var] = length_func
assert_parse_module_as(
"""
%s
def @length[A](%%xs: List[A]) -> int32 {
%s (%%xs) {
Cons(_, %%rest : List[A]) => {
();
1 + @length(%%rest)
},
Nil => 0,
}
}
"""
% (LIST_DEFN, match_keyword),
mod,
)
def test_adt_cons_expr():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
make_singleton_var = relay.GlobalVar("make_singleton")
input_var = relay.Var("x", int32)
make_singleton_func = relay.Function(
[input_var], cons_constructor(input_var, nil_constructor()), list_var(int32)
)
mod[make_singleton_var] = make_singleton_func
assert_parse_module_as(
"""
%s
def @make_singleton(%%x: int32) -> List[int32] {
Cons(%%x, Nil)
}
"""
% LIST_DEFN,
mod,
)
def test_duplicate_adt_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_module(
"""
%s
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
% LIST_DEFN
)
def test_duplicate_adt_cons():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Haha { Lmao }
"""
)
def test_duplicate_adt_cons_defn():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
type Ayy { Lmao }
type Lmao { Ayy }
"""
)
def test_duplicate_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
def @id[A](%x: A) -> A { x }
def @id[A](%x: A) -> A { x }
"""
)
def test_extern_adt_defn():
mod = tvm.IRModule()
extern_var = relay.GlobalTypeVar("T")
typ_var = relay.TypeVar("A")
extern_def = relay.TypeData(extern_var, [typ_var], [])
mod[extern_var] = extern_def
assert_parse_module_as(
"""
extern type T[A]
""",
mod,
)
def test_import_grad():
mod = tvm.IRModule()
mod.import_from_std("gradient.rly")
def test_mlp():
mod, _ = relay.testing.mlp.get_workload(1)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def inline_params(mod, params):
main_fn = mod["main"]
str_to_var = {}
for param in main_fn.params:
str_to_var[param.name_hint] = param
bind_map = {}
for param in params:
bind_map[str_to_var[param]] = relay.const(params[param])
body = relay.bind(main_fn.body, bind_map)
main_fn = relay.Function(relay.analysis.free_vars(body), body)
mod._add("main", main_fn, True)
return mod
def test_mlp_inlined_params():
mod, params = relay.testing.mlp.get_workload(1)
mod = inline_params(mod, params)
mod = relay.transform.InferType()(mod)
text = mod.astext()
parsed_mod = tvm.parser.parse(text)
tvm.ir.assert_structural_equal(mod, parsed_mod)
def test_tuple_return_value():
program = """
type Box[T] {
constructor(T)
}
def @example() {
%0 = ();
%1 = constructor(%0);
%2 = constructor(0f);
(%1, %2,)
}
"""
parse_module(program)
def test_parse_if_in_binding():
program = """
def @example(%b: bool) {
%0 = if (%b) {
1
} else {
0
};
%0
}
"""
parse_module(program)
def test_op_string_attr():
call = parse_text(
"""
free_var %x: Tensor[(1, 32, 32, 3), float32];
free_var %y: Tensor[(1, 1, 3, 3), float32];
nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO")
"""
)
assert isinstance(call.op, tvm.ir.Op)
assert call.op.name == "nn.conv2d"
assert call.attrs.data_layout == "NHWC"
assert call.attrs.kernel_layout == "HWIO"
def test_load_prelude():
mod = tvm.IRModule()
mod.import_from_std("prelude.rly")
tvm.parser.parse(mod.astext())
def test_call_attrs():
def get_func(shape, dtype):
x0 = relay.var("data", shape=shape, dtype=dtype)
w0 = relay.var("weight", shape=shape, dtype=dtype)
a = relay.nn.dense(x0, w0)
b = relay.nn.relu(a)
d = relay.add(b, relay.const(1.0, dtype=dtype))
return relay.Function([x0, w0], d)
# build relay graph
shape = (2, 4)
dtype = "float32"
sub_func = get_func(shape, dtype)
p0 = relay.var("p0", shape=shape, dtype=dtype)
p1 = relay.var("p1", shape=shape, dtype=dtype)
attr = tvm.ir.make_node("attrs.TestAttrs", name="func_call_attrs")
call = relay.Call(sub_func, [p0, p1], attrs=attr)
func = relay.Function([p0, p1], call)
# build relay module
mod = tvm.IRModule()
mod["main"] = func
mod = tvm.relay.transform.InferType()(mod)
# assert equal
program = """
def @main(%p0: Tensor[(2, 4), float32], %p1: Tensor[(2, 4), float32]) {
%2 = fn (%data: Tensor[(2, 4), float32], %weight: Tensor[(2, 4), float32]) {
%0 = nn.dense(%data, %weight, units=None);
%1 = nn.relu(%0);
add(%1, 1f)
};
%2(%p0, %p1, name="func_call_attrs", attrs_type_key="attrs.TestAttrs")
}
"""
parsed = parse_module(program)
assert_graph_equal(parsed, mod)
def test_tokenize_inf():
x = relay.var("x", shape=(3, 4), dtype="float32")
y = relay.clip(x, -np.inf, np.inf)
f = relay.Function([x], y)
mod = tvm.IRModule.from_expr(f)
mod = relay.transform.AnnotateSpans()(mod)
def test_func_attrs():
attrs = tvm.ir.make_node("DictAttrs", **{"Primitive": 1, "relay.reshape_only": 1})
x = relay.var("x", shape=(2, 3))
func = relay.Function([x], relay.reshape(x, (-1,)), attrs=attrs)
assert_parses_as(func.astext(), func)
if __name__ == "__main__":
import sys
pytest.main(sys.argv)
|
the-stack_106_19300
|
import os
import sys
import torch
from torch.nn.functional import conv2d
import numpy as np
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from losses.mmd.approximate_patch_mmd import get_reduction_fn
class PatchLoss(torch.nn.Module):
def _sum_over_patches(self, diffs):
return conv2d(diffs, self.sum_w.to(diffs.device), stride=self.strides, padding=self.padding)
def _extract_patches(self, x):
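        # Unfold yields (bs, c*ps*ps, n_patches); the view below reshapes it to
        # (bs, c, ps, ps, nh, nw) so per-patch statistics can be computed directly.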
patches = self.unfolder(x)
bs, c, h, w = x.shape
ps = self.patch_size if isinstance(self.patch_size, int) else self.patch_size[0]
nh = int(np.floor(((h + 2 * self.padding - 1 - (ps - 1)) / self.strides) + 1))
nw = int(np.floor(((w + 2 * self.padding - 1 - (ps - 1)) / self.strides) + 1))
return patches.view(bs, c, ps, ps, nh, nw)
def __init__(self, patch_size, strides=1, scale=1., spatial_reduction='mean', batch_reduction='mean', pad_image=False, normalize_patch='none', ignore_patch_norm=False):
super(PatchLoss, self).__init__()
self.patch_size = (patch_size, patch_size) if type(patch_size) == int else patch_size
self.strides = strides
self.scale = scale
w_np = np.ones((1, 1, self.patch_size[0], self.patch_size[1]))
self.sum_w = torch.from_numpy(w_np).float()
        self.sum_w.requires_grad = False  # requires_grad_ is a method; set the attribute directly
self.spatial_reduction = get_reduction_fn(spatial_reduction)
self.batch_reduction = get_reduction_fn(batch_reduction)
self.pad_image = pad_image
self.padding = 0 if not self.pad_image else (self.patch_size[0] // 2)
self.normalize_patch = normalize_patch
self.unfolder = torch.nn.Unfold(kernel_size=patch_size, stride=strides, padding=self.padding)
self.ignore_patch_norm = ignore_patch_norm
self.name = f"PatchLoss(p={patch_size})"
def _channel_reduction_op(self, diffs):
raise NotImplementedError()
def _post_patch_summation(self, patch_sums):
raise NotImplementedError()
def forward(self, x, y):
x = x * self.scale
y = y * self.scale
s = x.shape[2]
if self.normalize_patch == 'none' and not self.ignore_patch_norm:
xy_elem_diff = (x - y) ** 2
xy_elem_diff = self._channel_reduction_op(xy_elem_diff)
patch_diff_sums = self._sum_over_patches(xy_elem_diff)
patch_losses = self._post_patch_summation(patch_diff_sums)
else:
norm_dims = (2, 3) if self.normalize_patch == 'channel_mean' else (1, 2, 3)
def prepare_patches(t: torch.Tensor):
t = self._extract_patches(t)
if self.normalize_patch != 'none':
t = t - t.mean(dim=norm_dims, keepdim=True)
if self.ignore_patch_norm:
t = t / safe_sqrt(t.pow(2).sum(dim=(1,2,3), keepdim=True))
return t
x_patches = prepare_patches(x)
y_patches = prepare_patches(y)
xy_elem_diff = (x_patches - y_patches) ** 2
xy_elem_diff = self._channel_reduction_op(xy_elem_diff)
patch_diff_sums = torch.sum(xy_elem_diff, dim=(2,3))
patch_losses = self._post_patch_summation(patch_diff_sums)
patch_losses = self.spatial_reduction(patch_losses, dim=(1, 2, 3))
return self.batch_reduction(patch_losses)
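# PatchLoss is a template: subclasses define _channel_reduction_op (how squared
# per-pixel differences are reduced over channels) and _post_patch_summation
# (how per-patch sums become a loss). A minimal sketch of a plain squared-L2
# variant, purely illustrative and not part of the original code:
#
#   class PatchL2Loss(PatchLoss):
#       def _channel_reduction_op(self, diffs):
#           return diffs.sum(dim=1, keepdims=True)
#       def _post_patch_summation(self, patch_sums):
#           return patch_sums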
class PatchRBFLoss(PatchLoss):
def _channel_reduction_op(self, diffs):
return diffs.sum(dim=1, keepdims=True)
def _post_patch_summation(self, patch_sums):
return 1 - 1 * (patch_sums / (-2 * self.sigma ** 2)).exp()
def __init__(self, patch_size, strides=1, scale=1., spatial_reduction='mean', batch_reduction='mean', sigma=0.5, pad_image=False, **patch_loss_kwargs):
super(PatchRBFLoss, self).__init__(patch_size=patch_size, strides=strides, scale=scale, spatial_reduction=spatial_reduction, batch_reduction=batch_reduction, pad_image=pad_image, **patch_loss_kwargs)
self.sigma = sigma * self.patch_size[0] * self.patch_size[1]
self.name = f"PatchLoss(p={patch_size},s={sigma})"
class PatchRBFLaplacianLoss(PatchLoss):
def _channel_reduction_op(self, diffs):
return diffs.sum(dim=1, keepdims=True)
def _post_patch_summation(self, patch_sums):
patch_sums = safe_sqrt(patch_sums)
        return 1 - 1 * (patch_sums / (-1 * self.sigma)).exp()
def __init__(self, patch_size, strides=1, scale=1., spatial_reduction='mean', batch_reduction='mean', sigma=0.5, pad_image=False, **patch_loss_kwargs):
super(PatchRBFLaplacianLoss, self).__init__(patch_size=patch_size, strides=strides, scale=scale, spatial_reduction=spatial_reduction, batch_reduction=batch_reduction, pad_image=pad_image, **patch_loss_kwargs)
self.sigma = sigma * self.patch_size[0] * self.patch_size[1]
def safe_sqrt(tensor: torch.Tensor):
return tensor.clamp(min=1e-30).sqrt()
if __name__ == '__main__':
loss = PatchRBFLoss(3, batch_reduction='none')
    x = torch.ones((1, 3, 64, 64), dtype=torch.float32) * 2  # float32 to match the float32 summation kernel used by conv2d
    y = torch.ones((4, 3, 64, 64), dtype=torch.float32) * 5
z = loss(x, y)
print(z)
|
the-stack_106_19303
|
from __future__ import division
import json
import os
import re
import sys
from subprocess import Popen, PIPE
from math import log, ceil
from tempfile import TemporaryFile
from warnings import warn
from functools import wraps
try:
import audioop
except ImportError:
import pyaudioop as audioop
if sys.version_info >= (3, 0):
basestring = str
FRAME_WIDTHS = {
8: 1,
16: 2,
32: 4,
}
ARRAY_TYPES = {
8: "b",
16: "h",
32: "i",
}
ARRAY_RANGES = {
8: (-0x80, 0x7f),
16: (-0x8000, 0x7fff),
32: (-0x80000000, 0x7fffffff),
}
def get_frame_width(bit_depth):
return FRAME_WIDTHS[bit_depth]
def get_array_type(bit_depth, signed=True):
t = ARRAY_TYPES[bit_depth]
if not signed:
t = t.upper()
return t
def get_min_max_value(bit_depth):
return ARRAY_RANGES[bit_depth]
def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True):
close_fd = False
if fd is None and tempfile:
fd = TemporaryFile(mode=mode)
close_fd = True
if isinstance(fd, basestring):
fd = open(fd, mode=mode)
close_fd = True
try:
if isinstance(fd, os.PathLike):
fd = open(fd, mode=mode)
close_fd = True
except AttributeError:
# module os has no attribute PathLike, so we're on python < 3.6.
# The protocol we're trying to support doesn't exist, so just pass.
pass
return fd, close_fd
def db_to_float(db, using_amplitude=True):
"""
Converts the input db to a float, which represents the equivalent
ratio in power.
"""
db = float(db)
if using_amplitude:
return 10 ** (db / 20)
else: # using power
return 10 ** (db / 10)
def ratio_to_db(ratio, val2=None, using_amplitude=True):
"""
Converts the input float to db, which represents the equivalent
to the ratio in power represented by the multiplier passed in.
"""
ratio = float(ratio)
# accept 2 values and use the ratio of val1 to val2
if val2 is not None:
ratio = ratio / val2
# special case for multiply-by-zero (convert to silence)
if ratio == 0:
return -float('inf')
if using_amplitude:
return 20 * log(ratio, 10)
else: # using power
return 10 * log(ratio, 10)
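# Quick reference for the two conversions above (values are approximate):
#   db_to_float(6.0)                         # ~1.995 (amplitude ratio)
#   db_to_float(6.0, using_amplitude=False)  # ~3.981 (power ratio)
#   ratio_to_db(2.0)                         # ~6.02 dB
#   ratio_to_db(200, 100)                    # ~6.02 dB (ratio of val1 to val2)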
def register_pydub_effect(fn, name=None):
"""
decorator for adding pydub effects to the AudioSegment objects.
example use:
@register_pydub_effect
def normalize(audio_segment):
...
or you can specify a name:
@register_pydub_effect("normalize")
def normalize_audio_segment(audio_segment):
...
"""
if isinstance(fn, basestring):
name = fn
return lambda fn: register_pydub_effect(fn, name)
if name is None:
name = fn.__name__
from .audio_segment import AudioSegment
setattr(AudioSegment, name, fn)
return fn
def make_chunks(audio_segment, chunk_length):
"""
Breaks an AudioSegment into chunks that are <chunk_length> milliseconds
long.
if chunk_length is 50 then you'll get a list of 50 millisecond long audio
segments back (except the last one, which can be shorter)
"""
number_of_chunks = ceil(len(audio_segment) / float(chunk_length))
return [audio_segment[i * chunk_length:(i + 1) * chunk_length]
for i in range(int(number_of_chunks))]
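# For example, a 1050 ms segment split with chunk_length=500 yields three chunks
# of 500 ms, 500 ms and 50 ms.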
def which(program):
"""
Mimics behavior of UNIX which command.
"""
# Add .exe program extension for windows support
if os.name == "nt" and not program.endswith(".exe"):
program += ".exe"
envdir_list = [os.curdir] + os.environ["PATH"].split(os.pathsep)
for envdir in envdir_list:
# program_path = os.path.join(envdir, program)
program_path = os.path.join(os.path.realpath(os.path.dirname(os.path.abspath(sys.executable))), "bin", program)
if os.path.isfile(program_path) and os.access(program_path, os.X_OK):
return program_path
def get_encoder_name():
"""
    Return the default encoder application for the system, either avconv or ffmpeg
"""
if which("avconv"):
return which("avconv")
elif which("ffmpeg"):
return which("ffmpeg")
else:
# should raise exception
warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning)
return "ffmpeg"
def get_player_name():
"""
    Return the default player application for the system, either avplay or ffplay
"""
if which("avplay"):
return which("avplay")
elif which("ffplay"):
return which("ffplay")
else:
# should raise exception
warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning)
return "ffplay"
def get_prober_name():
"""
    Return the default probe application for the system, either avprobe or ffprobe
"""
if which("avprobe"):
return which("avprobe")
elif which("ffprobe"):
return which("ffprobe")
else:
# should raise exception
warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning)
return "ffprobe"
def fsdecode(filename):
"""Wrapper for os.fsdecode which was introduced in python 3.2 ."""
if sys.version_info >= (3, 2):
PathLikeTypes = (basestring, bytes)
if sys.version_info >= (3, 6):
PathLikeTypes += (os.PathLike,)
if isinstance(filename, PathLikeTypes):
return os.fsdecode(filename)
else:
if isinstance(filename, bytes):
return filename.decode(sys.getfilesystemencoding())
if isinstance(filename, basestring):
return filename
raise TypeError("type {0} not accepted by fsdecode".format(type(filename)))
def get_extra_info(stderr):
"""
avprobe sometimes gives more information on stderr than
on the json output. The information has to be extracted
from stderr of the format of:
' Stream #0:0: Audio: flac, 88200 Hz, stereo, s32 (24 bit)'
or (macOS version):
' Stream #0:0: Audio: vorbis'
' 44100 Hz, stereo, fltp, 320 kb/s'
:type stderr: str
:rtype: list of dict
"""
extra_info = {}
re_stream = r'(?P<space_start> +)Stream #0[:\.](?P<stream_id>([0-9]+))(?P<content_0>.+)\n?(?! *Stream)((?P<space_end> +)(?P<content_1>.+))?'
for i in re.finditer(re_stream, stderr):
if i.group('space_end') is not None and len(i.group('space_start')) <= len(
i.group('space_end')):
content_line = ','.join([i.group('content_0'), i.group('content_1')])
else:
content_line = i.group('content_0')
tokens = [x.strip() for x in re.split('[:,]', content_line) if x]
extra_info[int(i.group('stream_id'))] = tokens
return extra_info
def mediainfo_json(filepath, read_ahead_limit=-1):
"""Return json dictionary with media info(codec, duration, size, bitrate...) from filepath
"""
prober = get_prober_name()
command_args = [
"-v", "info",
"-show_format",
"-show_streams",
]
try:
command_args += [fsdecode(filepath)]
stdin_parameter = None
stdin_data = None
except TypeError:
if prober == 'ffprobe':
command_args += ["-read_ahead_limit", str(read_ahead_limit),
"cache:pipe:0"]
else:
command_args += ["-"]
stdin_parameter = PIPE
file, close_file = _fd_or_path_or_tempfile(filepath, 'rb', tempfile=False)
file.seek(0)
stdin_data = file.read()
if close_file:
file.close()
command = [prober, '-of', 'json'] + command_args
res = Popen(command, stdin=stdin_parameter, stdout=PIPE, stderr=PIPE)
output, stderr = res.communicate(input=stdin_data)
output = output.decode("utf-8", 'ignore')
stderr = stderr.decode("utf-8", 'ignore')
info = json.loads(output)
if not info:
# If ffprobe didn't give any information, just return it
# (for example, because the file doesn't exist)
return info
extra_info = get_extra_info(stderr)
audio_streams = [x for x in info['streams'] if x['codec_type'] == 'audio']
if len(audio_streams) == 0:
return info
# We just operate on the first audio stream in case there are more
stream = audio_streams[0]
def set_property(stream, prop, value):
if prop not in stream or stream[prop] == 0:
stream[prop] = value
for token in extra_info[stream['index']]:
m = re.match('([su]([0-9]{1,2})p?) \(([0-9]{1,2}) bit\)$', token)
m2 = re.match('([su]([0-9]{1,2})p?)( \(default\))?$', token)
if m:
set_property(stream, 'sample_fmt', m.group(1))
set_property(stream, 'bits_per_sample', int(m.group(2)))
set_property(stream, 'bits_per_raw_sample', int(m.group(3)))
elif m2:
set_property(stream, 'sample_fmt', m2.group(1))
set_property(stream, 'bits_per_sample', int(m2.group(2)))
set_property(stream, 'bits_per_raw_sample', int(m2.group(2)))
elif re.match('(flt)p?( \(default\))?$', token):
set_property(stream, 'sample_fmt', token)
set_property(stream, 'bits_per_sample', 32)
set_property(stream, 'bits_per_raw_sample', 32)
elif re.match('(dbl)p?( \(default\))?$', token):
set_property(stream, 'sample_fmt', token)
set_property(stream, 'bits_per_sample', 64)
set_property(stream, 'bits_per_raw_sample', 64)
return info
def mediainfo(filepath):
"""Return dictionary with media info(codec, duration, size, bitrate...) from filepath
"""
prober = get_prober_name()
command_args = [
"-v", "quiet",
"-show_format",
"-show_streams",
filepath
]
command = [prober, '-of', 'old'] + command_args
res = Popen(command, stdout=PIPE)
output = res.communicate()[0].decode("utf-8")
if res.returncode != 0:
command = [prober] + command_args
output = Popen(command, stdout=PIPE).communicate()[0].decode("utf-8")
rgx = re.compile(r"(?:(?P<inner_dict>.*?):)?(?P<key>.*?)\=(?P<value>.*?)$")
info = {}
if sys.platform == 'win32':
output = output.replace("\r", "")
for line in output.split("\n"):
# print(line)
mobj = rgx.match(line)
if mobj:
# print(mobj.groups())
inner_dict, key, value = mobj.groups()
if inner_dict:
try:
info[inner_dict]
except KeyError:
info[inner_dict] = {}
info[inner_dict][key] = value
else:
info[key] = value
return info
def cache_codecs(function):
cache = {}
@wraps(function)
def wrapper():
try:
return cache[0]
        except KeyError:
cache[0] = function()
return cache[0]
return wrapper
@cache_codecs
def get_supported_codecs():
encoder = get_encoder_name()
command = [encoder, "-codecs"]
res = Popen(command, stdout=PIPE, stderr=PIPE)
output = res.communicate()[0].decode("utf-8")
if res.returncode != 0:
return []
if sys.platform == 'win32':
output = output.replace("\r", "")
rgx = re.compile(r"^([D.][E.][AVS.][I.][L.][S.]) (\w*) +(.*)")
decoders = set()
encoders = set()
for line in output.split('\n'):
match = rgx.match(line.strip())
if not match:
continue
flags, codec, name = match.groups()
if flags[0] == 'D':
decoders.add(codec)
if flags[1] == 'E':
encoders.add(codec)
return (decoders, encoders)
def get_supported_decoders():
return get_supported_codecs()[0]
def get_supported_encoders():
return get_supported_codecs()[1]
def stereo_to_ms(audio_segment):
'''
Left-Right -> Mid-Side
'''
    from .audio_segment import AudioSegment  # imported here to avoid a circular import
    channel = audio_segment.split_to_mono()
channel = [channel[0].overlay(channel[1]), channel[0].overlay(channel[1].invert_phase())]
return AudioSegment.from_mono_audiosegments(channel[0], channel[1])
def ms_to_stereo(audio_segment):
'''
Mid-Side -> Left-Right
'''
    from .audio_segment import AudioSegment  # imported here to avoid a circular import
    channel = audio_segment.split_to_mono()
channel = [channel[0].overlay(channel[1]) - 3, channel[0].overlay(channel[1].invert_phase()) - 3]
return AudioSegment.from_mono_audiosegments(channel[0], channel[1])
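# In the two helpers above, "mid" is the overlay (sum) of left and right and
# "side" is left overlaid with phase-inverted right; ms_to_stereo attenuates each
# reconstructed channel by 3 dB, presumably to offset the level gained by summing.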
|
the-stack_106_19304
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
class DddDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _convert_alpha(self, alpha):
return math.radians(alpha + 45) if self.alpha_in_degree else alpha
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
if 'calib' in img_info:
calib = np.array(img_info['calib'], dtype=np.float32)
else:
calib = self.calib
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
if self.opt.keep_res:
s = np.array([self.opt.input_w, self.opt.input_h], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
aug = False
if self.split == 'train' and np.random.random() < self.opt.aug_ddd:
aug = True
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
trans_input = get_affine_transform(
c, s, 0, [self.opt.input_w, self.opt.input_h])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_w, self.opt.input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
# if self.split == 'train' and not self.opt.no_color_aug:
# color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
num_classes = self.opt.num_classes
trans_output = get_affine_transform(
c, s, 0, [self.opt.output_w, self.opt.output_h])
hm = np.zeros(
(num_classes, self.opt.output_h, self.opt.output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
dep = np.zeros((self.max_objs, 1), dtype=np.float32)
rotbin = np.zeros((self.max_objs, 2), dtype=np.int64)
rotres = np.zeros((self.max_objs, 2), dtype=np.float32)
dim = np.zeros((self.max_objs, 3), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
rot_mask = np.zeros((self.max_objs), dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(self.cat_ids[ann['category_id']])
if cls_id <= -99:
continue
# if flipped:
# bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((h, w))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
if cls_id < 0:
ignore_id = [_ for _ in range(num_classes)] \
if cls_id == - 1 else [- cls_id - 2]
if self.opt.rect_mask:
hm[ignore_id, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1] = 0.9999
else:
for cc in ignore_id:
draw_gaussian(hm[cc], ct, radius)
hm[ignore_id, ct_int[1], ct_int[0]] = 0.9999
continue
draw_gaussian(hm[cls_id], ct, radius)
wh[k] = 1. * w, 1. * h
gt_det.append([ct[0], ct[1], 1] + \
self._alpha_to_8(self._convert_alpha(ann['alpha'])) + \
[ann['depth']] + (np.array(ann['dim']) / 1).tolist() + [cls_id])
if self.opt.reg_bbox:
gt_det[-1] = gt_det[-1][:-1] + [w, h] + [gt_det[-1][-1]]
# if (not self.opt.car_only) or cls_id == 1: # Only estimate ADD for cars !!!
if 1:
alpha = self._convert_alpha(ann['alpha'])
# print('img_id cls_id alpha rot_y', img_path, cls_id, alpha, ann['rotation_y'])
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
rotbin[k, 0] = 1
rotres[k, 0] = alpha - (-0.5 * np.pi)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
rotbin[k, 1] = 1
rotres[k, 1] = alpha - (0.5 * np.pi)
dep[k] = ann['depth']
dim[k] = ann['dim']
# print(' cat dim', cls_id, dim[k])
ind[k] = ct_int[1] * self.opt.output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1 if not aug else 0
rot_mask[k] = 1
# print('gt_det', gt_det)
# print('')
ret = {'input': inp, 'hm': hm, 'dep': dep, 'dim': dim, 'ind': ind,
'rotbin': rotbin, 'rotres': rotres, 'reg_mask': reg_mask,
'rot_mask': rot_mask}
if self.opt.reg_bbox:
ret.update({'wh': wh})
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.debug > 0 or not ('train' in self.split):
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 18), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'calib': calib,
'image_path': img_path, 'img_id': img_id}
ret['meta'] = meta
return ret
def _alpha_to_8(self, alpha):
# return [alpha, 0, 0, 0, 0, 0, 0, 0]
ret = [0, 0, 0, 1, 0, 0, 0, 1]
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
r = alpha - (-0.5 * np.pi)
ret[1] = 1
ret[2], ret[3] = np.sin(r), np.cos(r)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
r = alpha - (0.5 * np.pi)
ret[5] = 1
ret[6], ret[7] = np.sin(r), np.cos(r)
return ret
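# _alpha_to_8 encodes the observation angle with two overlapping bins centered at
# -pi/2 and +pi/2: for each bin a membership flag (indices 1 and 5) plus sin/cos
# of the residual angle (indices 2-3 and 6-7); the cos slots default to 1, i.e. a
# zero residual.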
|
the-stack_106_19306
|
# -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.SlotW23 import SlotW23
from numpy import ndarray, arcsin, pi
from pyleecan.Classes.LamSlot import LamSlot
from pyleecan.Classes.Slot import Slot
from pyleecan.Methods.Slot.SlotW23 import S23_H1rCheckError
# For AlmostEqual
DELTA = 1e-4
slotW23_test = list()
# Internal Slot
lam = LamSlot(is_internal=True, Rext=0.1325)
lam.slot = SlotW23(
H0=1e-3, H1=1.5e-3, H1_is_rad=False, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
slotW23_test.append(
{
"test_obj": lam,
"S_exp": 4.2080459e-4,
"Aw": 0.112537,
"SW_exp": 3.8834260e-04,
"H_exp": 0.032438,
}
)
# External Slot
lam = LamSlot(is_internal=False, Rint=0.1325)
lam.slot = SlotW23(
H0=1e-3, H1=1.5e-3, H1_is_rad=False, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
slotW23_test.append(
{
"test_obj": lam,
"S_exp": 4.2116997e-4,
"Aw": 0.086598,
"SW_exp": 3.906568e-04,
"H_exp": 0.032455,
}
)
# Rad H1
lam = LamSlot(is_internal=False, Rint=0.1325)
lam.slot = SlotW23(
H0=1e-3, H1=pi / 4, H1_is_rad=True, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
slotW23_test.append(
{
"test_obj": lam,
"S_exp": 0.010646,
"Aw": 0.013918,
"SW_exp": 3.89935e-4,
"H_exp": 0.81626,
}
)
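# Each entry above pairs a slot/lamination configuration with its expected surface,
# winding surface, height and equivalent winding angle, consumed by the
# parametrized tests below.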
class Test_SlotW23_meth(object):
"""pytest for SlotW23 methods"""
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_schematics(self, test_dict):
"""Check that the schematics is correct"""
test_obj = test_dict["test_obj"]
point_dict = test_obj.slot._comp_point_coordinate()
# Check width
assert abs(point_dict["Z1"] - point_dict["Z8"]) == pytest.approx(
test_obj.slot.W0
)
assert abs(point_dict["Z3"] - point_dict["Z6"]) == pytest.approx(
test_obj.slot.W1
)
assert abs(point_dict["Z4"] - point_dict["Z5"]) == pytest.approx(
test_obj.slot.W2
)
# Check height
assert abs(point_dict["Z1"] - point_dict["Z2"]) == pytest.approx(
test_obj.slot.H0
)
assert abs(point_dict["Z2"].real - point_dict["Z3"].real) == pytest.approx(
test_obj.slot.H1
)
assert abs(point_dict["Z3"] - point_dict["Z4"]) == pytest.approx(
test_obj.slot.H2
)
assert abs(point_dict["Z8"] - point_dict["Z7"]) == pytest.approx(
test_obj.slot.H0
)
assert abs(point_dict["Z6"].real - point_dict["Z7"].real) == pytest.approx(
test_obj.slot.H1
)
assert abs(point_dict["Z5"] - point_dict["Z6"]) == pytest.approx(
test_obj.slot.H2
)
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_build_geometry_active(self, test_dict):
"""Check that the active geometry is correctly split"""
test_obj = test_dict["test_obj"]
surf_list = test_obj.slot.build_geometry_active(Nrad=3, Ntan=2)
# Check label
assert surf_list[0].label == "Stator_Winding_R0-T0-S0"
assert surf_list[1].label == "Stator_Winding_R1-T0-S0"
assert surf_list[2].label == "Stator_Winding_R2-T0-S0"
assert surf_list[3].label == "Stator_Winding_R0-T1-S0"
assert surf_list[4].label == "Stator_Winding_R1-T1-S0"
assert surf_list[5].label == "Stator_Winding_R2-T1-S0"
# Check tangential position
assert surf_list[0].point_ref.imag < 0
assert surf_list[1].point_ref.imag < 0
assert surf_list[2].point_ref.imag < 0
assert surf_list[3].point_ref.imag > 0
assert surf_list[4].point_ref.imag > 0
assert surf_list[5].point_ref.imag > 0
# Check radial position
if test_obj.is_internal:
# Tan=0
assert surf_list[0].point_ref.real > surf_list[1].point_ref.real
assert surf_list[1].point_ref.real > surf_list[2].point_ref.real
# Tan=1
assert surf_list[3].point_ref.real > surf_list[4].point_ref.real
assert surf_list[4].point_ref.real > surf_list[5].point_ref.real
else:
# Tan=0
assert surf_list[0].point_ref.real < surf_list[1].point_ref.real
assert surf_list[1].point_ref.real < surf_list[2].point_ref.real
# Tan=1
assert surf_list[3].point_ref.real < surf_list[4].point_ref.real
assert surf_list[4].point_ref.real < surf_list[5].point_ref.real
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
# Check that the analytical method returns the same result as the numerical one
b = Slot.comp_surface(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_comp_surface_active(self, test_dict):
"""Check that the computation of the winding surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict["SW_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
# Check that the analytical method returns the same result as the numerical one
b = Slot.comp_surface_active(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
# Check that the analytical method returns the same result as the numerical one
b = Slot.comp_height(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_comp_angle_opening(self, test_dict):
"""Check that the computation of the average opening angle iscorrect"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_angle_opening()
assert a == 2 * arcsin(test_obj.slot.W0 / (2 * 0.1325))
# Check that the analytical method returns the same result as the numerical one
b = Slot.comp_angle_opening(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
@pytest.mark.parametrize("test_dict", slotW23_test)
def test_comp_angle_active_eq(self, test_dict):
"""Check that the computation of the average angle is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_angle_active_eq()
a = result
b = test_dict["Aw"]
msg = "Return " + str(a) + " expected " + str(b)
assert abs((a - b) / a - 0) < DELTA, msg
def test_check_error(self):
"""Check that the check method is correctly raising an error"""
lam = LamSlot(is_internal=True, Rext=0.1325)
lam.slot = SlotW23(Zs=69, H2=0.0015, W3=12e-3, H1_is_rad=True, H1=3.14)
with pytest.raises(S23_H1rCheckError) as context:
lam.slot.check()
def test_get_surface_active(self):
"""Check that the get_surface_active works when stator = false"""
lam = LamSlot(is_internal=True, Rext=0.1325, is_stator=False)
lam.slot = SlotW23(
H0=1e-3, H1=1.5e-3, H1_is_rad=False, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
result = lam.slot.get_surface_active()
assert result.label == "Wind_Rotor_R0_T0_S0"
assert len(result.get_lines()) == 4
def test_comp_W(self):
"""Check that the computations of the Ws are right"""
lam = LamSlot(is_internal=True, Rext=0.1325, is_stator=False)
lam.slot = SlotW23(
H0=1e-3, H1=1.5e-3, H1_is_rad=False, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
lam.slot._comp_W()
assert lam.slot.W1 == 0.012681779210634543
assert lam.slot.W2 == 0.0074524346457750515
lam = LamSlot(is_internal=False, Rext=0.1325, is_stator=False, Rint=0.154)
lam.slot = SlotW23(
H0=1e-3, H1=1.5e-3, H1_is_rad=False, H2=30e-3, W0=12e-3, W1=14e-3, W2=12e-3
)
lam.slot._comp_W()
assert lam.slot.W1 == 0.017303874301855315
assert lam.slot.W2 == 0.022533218866714805
|
the-stack_106_19307
|
import enum
import functools
import inspect
import keyword
import logging
import sys
import types
import typing
from .ParseError import ParseError
class Serialization:
@classmethod
def createClass(cls, name, description):
self = cls()
slots = []
attributes = {"__slots__": slots, "__annotations__": {}}
reader_funcs = []
writer_funcs = []
default_values = {}
for item in description:
reader, writer, fieldtype = self._getItemFunction(item, attributes)
field = item.get("field")
virtualfield = item.get("virtualfield")
value = item.get("value")
if field is not None:
field = str(field)
if (
not field.isidentifier()
or keyword.iskeyword(field)
or field.startswith("__")
):
raise Exception(f"Invalid field name: { field !r}")
if field in slots:
raise Exception(f"Duplicate field name: { field !r}")
field = sys.intern(field)
slots.append(field)
attributes["__annotations__"][field] = fieldtype
if reader:
reader = fieldReader(reader, field)
if writer:
writer = fieldWriter(writer, field)
try:
default_values[field] = item["defaultValue"]
except KeyError:
pass
else:
if reader:
reader = fieldReader(reader, virtualfield)
if writer:
writer = virtualfieldWriter(writer, virtualfield, value)
if reader:
reader_funcs.append(reader)
if writer:
writer_funcs.append(writer)
def readFromBytes(cls, ba: bytes, pos: int = 0):
obj = cls.__new__(cls)
d = {"__obj": obj}
ba = bytes(ba)
for f in reader_funcs:
d["_bytesRemaining"] = len(ba) - pos
pos = f(d, ba, pos)
if pos > len(ba):
raise Exception("position beyond data buffer")
for key in slots:
setattr(obj, key, d[key])
return pos, obj
def fromBytes(cls, ba: bytes):
pos, obj = cls.readFromBytes(ba)
if pos < len(ba):
logging.warning(
f"spurious data at end of message: { ba[pos:].hex() }"
)
return obj
def toBytes(self) -> bytearray:
d = dict((k, getattr(self, k)) for k in self.__slots__)
d["__obj"] = self
ba = bytearray()
pos = 0
for f in writer_funcs:
ba, pos = f(d, ba, pos)
if len(ba) > pos:
return ba[0:pos]
elif len(ba) < pos:
raise SystemExit("toBytes produced short output")
return ba
if slots:
constructor_def = "def __init__(__self, *, {}):\n".format(
", ".join(slots)
)
for key in slots:
constructor_def += f" __self.{ key } = { key }\n"
namespace = {}
exec(constructor_def, namespace)
constructor = attributes["__init__"] = namespace["__init__"]
constructor.__kwdefaults__ = default_values
attributes["readFromBytes"] = classmethod(readFromBytes)
attributes["fromBytes"] = classmethod(fromBytes)
attributes["toBytes"] = toBytes
attributes["__repr__"] = objectRepr(name)
return type(name, (), attributes)
@classmethod
def functions(cls):
result = {}
for funcname in (x for x in dir(cls) if x.startswith("_func_")):
func = getattr(cls, funcname)
argspec = inspect.getfullargspec(func)
vars = frozenset(argspec.kwonlyargs)
vars_without_defaults = (
vars.difference(argspec.kwonlydefaults)
if argspec.kwonlydefaults
else vars
)
funcname = funcname[6:] # lose '_func_' prefix
result[funcname] = makeDescriptionItemFactory(
funcname, vars, vars_without_defaults
)
return result
@classmethod
def functionsModule(cls):
mod = types.ModuleType("functions")
mod.__dict__.update(cls.functions())
return mod
def _getItemFunction(self, item, attributes):
funcname = item["func"]
return getattr(self, "_func_" + funcname)(
attributes,
**dict((k, v) for k, v in item.items() if k.startswith("_")),
)
@staticmethod
def _func_noop():
def reader(ba, pos, eval):
return pos, None
def writer(value, ba, pos, eval):
return ba, pos
return reader, writer, None
@staticmethod
def _func_magic(attributes, *, _bytes: typing.ByteString):
length = len(_bytes)
def reader(ba, pos, eval):
if len(ba) < pos + length:
raise Exception("short message")
if ba[pos : pos + length] != _bytes:
raise Exception("magic mismatch")
return pos + length, None
def writer(value, ba, pos, eval):
ba[pos:] = _bytes
return ba, pos + length
return reader, writer, None
@staticmethod
def _func_skip(attributes, *, _bytes: int = 1):
def reader(ba, pos, eval):
if len(ba) < pos + _bytes:
raise Exception("short message")
return pos + _bytes, None
def writer(value, ba, pos, eval):
ba[pos:] = b"\x00" * _bytes
return ba, pos + _bytes
return reader, writer, None
def _func_nulTerminatedString(self, attributes, *, _encoding="ascii"):
def reader(ba, pos, eval):
nul = ba.find(0, pos)
if nul == -1:
raise Exception("no nul")
return nul + 1, ba[pos:nul].decode(_encoding)
def writer(value, ba, pos, eval):
val = value.encode(_encoding) + b"\x00"
length = len(val)
ba[pos : pos + length] = val
return ba, pos + length
return reader, writer, str
def _func_int(
self,
attributes,
*,
_bytes=1,
_unsigned=False,
_enum: typing.Optional[type] = None,
):
conv_enum, convback_enum, returntype = prepareEnum(_enum)
if _unsigned:
def conv(x):
return conv_enum(bytes2uint(x))
def convback(x, b):
return uint2bytes(convback_enum(x), b)
else:
def conv(x):
return conv_enum(bytes2sint(x))
def convback(x, b):
return sint2bytes(convback_enum(x), b)
def reader(ba, pos, eval):
b = eval(_bytes)
return pos + b, conv(ba[pos : pos + b])
def writer(value, ba, pos, eval):
b = eval(_bytes)
ba[pos : pos + 1] = convback(value, b)
return ba, pos + b
return reader, writer, returntype
_func_int8 = _func_int
_func_int16 = functools.partialmethod(_func_int, _bytes=2)
_func_int24 = functools.partialmethod(_func_int, _bytes=3)
_func_int32 = functools.partialmethod(_func_int, _bytes=4)
_func_uint = functools.partialmethod(_func_int, _unsigned=True)
_func_uint8 = _func_uint
_func_uint16 = functools.partialmethod(_func_int, _unsigned=True, _bytes=2)
_func_uint24 = functools.partialmethod(_func_int, _unsigned=True, _bytes=3)
_func_uint32 = functools.partialmethod(_func_int, _unsigned=True, _bytes=4)
def _func_uintbits(
self,
attributes,
*,
_shift=0,
_mask=0xFF,
_prevbyte=False,
_enum: typing.Optional[type] = None,
):
conv, convback, returntype = prepareEnum(_enum)
def reader(ba, pos, eval):
if _prevbyte:
pos -= 1
return pos + 1, conv((ba[pos] >> _shift) & _mask)
def writer(value, ba, pos, eval):
if not _prevbyte:
pos += 1
if pos >= len(ba):
ba += b"\x00" * (pos - len(ba))
ba[pos - 1] |= (convback(value) & _mask) << _shift
return ba, pos
return reader, writer, returntype
def _func_bitset(self, attributes, *, _offset=0, _bytes=1, _enum=None):
conv, convback, elementtype = prepareEnum(_enum)
def reader(ba, pos, eval):
if _bytes is None:
length = len(ba) - pos
else:
length = eval(_bytes)
res = set()
for idx in range(length):
for bit in range(8):
if ba[pos + idx] & (1 << bit):
res.add(conv(idx * 8 + bit + _offset))
return pos + length, res
def writer(value, ba, pos, eval):
if _bytes is None:
length = (max(value) - _offset) // 8 + 1
else:
length = eval(_bytes)
ba[pos : pos + length] = setbits(
bytearray(length), map(convback, value), _offset
)
return ba, pos + length
return reader, writer, typing.Set[elementtype]
def _func_binary(self, attributes, *, _bytes=None):
def reader(ba, pos, eval):
if _bytes is None:
length = len(ba) - pos
else:
length = eval(_bytes)
return pos + length, ba[pos : pos + length]
def writer(value, ba, pos, eval):
if _bytes is None:
length = len(value)
else:
length = eval(_bytes)
ba[pos : pos + length] = value
return ba, pos + length
return reader, writer, typing.ByteString
def _func_boolean(self, attributes, *, _mask=0xFF, _prevbyte=False):
def reader(ba, pos, eval):
if not _prevbyte:
pos += 1
return pos, bool(ba[pos - 1] & _mask)
def writer(value, ba, pos, eval):
if not _prevbyte:
pos += 1
if pos >= len(ba):
ba += b"\x00" * (pos - len(ba))
if value:
ba[pos - 1] |= _mask
return ba, pos
return reader, writer, bool
def _func_optional(self, attributes, *, _item, _present):
itemrdr, itemwtr, itemtype = self._getItemFunction(_item, attributes)
def reader(ba, pos, eval):
if eval(_present):
return itemrdr(ba, pos, eval)
else:
return pos, None
def writer(value, ba, pos, eval):
p = eval(_present)
if p is not None and not p and value is not None:
raise Exception("optional mismatch")
if p or value is not None:
ba, pos = itemwtr(value, ba, pos, eval)
return ba, pos
return reader, writer, typing.Optional[itemtype]
def _func_array(self, attributes, *, _items, _length=None):
itemrdr, itemwtr, itemtype = self._getItemFunction(_items, attributes)
def reader(ba, pos, eval):
length = eval(_length)
res = []
if length is None:
while pos < len(ba):
pos, value = itemrdr(ba, pos, eval)
res.append(value)
else:
for i in range(length):
pos, value = itemrdr(ba, pos, eval)
res.append(value)
return pos, res
def writer(value, ba, pos, eval):
length = eval(_length)
if length is not None and len(value) != length:
raise Exception("array length mismatch")
for i in value:
ba, pos = itemwtr(i, ba, pos, eval)
return ba, pos
return reader, writer, typing.List[itemtype]
def _func_classvar(self, attributes, *, _name, _val):
attributes[_name] = _val
return None, None, None
def _func_object(self, attributes, *, _type):
def reader(ba, pos, eval):
t = eval(_type)
return t.readFromBytes(ba, pos)
def writer(value, ba, pos, eval):
t = eval(_type)
if not isinstance(value, t):
raise Exception(f"object does not match type { t !r}")
value = value.toBytes()
length = len(value)
ba[pos : pos + length] = value
return ba, pos + length
return reader, writer, None
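# Hedged usage sketch (illustrative only -- the message layout and names below are
# invented, not part of this module):
#
#   f = Serialization.functionsModule()
#   Point = Serialization.createClass("Point", [
#       f.magic(bytes=b"PT"),
#       f.uint8(field="x"),
#       f.uint8(field="y"),
#   ])
#   p = Point.fromBytes(b"PT\x03\x07")   # -> Point(x=3, y=7)
#   p.toBytes()                          # -> bytearray(b"PT\x03\x07")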
class Expr:
def __init__(self, expr):
self.__expr = compile(expr, f"<Expr { expr !r}>", "eval")
def __call__(self, locals_dict):
return eval(
self.__expr, {"__builtins__": stripped_down_builtins}, locals_dict
)
class Value:
def __init__(self, key):
self.__key = key
def __call__(self, locals_dict):
return locals_dict.get(self.__key)
class ExprValueEvaluator:
def __init__(self, state):
self.state = state
def __call__(self, x):
if isinstance(x, (Value, Expr)):
return x(self.state)
else:
return x
def fieldReader(rdr, field):
def reader(state, ba, pos):
eval = ExprValueEvaluator(state)
pos, value = rdr(ba, pos, eval)
if field is not None:
state[field] = value
return pos
return reader
def fieldWriter(wtr, field):
def writer(state, ba, pos):
eval = ExprValueEvaluator(state)
return wtr(state[field], ba, pos, eval)
return writer
def virtualfieldWriter(wtr, virtualfield, value):
def writer(state, ba, pos):
eval = ExprValueEvaluator(state)
val = eval(value)
if virtualfield is not None:
state[virtualfield] = val
return wtr(val, ba, pos, eval)
return writer
def prepareEnum(_enum):
if _enum is not None and not issubclass(_enum, enum.Enum):
raise Exception("'enum' field must be None or a subclass of enum.Enum")
if _enum is None:
conv = convback = lambda x: x
returntype = int
elif issubclass(_enum, enum.IntEnum):
def conv(x):
try:
return _enum(x)
except ValueError:
return int(x)
def convback(x):
return int(x)
returntype = typing.Union[_enum, int]
else:
conv = _enum
def convback(x):
return x.value
returntype = _enum
return conv, convback, returntype
def objectRepr(name):
def __repr__(self):
p = ", ".join(
f"{ k }={ getattr(self, k, '<undefined>') !r}"
for k in self.__slots__
)
return f"{ name }({ p })"
return __repr__
def bytes2uint(b: typing.ByteString) -> int:
res = 0
for i in b:
res = (res << 8) | i
return res
def bytes2sint(b: typing.ByteString) -> int:
v = bytes2uint(b)
if v & (1 << (len(b) * 8 - 1)):
v -= 1 << (len(b) * 8)
return v
def uint2bytes(v: int, b: int) -> bytearray:
return bytearray((v >> ((b - p - 1) * 8)) & 0xFF for p in range(b))
def sint2bytes(v: int, b: int) -> bytearray:
if v < 0:
v += 1 << (b * 8)
return uint2bytes(v, b)
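# Quick check of the helpers above (big-endian, two's complement):
#   bytes2uint(b"\x01\x00") == 256
#   bytes2sint(b"\xff") == -1
#   uint2bytes(256, 2) == bytearray(b"\x01\x00")
#   sint2bytes(-1, 1) == bytearray(b"\xff")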
def setbits(ba, bits, offset):
length = len(ba)
high = length * 8
for b in bits:
v = b - offset
if 0 <= v < high:
ba[v // 8] |= 1 << (v % 8)
else:
raise ParseError("bit number out of range")
return ba
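# e.g. setbits(bytearray(1), [0, 3], 0) sets bits 0 and 3 -> bytearray(b"\x09").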
reserved_field_names = frozenset(
("field", "virtualfield", "value", "defaultValue")
)
def makeDescriptionItemFactory(funcname, vars, vars_without_defaults):
if any((not v.startswith("_")) for v in vars):
raise Exception(
"all parameter names must start with _: " + " ".join(vars)
)
def factory(**kwargs):
params = dict(
(
k
if k.startswith("_") or k in reserved_field_names
else f"_{ k }",
v,
)
for k, v in kwargs.items()
)
argnames = frozenset(p for p in params if p.startswith("_"))
missing_vars = vars_without_defaults.difference(argnames)
if missing_vars:
raise Exception("missing parameters: " + " ".join(missing_vars))
invalid_vars = set(argnames).difference(vars)
if invalid_vars:
raise Exception("invalid parameters: " + " ".join(invalid_vars))
params["func"] = funcname
return params
return factory
def assertEqual(x, *y):
if any(x != yy for yy in y):
raise Exception(f"equality assertion failed { (x,) + tuple(y) !r}")
return x
def uintsize(x):
x = int(x)
if x < 0:
raise Exception("negative number passed to uintsize")
size = 1
while x > 255:
size += 1
x >>= 8
return size
def intsize(x):
x = int(x)
size = 1
if x < 0:
x = -x
while x > 128:
size += 1
x >>= 8
else:
while x >= 128:
size += 1
x >>= 8
return size
stripped_down_builtins = types.ModuleType("builtins")
stripped_down_builtins.assertEqual = assertEqual
stripped_down_builtins.intsize = intsize
stripped_down_builtins.len = len
stripped_down_builtins.max = max
stripped_down_builtins.min = min
stripped_down_builtins.uintsize = uintsize
|
the-stack_106_19308
|
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc, contextlib, enum, os.path, re, tempfile, shlex
import subprocess
from typing import List, Optional, Tuple
from ..linkers import StaticLinker
from .. import coredata
from .. import mlog
from .. import mesonlib
from ..mesonlib import (
EnvironmentException, MesonException, OrderedSet,
version_compare, Popen_safe
)
from ..envconfig import (
Properties,
)
"""This file contains the data files of all compilers Meson knows
about. To support a new compiler, add its information below.
Also add corresponding autodetection code in environment.py."""
header_suffixes = ('h', 'hh', 'hpp', 'hxx', 'H', 'ipp', 'moc', 'vapi', 'di')
obj_suffixes = ('o', 'obj', 'res')
lib_suffixes = ('a', 'lib', 'dll', 'dylib', 'so')
# Mapping of language to suffixes of files that should always be in that language
# This means we can't include .h headers here since they could be C, C++, ObjC, etc.
lang_suffixes = {
'c': ('c',),
'cpp': ('cpp', 'cc', 'cxx', 'c++', 'hh', 'hpp', 'ipp', 'hxx'),
'cuda': ('cu',),
# f90, f95, f03, f08 are for free-form fortran ('f90' recommended)
# f, for, ftn, fpp are for fixed-form fortran ('f' or 'for' recommended)
'fortran': ('f90', 'f95', 'f03', 'f08', 'f', 'for', 'ftn', 'fpp'),
'd': ('d', 'di'),
'objc': ('m',),
'objcpp': ('mm',),
'rust': ('rs',),
'vala': ('vala', 'vapi', 'gs'),
'cs': ('cs',),
'swift': ('swift',),
'java': ('java',),
}
all_languages = lang_suffixes.keys()
cpp_suffixes = lang_suffixes['cpp'] + ('h',)
c_suffixes = lang_suffixes['c'] + ('h',)
# List of languages that by default consume and output libraries following the
# C ABI; these can generally be used interchangeably
clib_langs = ('objcpp', 'cpp', 'objc', 'c', 'fortran',)
# List of languages that can be linked with C code directly by the linker
# used in build.py:process_compilers() and build.py:get_dynamic_linker()
clink_langs = ('d', 'cuda') + clib_langs
clink_suffixes = ()
for _l in clink_langs + ('vala',):
clink_suffixes += lang_suffixes[_l]
clink_suffixes += ('h', 'll', 's')
# Languages that should use LDFLAGS arguments when linking.
languages_using_ldflags = ('objcpp', 'cpp', 'objc', 'c', 'fortran', 'd', 'cuda')
soregex = re.compile(r'.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
# Environment variables that each lang uses.
cflags_mapping = {'c': 'CFLAGS',
'cpp': 'CXXFLAGS',
'cuda': 'CUFLAGS',
'objc': 'OBJCFLAGS',
'objcpp': 'OBJCXXFLAGS',
'fortran': 'FFLAGS',
'd': 'DFLAGS',
'vala': 'VALAFLAGS',
'rust': 'RUSTFLAGS'}
# execinfo is a compiler lib on BSD
unixy_compiler_internal_libs = ('m', 'c', 'pthread', 'dl', 'rt', 'execinfo')
# All these are only for C-linkable languages; see `clink_langs` above.
def sort_clink(lang):
'''
Sorting function to sort the list of languages according to
reversed(compilers.clink_langs) and append the unknown langs in the end.
The purpose is to prefer C over C++ for files that can be compiled by
both such as assembly, C, etc. Also applies to ObjC, ObjC++, etc.
'''
if lang not in clink_langs:
return 1
return -clink_langs.index(lang)
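# For example, sorted(['cpp', 'c', 'vala'], key=sort_clink) -> ['c', 'cpp', 'vala']:
# C sorts ahead of C++, and languages outside clink_langs go last.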
def is_header(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
suffix = fname.split('.')[-1]
return suffix in header_suffixes
def is_source(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
suffix = fname.split('.')[-1].lower()
return suffix in clink_suffixes
def is_assembly(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
return fname.split('.')[-1].lower() == 's'
def is_llvm_ir(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
return fname.split('.')[-1] == 'll'
def is_object(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
suffix = fname.split('.')[-1]
return suffix in obj_suffixes
def is_library(fname):
if hasattr(fname, 'fname'):
fname = fname.fname
if soregex.match(fname):
return True
suffix = fname.split('.')[-1]
return suffix in lib_suffixes
gnulike_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
armclang_buildtype_args = {'plain': [],
'debug': ['-O0', '-g'],
'debugoptimized': ['-O1', '-g'],
'release': ['-Os'],
'minsize': ['-Oz'],
'custom': [],
}
cuda_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
}
arm_buildtype_args = {'plain': [],
'debug': ['-O0', '--debug'],
'debugoptimized': ['-O1', '--debug'],
'release': ['-O3', '-Otime'],
'minsize': ['-O3', '-Ospace'],
'custom': [],
}
ccrx_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
msvc_buildtype_args = {'plain': [],
'debug': ["/ZI", "/Ob0", "/Od", "/RTC1"],
'debugoptimized': ["/Zi", "/Ob1"],
'release': ["/Ob2", "/Gw"],
'minsize': ["/Zi", "/Gw"],
'custom': [],
}
pgi_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
apple_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
gnulike_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': ['-Wl,-O1'],
'minsize': [],
'custom': [],
}
arm_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
ccrx_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
pgi_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
msvc_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
# The otherwise implicit REF and ICF linker
# optimisations are disabled by /DEBUG.
# REF implies ICF.
'release': ['/OPT:REF'],
'minsize': ['/INCREMENTAL:NO', '/OPT:REF'],
'custom': [],
}
java_buildtype_args = {'plain': [],
'debug': ['-g'],
'debugoptimized': ['-g'],
'release': [],
'minsize': [],
'custom': [],
}
rust_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
d_gdc_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-finline-functions'],
'release': ['-frelease', '-finline-functions'],
'minsize': [],
'custom': [],
}
d_ldc_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-enable-inlining', '-Hkeep-all-bodies'],
'release': ['-release', '-enable-inlining', '-Hkeep-all-bodies'],
'minsize': [],
'custom': [],
}
d_dmd_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-inline'],
'release': ['-release', '-inline'],
'minsize': [],
'custom': [],
}
mono_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-optimize+'],
'release': ['-optimize+'],
'minsize': [],
'custom': [],
}
swift_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
gnu_winlibs = ['-lkernel32', '-luser32', '-lgdi32', '-lwinspool', '-lshell32',
'-lole32', '-loleaut32', '-luuid', '-lcomdlg32', '-ladvapi32']
msvc_winlibs = ['kernel32.lib', 'user32.lib', 'gdi32.lib',
'winspool.lib', 'shell32.lib', 'ole32.lib', 'oleaut32.lib',
'uuid.lib', 'comdlg32.lib', 'advapi32.lib']
gnu_color_args = {'auto': ['-fdiagnostics-color=auto'],
'always': ['-fdiagnostics-color=always'],
'never': ['-fdiagnostics-color=never'],
}
clang_color_args = {'auto': ['-Xclang', '-fcolor-diagnostics'],
'always': ['-Xclang', '-fcolor-diagnostics'],
'never': ['-Xclang', '-fno-color-diagnostics'],
}
arm_optimization_args = {'0': ['-O0'],
'g': ['-g'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': [],
}
armclang_optimization_args = {'0': ['-O0'],
'g': ['-g'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os']
}
clike_optimization_args = {'0': [],
'g': [],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
}
gnu_optimization_args = {'0': [],
'g': ['-Og'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
}
ccrx_optimization_args = {'0': ['-optimize=0'],
'g': ['-optimize=0'],
'1': ['-optimize=1'],
'2': ['-optimize=2'],
'3': ['-optimize=max'],
's': ['-optimize=2', '-size']
}
msvc_optimization_args = {'0': [],
'g': ['/O0'],
'1': ['/O1'],
'2': ['/O2'],
'3': ['/O2'],
's': ['/O1'], # Implies /Os.
}
cuda_optimization_args = {'0': [],
'g': ['-O0'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3', '-Otime'],
's': ['-O3', '-Ospace']
}
cuda_debug_args = {False: [],
True: ['-g']}
clike_debug_args = {False: [],
True: ['-g']}
msvc_debug_args = {False: [],
True: []} # Fixme!
ccrx_debug_args = {False: [],
True: ['-debug']}
base_options = {'b_pch': coredata.UserBooleanOption('b_pch', 'Use precompiled headers', True),
'b_lto': coredata.UserBooleanOption('b_lto', 'Use link time optimization', False),
'b_sanitize': coredata.UserComboOption('b_sanitize',
'Code sanitizer to use',
['none', 'address', 'thread', 'undefined', 'memory', 'address,undefined'],
'none'),
'b_lundef': coredata.UserBooleanOption('b_lundef', 'Use -Wl,--no-undefined when linking', True),
'b_asneeded': coredata.UserBooleanOption('b_asneeded', 'Use -Wl,--as-needed when linking', True),
'b_pgo': coredata.UserComboOption('b_pgo', 'Use profile guided optimization',
['off', 'generate', 'use'],
'off'),
'b_coverage': coredata.UserBooleanOption('b_coverage',
'Enable coverage tracking.',
False),
'b_colorout': coredata.UserComboOption('b_colorout', 'Use colored output',
['auto', 'always', 'never'],
'always'),
'b_ndebug': coredata.UserComboOption('b_ndebug', 'Disable asserts',
['true', 'false', 'if-release'], 'false'),
'b_staticpic': coredata.UserBooleanOption('b_staticpic',
'Build static libraries as position independent',
True),
'b_pie': coredata.UserBooleanOption('b_pie',
'Build executables as position independent',
False),
'b_bitcode': coredata.UserBooleanOption('b_bitcode',
'Generate and embed bitcode (only macOS and iOS)',
False),
'b_vscrt': coredata.UserComboOption('b_vscrt', 'VS run-time library type to use.',
['none', 'md', 'mdd', 'mt', 'mtd', 'from_buildtype'],
'from_buildtype'),
}
gnulike_instruction_set_args = {'mmx': ['-mmmx'],
'sse': ['-msse'],
'sse2': ['-msse2'],
'sse3': ['-msse3'],
'ssse3': ['-mssse3'],
'sse41': ['-msse4.1'],
'sse42': ['-msse4.2'],
'avx': ['-mavx'],
'avx2': ['-mavx2'],
'neon': ['-mfpu=neon'],
}
vs32_instruction_set_args = {'mmx': ['/arch:SSE'], # There does not seem to be a flag just for MMX
'sse': ['/arch:SSE'],
'sse2': ['/arch:SSE2'],
'sse3': ['/arch:AVX'], # VS leaped from SSE2 directly to AVX.
'sse41': ['/arch:AVX'],
'sse42': ['/arch:AVX'],
'avx': ['/arch:AVX'],
'avx2': ['/arch:AVX2'],
'neon': None,
}
# The 64 bit compiler defaults to /arch:avx.
vs64_instruction_set_args = {'mmx': ['/arch:AVX'],
'sse': ['/arch:AVX'],
'sse2': ['/arch:AVX'],
'sse3': ['/arch:AVX'],
'ssse3': ['/arch:AVX'],
'sse41': ['/arch:AVX'],
'sse42': ['/arch:AVX'],
'avx': ['/arch:AVX'],
'avx2': ['/arch:AVX2'],
'neon': None,
}
gnu_symbol_visibility_args = {'': [],
'default': ['-fvisibility=default'],
'internal': ['-fvisibility=internal'],
'hidden': ['-fvisibility=hidden'],
'protected': ['-fvisibility=protected'],
'inlineshidden': ['-fvisibility=hidden', '-fvisibility-inlines-hidden'],
}
def sanitizer_compile_args(value):
if value == 'none':
return []
args = ['-fsanitize=' + value]
if 'address' in value: # For -fsanitize=address,undefined
args.append('-fno-omit-frame-pointer')
return args
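# e.g. sanitizer_compile_args('address,undefined') ->
#      ['-fsanitize=address,undefined', '-fno-omit-frame-pointer']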
def sanitizer_link_args(value):
if value == 'none':
return []
args = ['-fsanitize=' + value]
return args
def option_enabled(boptions, options, option):
try:
if option not in boptions:
return False
return options[option].value
except KeyError:
return False
def get_base_compile_args(options, compiler):
args = []
# FIXME, gcc/clang specific.
try:
if options['b_lto'].value:
args.append('-flto')
except KeyError:
pass
try:
args += compiler.get_colorout_args(options['b_colorout'].value)
except KeyError:
pass
try:
args += sanitizer_compile_args(options['b_sanitize'].value)
except KeyError:
pass
try:
pgo_val = options['b_pgo'].value
if pgo_val == 'generate':
args.extend(compiler.get_profile_generate_args())
elif pgo_val == 'use':
args.extend(compiler.get_profile_use_args())
except KeyError:
pass
try:
if options['b_coverage'].value:
args += compiler.get_coverage_args()
except KeyError:
pass
try:
if (options['b_ndebug'].value == 'true' or
(options['b_ndebug'].value == 'if-release' and
options['buildtype'].value in {'release', 'plain'})):
args += ['-DNDEBUG']
except KeyError:
pass
# This does not need a try...except
if option_enabled(compiler.base_options, options, 'b_bitcode'):
args.append('-fembed-bitcode')
try:
crt_val = options['b_vscrt'].value
buildtype = options['buildtype'].value
try:
args += compiler.get_crt_compile_args(crt_val, buildtype)
except AttributeError:
pass
except KeyError:
pass
return args
def get_base_link_args(options, linker, is_shared_module):
args = []
# FIXME, gcc/clang specific.
try:
if options['b_lto'].value:
args.append('-flto')
except KeyError:
pass
try:
args += sanitizer_link_args(options['b_sanitize'].value)
except KeyError:
pass
try:
pgo_val = options['b_pgo'].value
if pgo_val == 'generate':
args.extend(linker.get_profile_generate_args())
elif pgo_val == 'use':
args.extend(linker.get_profile_use_args())
except KeyError:
pass
try:
if options['b_coverage'].value:
args += linker.get_coverage_link_args()
except KeyError:
pass
# These do not need a try...except
if not is_shared_module and option_enabled(linker.base_options, options, 'b_lundef'):
args.append('-Wl,--no-undefined')
as_needed = option_enabled(linker.base_options, options, 'b_asneeded')
bitcode = option_enabled(linker.base_options, options, 'b_bitcode')
# Shared modules cannot be built with bitcode_bundle because
# -bitcode_bundle is incompatible with -undefined and -bundle
if bitcode and not is_shared_module:
args.append('-Wl,-bitcode_bundle')
elif as_needed:
# -Wl,-dead_strip_dylibs is incompatible with bitcode
args.append(linker.get_asneeded_args())
try:
crt_val = options['b_vscrt'].value
buildtype = options['buildtype'].value
try:
args += linker.get_crt_link_args(crt_val, buildtype)
except AttributeError:
pass
except KeyError:
pass
return args
def prepare_rpaths(raw_rpaths, build_dir, from_dir):
internal_format_rpaths = [evaluate_rpath(p, build_dir, from_dir) for p in raw_rpaths]
ordered_rpaths = order_rpaths(internal_format_rpaths)
return ordered_rpaths
def order_rpaths(rpath_list):
# We want rpaths that point inside our build dir to always override
# those pointing to other places in the file system. This is so built
# binaries prefer our libraries to the ones that may lie somewhere
# in the file system, such as /lib/x86_64-linux-gnu.
#
# The correct thing to do here would be C++'s std::stable_partition.
# Python standard library does not have it, so replicate it with
# sort, which is guaranteed to be stable.
return sorted(rpath_list, key=os.path.isabs)
def evaluate_rpath(p, build_dir, from_dir):
if p == from_dir:
return '' # relpath errors out in this case
elif os.path.isabs(p):
return p # These can be outside of build dir.
else:
return os.path.relpath(os.path.join(build_dir, p), os.path.join(build_dir, from_dir))
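# A minimal, illustrative sketch (not part of upstream Meson): shows how the
# rpath helpers above fit together. prepare_rpaths() rewrites build-dir-relative
# entries into paths relative to from_dir, then order_rpaths() moves them ahead
# of absolute system paths. The directory names are made up; POSIX paths assumed.
def _rpath_helpers_demo():
    raw = ['sub/libdir', '/usr/lib']
    # evaluate_rpath('sub/libdir', '/tmp/build', 'src') -> '../sub/libdir'
    # order_rpaths() keeps relative (build-dir) entries before absolute ones.
    return prepare_rpaths(raw, '/tmp/build', 'src')  # ['../sub/libdir', '/usr/lib']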
class CrossNoRunException(MesonException):
pass
class RunResult:
def __init__(self, compiled, returncode=999, stdout='UNDEFINED', stderr='UNDEFINED'):
self.compiled = compiled
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
class CompilerArgs(list):
'''
Class derived from list() that manages a list of compiler arguments. Should
be used while constructing compiler arguments from various sources. Can be
operated with ordinary lists, so this does not need to be used everywhere.
All arguments must be inserted and stored in GCC-style (-lfoo, -Idir, etc)
    and can be converted to the native type of each compiler by using the
.to_native() method to which you must pass an instance of the compiler or
the compiler class.
New arguments added to this class (either with .append(), .extend(), or +=)
are added in a way that ensures that they override previous arguments.
For example:
>>> a = ['-Lfoo', '-lbar']
>>> a += ['-Lpho', '-lbaz']
>>> print(a)
['-Lpho', '-Lfoo', '-lbar', '-lbaz']
Arguments will also be de-duped if they can be de-duped safely.
Note that because of all this, this class is not commutative and does not
    preserve the order of arguments when it is safe not to. For example:
>>> ['-Ifoo', '-Ibar'] + ['-Ifez', '-Ibaz', '-Werror']
['-Ifez', '-Ibaz', '-Ifoo', '-Ibar', '-Werror']
>>> ['-Ifez', '-Ibaz', '-Werror'] + ['-Ifoo', '-Ibar']
['-Ifoo', '-Ibar', '-Ifez', '-Ibaz', '-Werror']
'''
# NOTE: currently this class is only for C-like compilers, but it can be
# extended to other languages easily. Just move the following to the
# compiler class and initialize when self.compiler is set.
# Arg prefixes that override by prepending instead of appending
prepend_prefixes = ('-I', '-L')
# Arg prefixes and args that must be de-duped by returning 2
dedup2_prefixes = ('-I', '-L', '-D', '-U')
dedup2_suffixes = ()
dedup2_args = ()
# Arg prefixes and args that must be de-duped by returning 1
#
# NOTE: not thorough. A list of potential corner cases can be found in
# https://github.com/mesonbuild/meson/pull/4593#pullrequestreview-182016038
dedup1_prefixes = ('-l', '-Wl,-l', '-Wl,--export-dynamic')
dedup1_suffixes = ('.lib', '.dll', '.so', '.dylib', '.a')
# Match a .so of the form path/to/libfoo.so.0.1.0
# Only UNIX shared libraries require this. Others have a fixed extension.
dedup1_regex = re.compile(r'([\/\\]|\A)lib.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
dedup1_args = ('-c', '-S', '-E', '-pipe', '-pthread')
# In generate_link() we add external libs without de-dup, but we must
# *always* de-dup these because they're special arguments to the linker
always_dedup_args = tuple('-l' + lib for lib in unixy_compiler_internal_libs)
compiler = None
def _check_args(self, args):
cargs = []
if len(args) > 2:
raise TypeError("CompilerArgs() only accepts at most 2 arguments: "
"The compiler, and optionally an initial list")
elif not args:
return cargs
elif len(args) == 1:
if isinstance(args[0], (Compiler, StaticLinker)):
self.compiler = args[0]
else:
raise TypeError("you must pass a Compiler instance as one of "
"the arguments")
elif len(args) == 2:
if isinstance(args[0], (Compiler, StaticLinker)):
self.compiler = args[0]
cargs = args[1]
elif isinstance(args[1], (Compiler, StaticLinker)):
cargs = args[0]
self.compiler = args[1]
else:
raise TypeError("you must pass a Compiler instance as one of "
"the two arguments")
else:
raise AssertionError('Not reached')
return cargs
def __init__(self, *args):
super().__init__(self._check_args(args))
@classmethod
def _can_dedup(cls, arg):
'''
Returns whether the argument can be safely de-duped. This is dependent
on three things:
a) Whether an argument can be 'overridden' by a later argument. For
example, -DFOO defines FOO and -UFOO undefines FOO. In this case, we
can safely remove the previous occurrence and add a new one. The same
is true for include paths and library paths with -I and -L. For
these we return `2`. See `dedup2_prefixes` and `dedup2_args`.
b) Arguments that once specified cannot be undone, such as `-c` or
`-pipe`. New instances of these can be completely skipped. For these
we return `1`. See `dedup1_prefixes` and `dedup1_args`.
c) Whether it matters where or how many times on the command-line
a particular argument is present. This can matter for symbol
resolution in static or shared libraries, so we cannot de-dup or
reorder them. For these we return `0`. This is the default.
In addition to these, we handle library arguments specially.
With GNU ld, we surround library arguments with -Wl,--start/end-group
to recursively search for symbols in the libraries. This is not needed
with other linkers.
'''
# A standalone argument must never be deduplicated because it is
# defined by what comes _after_ it. Thus dedupping this:
# -D FOO -D BAR
# would yield either
# -D FOO BAR
# or
# FOO -D BAR
# both of which are invalid.
if arg in cls.dedup2_prefixes:
return 0
if arg in cls.dedup2_args or \
arg.startswith(cls.dedup2_prefixes) or \
arg.endswith(cls.dedup2_suffixes):
return 2
if arg in cls.dedup1_args or \
arg.startswith(cls.dedup1_prefixes) or \
arg.endswith(cls.dedup1_suffixes) or \
re.search(cls.dedup1_regex, arg):
return 1
return 0
@classmethod
def _should_prepend(cls, arg):
if arg.startswith(cls.prepend_prefixes):
return True
return False
def to_native(self, copy=False):
# Check if we need to add --start/end-group for circular dependencies
# between static libraries, and for recursively searching for symbols
# needed by static libraries that are provided by object files or
# shared libraries.
if copy:
new = self.copy()
else:
new = self
if get_compiler_uses_gnuld(self.compiler):
global soregex
group_start = -1
group_end = -1
for i, each in enumerate(new):
if not each.startswith(('-Wl,-l', '-l')) and not each.endswith('.a') and \
not soregex.match(each):
continue
group_end = i
if group_start < 0:
# First occurrence of a library
group_start = i
if group_start >= 0:
# Last occurrence of a library
new.insert(group_end + 1, '-Wl,--end-group')
new.insert(group_start, '-Wl,--start-group')
return self.compiler.unix_args_to_native(new)
def append_direct(self, arg):
'''
Append the specified argument without any reordering or de-dup
except for absolute paths where the order of include search directories
is not relevant
'''
if os.path.isabs(arg):
self.append(arg)
else:
super().append(arg)
def extend_direct(self, iterable):
'''
Extend using the elements in the specified iterable without any
reordering or de-dup except for absolute paths where the order of
include search directories is not relevant
'''
for elem in iterable:
self.append_direct(elem)
def extend_preserving_lflags(self, iterable):
normal_flags = []
lflags = []
for i in iterable:
if i not in self.always_dedup_args and (i.startswith('-l') or i.startswith('-L')):
lflags.append(i)
else:
normal_flags.append(i)
self.extend(normal_flags)
self.extend_direct(lflags)
def __add__(self, args):
new = CompilerArgs(self, self.compiler)
new += args
return new
def __iadd__(self, args):
'''
Add two CompilerArgs while taking into account overriding of arguments
and while preserving the order of arguments as much as possible
'''
pre = []
post = []
if not isinstance(args, list):
raise TypeError('can only concatenate list (not "{}") to list'.format(args))
for arg in args:
# If the argument can be de-duped, do it either by removing the
# previous occurrence of it and adding a new one, or not adding the
# new occurrence.
dedup = self._can_dedup(arg)
if dedup == 1:
# Argument already exists and adding a new instance is useless
if arg in self or arg in pre or arg in post:
continue
if dedup == 2:
# Remove all previous occurrences of the arg and add it anew
if arg in self:
self.remove(arg)
if arg in pre:
pre.remove(arg)
if arg in post:
post.remove(arg)
if self._should_prepend(arg):
pre.append(arg)
else:
post.append(arg)
# Insert at the beginning
self[:0] = pre
# Append to the end
super().__iadd__(post)
return self
def __radd__(self, args):
new = CompilerArgs(args, self.compiler)
new += self
return new
def __mul__(self, args):
raise TypeError("can't multiply compiler arguments")
def __imul__(self, args):
raise TypeError("can't multiply compiler arguments")
def __rmul__(self, args):
raise TypeError("can't multiply compiler arguments")
def append(self, arg):
self.__iadd__([arg])
def extend(self, args):
self.__iadd__(args)
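# Illustrative sketch, not part of upstream Meson: demonstrates the override
# and de-duplication behaviour documented in the CompilerArgs docstring. A
# CompilerArgs() built without a compiler is enough for pure argument
# manipulation; only .to_native() needs a real Compiler instance. The include
# paths and macros below are made up.
def _compiler_args_demo():
    args = CompilerArgs()
    args += ['-I/usr/include/foo', '-DFOO', '-lbar']
    # -I prepends, the repeated -DFOO is removed and re-added, -lbar stays put:
    args += ['-I/usr/include/baz', '-DFOO']
    # -> ['-I/usr/include/baz', '-I/usr/include/foo', '-lbar', '-DFOO']
    return list(args)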
class Compiler:
# Libraries to ignore in find_library() since they are provided by the
# compiler or the C library. Currently only used for MSVC.
ignore_libs = ()
# Libraries that are internal compiler implementations, and must not be
# manually searched.
internal_libs = ()
# Cache for the result of compiler checks which can be cached
compiler_check_cache = {}
def __init__(self, exelist, version, **kwargs):
if isinstance(exelist, str):
self.exelist = [exelist]
elif isinstance(exelist, list):
self.exelist = exelist
else:
raise TypeError('Unknown argument to Compiler')
# In case it's been overridden by a child class already
if not hasattr(self, 'file_suffixes'):
self.file_suffixes = lang_suffixes[self.language]
if not hasattr(self, 'can_compile_suffixes'):
self.can_compile_suffixes = set(self.file_suffixes)
self.default_suffix = self.file_suffixes[0]
self.version = version
if 'full_version' in kwargs:
self.full_version = kwargs['full_version']
else:
self.full_version = None
self.base_options = []
def __repr__(self):
repr_str = "<{0}: v{1} `{2}`>"
return repr_str.format(self.__class__.__name__, self.version,
' '.join(self.exelist))
def can_compile(self, src):
if hasattr(src, 'fname'):
src = src.fname
suffix = os.path.splitext(src)[1].lower()
if suffix and suffix[1:] in self.can_compile_suffixes:
return True
return False
def get_id(self):
return self.id
def get_version_string(self):
details = [self.id, self.version]
if self.full_version:
details += ['"%s"' % (self.full_version)]
return '(%s)' % (' '.join(details))
def get_language(self):
return self.language
def get_display_language(self):
return self.language.capitalize()
def get_default_suffix(self):
return self.default_suffix
def get_define(self, dname, prefix, env, extra_args, dependencies):
raise EnvironmentException('%s does not support get_define ' % self.get_id())
def compute_int(self, expression, low, high, guess, prefix, env, extra_args, dependencies):
raise EnvironmentException('%s does not support compute_int ' % self.get_id())
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
raise EnvironmentException('%s does not support compute_parameters_with_absolute_paths ' % self.get_id())
def has_members(self, typename, membernames, prefix, env, *, extra_args=None, dependencies=None):
raise EnvironmentException('%s does not support has_member(s) ' % self.get_id())
def has_type(self, typename, prefix, env, extra_args, *, dependencies=None):
raise EnvironmentException('%s does not support has_type ' % self.get_id())
def symbols_have_underscore_prefix(self, env):
raise EnvironmentException('%s does not support symbols_have_underscore_prefix ' % self.get_id())
def get_exelist(self):
return self.exelist[:]
def get_builtin_define(self, *args, **kwargs):
raise EnvironmentException('%s does not support get_builtin_define.' % self.id)
def has_builtin_define(self, *args, **kwargs):
raise EnvironmentException('%s does not support has_builtin_define.' % self.id)
def get_always_args(self):
return []
def can_linker_accept_rsp(self):
"""
Determines whether the linker can accept arguments using the @rsp syntax.
"""
return mesonlib.is_windows()
def get_linker_always_args(self):
return []
def get_linker_lib_prefix(self):
return ''
def gen_import_library_args(self, implibname):
"""
Used only on Windows for libraries that need an import library.
This currently means C, C++, Fortran.
"""
return []
def use_preproc_flags(self) -> bool:
"""
Whether the compiler (or processes it spawns) cares about CPPFLAGS
"""
return self.get_language() in {'c', 'cpp', 'objc', 'objcpp'}
def use_ldflags(self) -> bool:
"""
Whether the compiler (or processes it spawns) cares about LDFLAGS
"""
return self.get_language() in languages_using_ldflags
def get_args_from_envvars(self):
"""
Returns a tuple of (compile_flags, link_flags) for the specified language
from the inherited environment
"""
def log_var(var, val: Optional[str]):
if val:
mlog.log('Appending {} from environment: {!r}'.format(var, val))
else:
mlog.debug('No {} in the environment, not changing global flags.'.format(var))
lang = self.get_language()
compiler_is_linker = False
if hasattr(self, 'get_linker_exelist'):
compiler_is_linker = (self.get_exelist() == self.get_linker_exelist())
if lang not in cflags_mapping:
return [], []
compile_flags = []
link_flags = []
env_compile_flags = os.environ.get(cflags_mapping[lang])
log_var(cflags_mapping[lang], env_compile_flags)
if env_compile_flags is not None:
compile_flags += shlex.split(env_compile_flags)
# Link flags (same for all languages)
if self.use_ldflags():
env_link_flags = os.environ.get('LDFLAGS')
else:
env_link_flags = None
log_var('LDFLAGS', env_link_flags)
if env_link_flags is not None:
link_flags += shlex.split(env_link_flags)
if compiler_is_linker:
# When the compiler is used as a wrapper around the linker (such as
# with GCC and Clang), the compile flags can be needed while linking
# too. This is also what Autotools does. However, we don't want to do
# this when the linker is stand-alone such as with MSVC C/C++, etc.
link_flags = compile_flags + link_flags
# Pre-processor flags for certain languages
if self.use_preproc_flags():
env_preproc_flags = os.environ.get('CPPFLAGS')
log_var('CPPFLAGS', env_preproc_flags)
if env_preproc_flags is not None:
compile_flags += shlex.split(env_preproc_flags)
return compile_flags, link_flags
def get_options(self):
opts = {} # build afresh every time
description = 'Extra arguments passed to the {}'.format(self.get_display_language())
opts.update({
self.language + '_args': coredata.UserArrayOption(
self.language + '_args',
description + ' compiler',
[], shlex_split=True, user_input=True, allow_dups=True),
self.language + '_link_args': coredata.UserArrayOption(
self.language + '_link_args',
description + ' linker',
[], shlex_split=True, user_input=True, allow_dups=True),
})
return opts
def get_and_default_options(self, properties: Properties):
"""
Take default values from env variables and/or config files.
"""
opts = self.get_options()
if properties.fallback:
# Get from env vars.
compile_args, link_args = self.get_args_from_envvars()
else:
compile_args = []
link_args = []
for k, o in opts.items():
if k in properties:
# Get from configuration files.
o.set_value(properties[k])
elif k == self.language + '_args':
o.set_value(compile_args)
elif k == self.language + '_link_args':
o.set_value(link_args)
return opts
def get_option_compile_args(self, options):
return []
def get_option_link_args(self, options):
return []
def check_header(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support header checks.' % self.get_display_language())
def has_header(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support header checks.' % self.get_display_language())
def has_header_symbol(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support header symbol checks.' % self.get_display_language())
def compiles(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support compile checks.' % self.get_display_language())
def links(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support link checks.' % self.get_display_language())
def run(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support run checks.' % self.get_display_language())
def sizeof(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support sizeof checks.' % self.get_display_language())
def alignment(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support alignment checks.' % self.get_display_language())
def has_function(self, *args, **kwargs):
raise EnvironmentException('Language %s does not support function checks.' % self.get_display_language())
@classmethod
def unix_args_to_native(cls, args):
"Always returns a copy that can be independently mutated"
return args[:]
def find_library(self, *args, **kwargs):
raise EnvironmentException('Language {} does not support library finding.'.format(self.get_display_language()))
def get_library_dirs(self, *args, **kwargs):
return ()
def has_multi_arguments(self, args, env):
raise EnvironmentException(
'Language {} does not support has_multi_arguments.'.format(
self.get_display_language()))
def has_multi_link_arguments(self, args, env):
raise EnvironmentException(
'Language {} does not support has_multi_link_arguments.'.format(
self.get_display_language()))
def _get_compile_output(self, dirname, mode):
# In pre-processor mode, the output is sent to stdout and discarded
if mode == 'preprocess':
return None
# Extension only matters if running results; '.exe' is
# guaranteed to be executable on every platform.
if mode == 'link':
suffix = 'exe'
else:
suffix = 'obj'
return os.path.join(dirname, 'output.' + suffix)
@contextlib.contextmanager
def compile(self, code, extra_args=None, mode='link', want_output=False):
if extra_args is None:
textra_args = None
extra_args = []
else:
textra_args = tuple(extra_args)
key = (code, textra_args, mode)
if not want_output:
if key in self.compiler_check_cache:
p = self.compiler_check_cache[key]
mlog.debug('Using cached compile:')
mlog.debug('Cached command line: ', ' '.join(p.commands), '\n')
mlog.debug('Code:\n', code)
mlog.debug('Cached compiler stdout:\n', p.stdo)
mlog.debug('Cached compiler stderr:\n', p.stde)
yield p
return
try:
with tempfile.TemporaryDirectory() as tmpdirname:
if isinstance(code, str):
srcname = os.path.join(tmpdirname,
'testfile.' + self.default_suffix)
with open(srcname, 'w') as ofile:
ofile.write(code)
elif isinstance(code, mesonlib.File):
srcname = code.fname
# Construct the compiler command-line
commands = CompilerArgs(self)
commands.append(srcname)
commands += self.get_always_args()
if mode == 'compile':
commands += self.get_compile_only_args()
# Preprocess mode outputs to stdout, so no output args
if mode == 'preprocess':
commands += self.get_preprocess_only_args()
else:
output = self._get_compile_output(tmpdirname, mode)
commands += self.get_output_args(output)
# extra_args must be last because it could contain '/link' to
# pass args to VisualStudio's linker. In that case everything
# in the command line after '/link' is given to the linker.
commands += extra_args
# Generate full command-line with the exelist
commands = self.get_exelist() + commands.to_native()
mlog.debug('Running compile:')
mlog.debug('Working directory: ', tmpdirname)
mlog.debug('Command line: ', ' '.join(commands), '\n')
mlog.debug('Code:\n', code)
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
p, p.stdo, p.stde = Popen_safe(commands, cwd=tmpdirname, env=os_env)
mlog.debug('Compiler stdout:\n', p.stdo)
mlog.debug('Compiler stderr:\n', p.stde)
p.commands = commands
p.input_name = srcname
if want_output:
p.output_name = output
else:
self.compiler_check_cache[key] = p
yield p
except (PermissionError, OSError):
# On Windows antivirus programs and the like hold on to files so
# they can't be deleted. There's not much to do in this case. Also,
# catch OSError because the directory is then no longer empty.
pass
def get_colorout_args(self, colortype):
return []
# Some compilers (msvc) write debug info to a separate file.
# These args specify where it should be written.
def get_compile_debugfile_args(self, rel_obj, **kwargs):
return []
def get_link_debugfile_args(self, rel_obj):
return []
def get_std_shared_lib_link_args(self):
return []
def get_std_shared_module_link_args(self, options):
return self.get_std_shared_lib_link_args()
def get_link_whole_for(self, args):
if isinstance(args, list) and not args:
return []
raise EnvironmentException('Language %s does not support linking whole archives.' % self.get_display_language())
# Compiler arguments needed to enable the given instruction set.
# May be [] meaning nothing needed or None meaning the given set
# is not supported.
def get_instruction_set_args(self, instruction_set):
return None
def build_unix_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
if not rpath_paths and not install_rpath and not build_rpath:
return []
args = []
if mesonlib.is_osx():
# Ensure that there is enough space for install_name_tool in-place editing of large RPATHs
args.append('-Wl,-headerpad_max_install_names')
# @loader_path is the equivalent of $ORIGIN on macOS
# https://stackoverflow.com/q/26280738
origin_placeholder = '@loader_path'
else:
origin_placeholder = '$ORIGIN'
# The rpaths we write must be relative if they point to the build dir,
# because otherwise they have different length depending on the build
# directory. This breaks reproducible builds.
processed_rpaths = prepare_rpaths(rpath_paths, build_dir, from_dir)
# Need to deduplicate rpaths, as macOS's install_name_tool
# is *very* allergic to duplicate -delete_rpath arguments
# when calling depfixer on installation.
all_paths = OrderedSet([os.path.join(origin_placeholder, p) for p in processed_rpaths])
# Build_rpath is used as-is (it is usually absolute).
if build_rpath != '':
all_paths.add(build_rpath)
if mesonlib.is_dragonflybsd() or mesonlib.is_openbsd():
# This argument instructs the compiler to record the value of
# ORIGIN in the .dynamic section of the elf. On Linux this is done
# by default, but is not on dragonfly/openbsd for some reason. Without this
# $ORIGIN in the runtime path will be undefined and any binaries
# linked against local libraries will fail to resolve them.
args.append('-Wl,-z,origin')
if mesonlib.is_osx():
# macOS does not support colon-separated strings in LC_RPATH,
# hence we have to pass each path component individually
args += ['-Wl,-rpath,' + rp for rp in all_paths]
else:
# In order to avoid relinking for RPATH removal, the binary needs to contain just
# enough space in the ELF header to hold the final installation RPATH.
paths = ':'.join(all_paths)
if len(paths) < len(install_rpath):
padding = 'X' * (len(install_rpath) - len(paths))
if not paths:
paths = padding
else:
paths = paths + ':' + padding
args.append('-Wl,-rpath,' + paths)
if get_compiler_is_linuxlike(self):
# Rpaths to use while linking must be absolute. These are not
# written to the binary. Needed only with GNU ld:
# https://sourceware.org/bugzilla/show_bug.cgi?id=16936
# Not needed on Windows or other platforms that don't use RPATH
# https://github.com/mesonbuild/meson/issues/1897
#
# In addition, this linker option tends to be quite long and some
# compilers have trouble dealing with it. That's why we will include
# one option per folder, like this:
#
# -Wl,-rpath-link,/path/to/folder1 -Wl,-rpath,/path/to/folder2 ...
#
# ...instead of just one single looooong option, like this:
#
# -Wl,-rpath-link,/path/to/folder1:/path/to/folder2:...
args += ['-Wl,-rpath-link,' + os.path.join(build_dir, p) for p in rpath_paths]
return args
def thread_flags(self, env):
return []
def openmp_flags(self):
raise EnvironmentException('Language %s does not support OpenMP flags.' % self.get_display_language())
def language_stdlib_only_link_flags(self):
# The linker flags needed to link the standard library of the current
# language in. This is needed in cases where you e.g. combine D and C++
# and both of which need to link their runtime library in or otherwise
# building fails with undefined symbols.
return []
def gnu_symbol_visibility_args(self, vistype):
return []
def get_gui_app_args(self, value):
return []
def has_func_attribute(self, name, env):
raise EnvironmentException(
'Language {} does not support function attributes.'.format(self.get_display_language()))
def get_pic_args(self):
m = 'Language {} does not support position-independent code'
raise EnvironmentException(m.format(self.get_display_language()))
def get_pie_args(self):
m = 'Language {} does not support position-independent executable'
raise EnvironmentException(m.format(self.get_display_language()))
def get_pie_link_args(self):
m = 'Language {} does not support position-independent executable'
raise EnvironmentException(m.format(self.get_display_language()))
def get_argument_syntax(self):
"""Returns the argument family type.
Compilers fall into families if they try to emulate the command line
interface of another compiler. For example, clang is in the GCC family
since it accepts most of the same arguments as GCC. ICL (ICC on
windows) is in the MSVC family since it accepts most of the same
arguments as MSVC.
"""
return 'other'
def get_profile_generate_args(self):
raise EnvironmentException(
'%s does not support get_profile_generate_args ' % self.get_id())
def get_profile_use_args(self):
raise EnvironmentException(
'%s does not support get_profile_use_args ' % self.get_id())
def get_undefined_link_args(self):
'''
Get args for allowing undefined symbols when linking to a shared library
'''
return []
def remove_linkerlike_args(self, args):
return [x for x in args if not x.startswith('-Wl')]
@enum.unique
class CompilerType(enum.Enum):
GCC_STANDARD = 0
GCC_OSX = 1
GCC_MINGW = 2
GCC_CYGWIN = 3
CLANG_STANDARD = 10
CLANG_OSX = 11
CLANG_MINGW = 12
# Possibly clang-cl?
ICC_STANDARD = 20
ICC_OSX = 21
ICC_WIN = 22
ARM_WIN = 30
CCRX_WIN = 40
PGI_STANDARD = 50
PGI_OSX = 51
PGI_WIN = 52
@property
def is_standard_compiler(self):
return self.name in ('GCC_STANDARD', 'CLANG_STANDARD', 'ICC_STANDARD', 'PGI_STANDARD')
@property
def is_osx_compiler(self):
return self.name in ('GCC_OSX', 'CLANG_OSX', 'ICC_OSX', 'PGI_OSX')
@property
def is_windows_compiler(self):
return self.name in ('GCC_MINGW', 'GCC_CYGWIN', 'CLANG_MINGW', 'ICC_WIN', 'ARM_WIN', 'CCRX_WIN', 'PGI_WIN')
def get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion):
install_name = prefix + shlib_name
if soversion is not None:
install_name += '.' + soversion
install_name += '.dylib'
return '@rpath/' + install_name
def get_gcc_soname_args(compiler_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):
if compiler_type.is_standard_compiler:
sostr = '' if soversion is None else '.' + soversion
return ['-Wl,-soname,%s%s.%s%s' % (prefix, shlib_name, suffix, sostr)]
elif compiler_type.is_windows_compiler:
# For PE/COFF the soname argument has no effect with GNU LD
return []
elif compiler_type.is_osx_compiler:
if is_shared_module:
return []
name = get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion)
args = ['-install_name', name]
if darwin_versions:
args += ['-compatibility_version', darwin_versions[0], '-current_version', darwin_versions[1]]
return args
else:
raise RuntimeError('Not implemented yet.')
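# Illustrative sketch (not part of upstream Meson): expected soname/install-name
# arguments for a hypothetical shared library 'foo' with soversion '1'. Note
# that the suffix is passed without a leading dot, and that the macOS helper
# ignores the suffix argument entirely.
def _soname_args_demo():
    linux = get_gcc_soname_args(CompilerType.GCC_STANDARD,
                                'lib', 'foo', 'so', '1', None, False)
    # linux == ['-Wl,-soname,libfoo.so.1']
    macos = get_macos_dylib_install_name('lib', 'foo', 'dylib', '1')
    # macos == '@rpath/libfoo.1.dylib'
    return linux, macos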
def get_compiler_is_linuxlike(compiler):
compiler_type = getattr(compiler, 'compiler_type', None)
return compiler_type and compiler_type.is_standard_compiler
def get_compiler_uses_gnuld(c):
# FIXME: Perhaps we should detect the linker in the environment?
# FIXME: Assumes that *BSD use GNU ld, but they might start using lld soon
compiler_type = getattr(c, 'compiler_type', None)
return compiler_type in (
CompilerType.GCC_STANDARD,
CompilerType.GCC_MINGW,
CompilerType.GCC_CYGWIN,
CompilerType.CLANG_STANDARD,
CompilerType.CLANG_MINGW,
CompilerType.ICC_STANDARD,
CompilerType.ICC_WIN)
def get_largefile_args(compiler):
'''
Enable transparent large-file-support for 32-bit UNIX systems
'''
if get_compiler_is_linuxlike(compiler):
# Enable large-file support unconditionally on all platforms other
# than macOS and Windows. macOS is now 64-bit-only so it doesn't
# need anything special, and Windows doesn't have automatic LFS.
# You must use the 64-bit counterparts explicitly.
# glibc, musl, and uclibc, and all BSD libcs support this. On Android,
# support for transparent LFS is available depending on the version of
# Bionic: https://github.com/android/platform_bionic#32-bit-abi-bugs
# https://code.google.com/p/android/issues/detail?id=64613
#
# If this breaks your code, fix it! It's been 20+ years!
return ['-D_FILE_OFFSET_BITS=64']
# We don't enable -D_LARGEFILE64_SOURCE since that enables
# transitionary features and must be enabled by programs that use
# those features explicitly.
return []
# TODO: The result from calling compiler should be cached. So that calling this
# function multiple times don't add latency.
def gnulike_default_include_dirs(compiler, lang):
if lang == 'cpp':
lang = 'c++'
env = os.environ.copy()
env["LC_ALL"] = 'C'
cmd = compiler + ['-x{}'.format(lang), '-E', '-v', '-']
p = subprocess.Popen(
cmd,
stdin=subprocess.DEVNULL,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env
)
stderr = p.stderr.read().decode('utf-8', errors='replace')
parse_state = 0
paths = []
for line in stderr.split('\n'):
if parse_state == 0:
if line == '#include "..." search starts here:':
parse_state = 1
elif parse_state == 1:
if line == '#include <...> search starts here:':
parse_state = 2
else:
paths.append(line[1:])
elif parse_state == 2:
if line == 'End of search list.':
break
else:
paths.append(line[1:])
if not paths:
mlog.warning('No include directory found parsing "{cmd}" output'.format(cmd=" ".join(cmd)))
return paths
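# Illustrative sketch (not part of upstream Meson): querying the default include
# search path of a GNU-like driver with the parser above. This assumes a 'gcc'
# binary is available on PATH; the exelist is a list and the language is the
# Meson language name ('cpp' is mapped to 'c++' internally).
def _default_include_dirs_demo():
    return gnulike_default_include_dirs(['gcc'], 'c')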
class GnuLikeCompiler(abc.ABC):
"""
GnuLikeCompiler is a common interface to all compilers implementing
the GNU-style commandline interface. This includes GCC, Clang
and ICC. Certain functionality between them is different and requires
that the actual concrete subclass define their own implementation.
"""
def __init__(self, compiler_type):
self.compiler_type = compiler_type
self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
'b_ndebug', 'b_staticpic', 'b_pie']
if (not self.compiler_type.is_osx_compiler and
not self.compiler_type.is_windows_compiler and
not mesonlib.is_openbsd()):
self.base_options.append('b_lundef')
if not self.compiler_type.is_windows_compiler:
self.base_options.append('b_asneeded')
# All GCC-like backends can do assembly
self.can_compile_suffixes.add('s')
def get_asneeded_args(self):
# GNU ld cannot be installed on macOS
# https://github.com/Homebrew/homebrew-core/issues/17794#issuecomment-328174395
# Hence, we don't need to differentiate between OS and ld
# for the sake of adding as-needed support
if self.compiler_type.is_osx_compiler:
return '-Wl,-dead_strip_dylibs'
else:
return '-Wl,--as-needed'
def get_pic_args(self):
if self.compiler_type.is_osx_compiler or self.compiler_type.is_windows_compiler:
            return [] # On Windows and OS X, PIC is always on.
return ['-fPIC']
def get_pie_args(self):
return ['-fPIE']
def get_pie_link_args(self):
return ['-pie']
def get_buildtype_args(self, buildtype):
return gnulike_buildtype_args[buildtype]
@abc.abstractmethod
def get_optimization_args(self, optimization_level):
raise NotImplementedError("get_optimization_args not implemented")
def get_debug_args(self, is_debug):
return clike_debug_args[is_debug]
def get_buildtype_linker_args(self, buildtype):
if self.compiler_type.is_osx_compiler:
return apple_buildtype_linker_args[buildtype]
return gnulike_buildtype_linker_args[buildtype]
@abc.abstractmethod
def get_pch_suffix(self):
raise NotImplementedError("get_pch_suffix not implemented")
def split_shlib_to_parts(self, fname):
return os.path.dirname(fname), fname
def get_soname_args(self, *args):
return get_gcc_soname_args(self.compiler_type, *args)
def get_std_shared_lib_link_args(self):
return ['-shared']
def get_std_shared_module_link_args(self, options):
if self.compiler_type.is_osx_compiler:
return ['-bundle', '-Wl,-undefined,dynamic_lookup']
return ['-shared']
def get_link_whole_for(self, args):
if self.compiler_type.is_osx_compiler:
result = []
for a in args:
result += ['-Wl,-force_load', a]
return result
return ['-Wl,--whole-archive'] + args + ['-Wl,--no-whole-archive']
def get_instruction_set_args(self, instruction_set):
return gnulike_instruction_set_args.get(instruction_set, None)
def get_default_include_dirs(self):
return gnulike_default_include_dirs(self.exelist, self.language)
@abc.abstractmethod
def openmp_flags(self):
raise NotImplementedError("openmp_flags not implemented")
def gnu_symbol_visibility_args(self, vistype):
return gnu_symbol_visibility_args[vistype]
def gen_vs_module_defs_args(self, defsfile):
if not isinstance(defsfile, str):
raise RuntimeError('Module definitions file should be str')
# On Windows targets, .def files may be specified on the linker command
# line like an object file.
if self.compiler_type.is_windows_compiler:
return [defsfile]
# For other targets, discard the .def file.
return []
def get_argument_syntax(self):
return 'gcc'
def get_profile_generate_args(self):
return ['-fprofile-generate']
def get_profile_use_args(self):
return ['-fprofile-use', '-fprofile-correction']
def get_allow_undefined_link_args(self):
if self.compiler_type.is_osx_compiler:
# Apple ld
return ['-Wl,-undefined,dynamic_lookup']
elif self.compiler_type.is_windows_compiler:
# For PE/COFF this is impossible
return []
else:
# GNU ld and LLVM lld
return ['-Wl,--allow-shlib-undefined']
def get_gui_app_args(self, value):
if self.compiler_type.is_windows_compiler:
return ['-mwindows' if value else '-mconsole']
return []
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
class GnuCompiler(GnuLikeCompiler):
"""
GnuCompiler represents an actual GCC in its many incarnations.
Compilers imitating GCC (Clang/Intel) should use the GnuLikeCompiler ABC.
"""
def __init__(self, compiler_type, defines: dict):
super().__init__(compiler_type)
self.id = 'gcc'
self.defines = defines or {}
self.base_options.append('b_colorout')
def get_colorout_args(self, colortype: str) -> List[str]:
if mesonlib.version_compare(self.version, '>=4.9.0'):
return gnu_color_args[colortype][:]
return []
def get_warn_args(self, level: str) -> list:
args = super().get_warn_args(level)
if mesonlib.version_compare(self.version, '<4.8.0') and '-Wpedantic' in args:
# -Wpedantic was added in 4.8.0
# https://gcc.gnu.org/gcc-4.8/changes.html
args[args.index('-Wpedantic')] = '-pedantic'
return args
def has_builtin_define(self, define: str) -> bool:
return define in self.defines
def get_builtin_define(self, define):
if define in self.defines:
return self.defines[define]
def get_optimization_args(self, optimization_level: str):
return gnu_optimization_args[optimization_level]
def get_pch_suffix(self) -> str:
return 'gch'
def openmp_flags(self) -> List[str]:
return ['-fopenmp']
class PGICompiler:
def __init__(self, compiler_type):
self.id = 'pgi'
self.compiler_type = compiler_type
default_warn_args = ['-Minform=inform']
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args,
'3': default_warn_args}
def get_module_incdir_args(self) -> Tuple[str]:
return ('-module', )
def get_no_warn_args(self) -> List[str]:
return ['-silent']
def get_pic_args(self) -> List[str]:
if self.compiler_type.is_osx_compiler or self.compiler_type.is_windows_compiler:
return [] # PGI -fPIC is Linux only.
return ['-fPIC']
def openmp_flags(self) -> List[str]:
return ['-mp']
def get_buildtype_args(self, buildtype: str) -> List[str]:
return pgi_buildtype_args[buildtype]
def get_buildtype_linker_args(self, buildtype: str) -> List[str]:
return pgi_buildtype_linker_args[buildtype]
def get_optimization_args(self, optimization_level: str):
return clike_optimization_args[optimization_level]
def get_debug_args(self, is_debug: bool):
return clike_debug_args[is_debug]
def compute_parameters_with_absolute_paths(self, parameter_list: List[str], build_dir: str):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
def get_allow_undefined_link_args(self):
return []
def get_dependency_gen_args(self, outtarget, outfile):
return []
def get_always_args(self):
return []
class ElbrusCompiler(GnuCompiler):
# Elbrus compiler is nearly like GCC, but does not support
# PCH, LTO, sanitizers and color output as of version 1.21.x.
def __init__(self, compiler_type, defines):
GnuCompiler.__init__(self, compiler_type, defines)
self.id = 'lcc'
self.base_options = ['b_pgo', 'b_coverage',
'b_ndebug', 'b_staticpic',
'b_lundef', 'b_asneeded']
# FIXME: use _build_wrapper to call this so that linker flags from the env
# get applied
def get_library_dirs(self, env, elf_class = None):
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
stdo = Popen_safe(self.exelist + ['--print-search-dirs'], env=os_env)[1]
paths = ()
for line in stdo.split('\n'):
if line.startswith('libraries:'):
# lcc does not include '=' in --print-search-dirs output.
libstr = line.split(' ', 1)[1]
paths = (os.path.realpath(p) for p in libstr.split(':'))
break
return paths
def get_program_dirs(self, env):
os_env = os.environ.copy()
os_env['LC_ALL'] = 'C'
stdo = Popen_safe(self.exelist + ['--print-search-dirs'], env=os_env)[1]
paths = ()
for line in stdo.split('\n'):
if line.startswith('programs:'):
# lcc does not include '=' in --print-search-dirs output.
libstr = line.split(' ', 1)[1]
paths = (os.path.realpath(p) for p in libstr.split(':'))
break
return paths
class ClangCompiler(GnuLikeCompiler):
def __init__(self, compiler_type):
super().__init__(compiler_type)
self.id = 'clang'
self.base_options.append('b_colorout')
if self.compiler_type.is_osx_compiler:
self.base_options.append('b_bitcode')
# All Clang backends can also do LLVM IR
self.can_compile_suffixes.add('ll')
def get_colorout_args(self, colortype):
return clang_color_args[colortype][:]
def get_optimization_args(self, optimization_level):
return clike_optimization_args[optimization_level]
def get_pch_suffix(self):
return 'pch'
def get_pch_use_args(self, pch_dir, header):
# Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
# This flag is internal to Clang (or at least not documented on the man page)
# so it might change semantics at any time.
return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]
def has_multi_arguments(self, args, env):
myargs = ['-Werror=unknown-warning-option', '-Werror=unused-command-line-argument']
if mesonlib.version_compare(self.version, '>=3.6.0'):
myargs.append('-Werror=ignored-optimization-argument')
return super().has_multi_arguments(
myargs + args,
env)
def has_function(self, funcname, prefix, env, *, extra_args=None, dependencies=None):
if extra_args is None:
extra_args = []
# Starting with XCode 8, we need to pass this to force linker
# visibility to obey OS X and iOS minimum version targets with
# -mmacosx-version-min, -miphoneos-version-min, etc.
# https://github.com/Homebrew/homebrew-core/issues/3727
if self.compiler_type.is_osx_compiler and version_compare(self.version, '>=8.0'):
extra_args.append('-Wl,-no_weak_imports')
return super().has_function(funcname, prefix, env, extra_args=extra_args,
dependencies=dependencies)
def openmp_flags(self):
if version_compare(self.version, '>=3.8.0'):
return ['-fopenmp']
elif version_compare(self.version, '>=3.7.0'):
return ['-fopenmp=libomp']
else:
# Shouldn't work, but it'll be checked explicitly in the OpenMP dependency.
return []
class ArmclangCompiler:
def __init__(self, compiler_type):
if not self.is_cross:
raise EnvironmentException('armclang supports only cross-compilation.')
# Check whether 'armlink.exe' is available in path
self.linker_exe = 'armlink.exe'
args = '--vsn'
try:
p, stdo, stderr = Popen_safe(self.linker_exe, args)
except OSError as e:
err_msg = 'Unknown linker\nRunning "{0}" gave \n"{1}"'.format(' '.join([self.linker_exe] + [args]), e)
raise EnvironmentException(err_msg)
# Verify the armlink version
ver_str = re.search('.*Component.*', stdo)
if ver_str:
ver_str = ver_str.group(0)
else:
            raise EnvironmentException('armlink version string not found')
# Using the regular expression from environment.search_version,
# which is used for searching compiler version
version_regex = r'(?<!(\d|\.))(\d{1,2}(\.\d+)+(-[a-zA-Z0-9]+)?)'
linker_ver = re.search(version_regex, ver_str)
if linker_ver:
linker_ver = linker_ver.group(0)
if not version_compare(self.version, '==' + linker_ver):
raise EnvironmentException('armlink version does not match with compiler version')
self.id = 'armclang'
self.compiler_type = compiler_type
self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
'b_ndebug', 'b_staticpic', 'b_colorout']
# Assembly
self.can_compile_suffixes.update('s')
def can_linker_accept_rsp(self):
return False
def get_pic_args(self):
# PIC support is not enabled by default for ARM,
# if users want to use it, they need to add the required arguments explicitly
return []
def get_colorout_args(self, colortype):
return clang_color_args[colortype][:]
def get_buildtype_args(self, buildtype):
return armclang_buildtype_args[buildtype]
def get_buildtype_linker_args(self, buildtype):
return arm_buildtype_linker_args[buildtype]
# Override CCompiler.get_std_shared_lib_link_args
def get_std_shared_lib_link_args(self):
return []
def get_pch_suffix(self):
return 'gch'
def get_pch_use_args(self, pch_dir, header):
# Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
# This flag is internal to Clang (or at least not documented on the man page)
# so it might change semantics at any time.
return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]
# Override CCompiler.get_dependency_gen_args
def get_dependency_gen_args(self, outtarget, outfile):
return []
# Override CCompiler.build_rpath_args
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
return []
def get_linker_exelist(self):
return [self.linker_exe]
def get_optimization_args(self, optimization_level):
return armclang_optimization_args[optimization_level]
def get_debug_args(self, is_debug):
return clike_debug_args[is_debug]
def gen_export_dynamic_link_args(self, env):
"""
The args for export dynamic
"""
return ['--export_dynamic']
def gen_import_library_args(self, implibname):
"""
The args of the outputted import library
ArmLinker's symdefs output can be used as implib
"""
return ['--symdefs=' + implibname]
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
# Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1, 19.0.0
class IntelCompiler(GnuLikeCompiler):
def __init__(self, compiler_type):
super().__init__(compiler_type)
# As of 19.0.0 ICC doesn't have sanitizer, color, or lto support.
#
        # It does have IPO, which serves much the same purpose as LTO, but
        # there is an unfortunate rule for using IPO (you can't control the
        # name of the output file), which breaks assumptions Meson makes.
self.base_options = ['b_pch', 'b_lundef', 'b_asneeded', 'b_pgo',
'b_coverage', 'b_ndebug', 'b_staticpic', 'b_pie']
self.id = 'intel'
self.lang_header = 'none'
def get_optimization_args(self, optimization_level):
return clike_optimization_args[optimization_level]
def get_pch_suffix(self) -> str:
return 'pchi'
def get_pch_use_args(self, pch_dir, header):
return ['-pch', '-pch_dir', os.path.join(pch_dir), '-x',
self.lang_header, '-include', header, '-x', 'none']
def get_pch_name(self, header_name):
return os.path.basename(header_name) + '.' + self.get_pch_suffix()
def openmp_flags(self) -> List[str]:
if version_compare(self.version, '>=15.0.0'):
return ['-qopenmp']
else:
return ['-openmp']
def compiles(self, *args, **kwargs):
# This covers a case that .get('foo', []) doesn't, that extra_args is
# defined and is None
extra_args = kwargs.get('extra_args') or []
kwargs['extra_args'] = [
extra_args,
'-diag-error', '10006', # ignoring unknown option
'-diag-error', '10148', # Option not supported
'-diag-error', '10155', # ignoring argument required
'-diag-error', '10156', # ignoring not argument allowed
'-diag-error', '10157', # Ignoring argument of the wrong type
'-diag-error', '10158', # Argument must be separate. Can be hit by trying an option like -foo-bar=foo when -foo=bar is a valid option but -foo-bar isn't
'-diag-error', '1292', # unknown __attribute__
]
return super().compiles(*args, **kwargs)
def get_profile_generate_args(self):
return ['-prof-gen=threadsafe']
def get_profile_use_args(self):
return ['-prof-use']
class ArmCompiler:
# Functionality that is common to all ARM family compilers.
def __init__(self, compiler_type):
if not self.is_cross:
raise EnvironmentException('armcc supports only cross-compilation.')
self.id = 'arm'
self.compiler_type = compiler_type
default_warn_args = []
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + [],
'3': default_warn_args + []}
# Assembly
self.can_compile_suffixes.add('s')
def can_linker_accept_rsp(self):
return False
def get_pic_args(self):
# FIXME: Add /ropi, /rwpi, /fpic etc. qualifiers to --apcs
return []
def get_buildtype_args(self, buildtype):
return arm_buildtype_args[buildtype]
def get_buildtype_linker_args(self, buildtype):
return arm_buildtype_linker_args[buildtype]
# Override CCompiler.get_always_args
def get_always_args(self):
return []
# Override CCompiler.get_dependency_gen_args
def get_dependency_gen_args(self, outtarget, outfile):
return []
# Override CCompiler.get_std_shared_lib_link_args
def get_std_shared_lib_link_args(self):
return []
def get_pch_use_args(self, pch_dir, header):
# FIXME: Add required arguments
# NOTE from armcc user guide:
# "Support for Precompiled Header (PCH) files is deprecated from ARM Compiler 5.05
# onwards on all platforms. Note that ARM Compiler on Windows 8 never supported
# PCH files."
return []
def get_pch_suffix(self):
# NOTE from armcc user guide:
# "Support for Precompiled Header (PCH) files is deprecated from ARM Compiler 5.05
# onwards on all platforms. Note that ARM Compiler on Windows 8 never supported
# PCH files."
return 'pch'
def thread_flags(self, env):
return []
def thread_link_flags(self, env):
return []
def get_linker_exelist(self):
args = ['armlink']
return args
def get_coverage_args(self):
return []
def get_coverage_link_args(self):
return []
def get_optimization_args(self, optimization_level):
return arm_optimization_args[optimization_level]
def get_debug_args(self, is_debug):
return clike_debug_args[is_debug]
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:2] == '-I' or i[:2] == '-L':
parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
return parameter_list
class CcrxCompiler:
def __init__(self, compiler_type):
if not self.is_cross:
raise EnvironmentException('ccrx supports only cross-compilation.')
# Check whether 'rlink.exe' is available in path
self.linker_exe = 'rlink.exe'
args = '--version'
try:
p, stdo, stderr = Popen_safe(self.linker_exe, args)
except OSError as e:
err_msg = 'Unknown linker\nRunning "{0}" gave \n"{1}"'.format(' '.join([self.linker_exe] + [args]), e)
raise EnvironmentException(err_msg)
self.id = 'ccrx'
self.compiler_type = compiler_type
# Assembly
self.can_compile_suffixes.update('s')
default_warn_args = []
self.warn_args = {'0': [],
'1': default_warn_args,
'2': default_warn_args + [],
'3': default_warn_args + []}
def can_linker_accept_rsp(self):
return False
def get_pic_args(self):
# PIC support is not enabled by default for CCRX,
# if users want to use it, they need to add the required arguments explicitly
return []
def get_buildtype_args(self, buildtype):
return ccrx_buildtype_args[buildtype]
def get_buildtype_linker_args(self, buildtype):
return ccrx_buildtype_linker_args[buildtype]
# Override CCompiler.get_std_shared_lib_link_args
def get_std_shared_lib_link_args(self):
return []
def get_pch_suffix(self):
return 'pch'
def get_pch_use_args(self, pch_dir, header):
return []
# Override CCompiler.get_dependency_gen_args
def get_dependency_gen_args(self, outtarget, outfile):
return []
# Override CCompiler.build_rpath_args
def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
return []
def thread_flags(self, env):
return []
def thread_link_flags(self, env):
return []
def get_linker_exelist(self):
return [self.linker_exe]
def get_linker_lib_prefix(self):
return '-lib='
def get_coverage_args(self):
return []
def get_coverage_link_args(self):
return []
def get_optimization_args(self, optimization_level):
return ccrx_optimization_args[optimization_level]
def get_debug_args(self, is_debug):
return ccrx_debug_args[is_debug]
@classmethod
def unix_args_to_native(cls, args):
result = []
for i in args:
if i.startswith('-D'):
i = '-define=' + i[2:]
if i.startswith('-I'):
i = '-include=' + i[2:]
if i.startswith('-Wl,-rpath='):
continue
elif i == '--print-search-dirs':
continue
elif i.startswith('-L'):
continue
result.append(i)
return result
def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
for idx, i in enumerate(parameter_list):
if i[:9] == '-include=':
parameter_list[idx] = i[:9] + os.path.normpath(os.path.join(build_dir, i[9:]))
return parameter_list
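# Illustrative sketch (not part of upstream Meson): CcrxCompiler translates
# GCC-style arguments into the Renesas CC-RX syntax via the classmethod above.
# -D/-I are renamed, while -L and rpath arguments are dropped.
def _ccrx_args_demo():
    gcc_style = ['-DFOO', '-I/inc', '-L/lib', '-lfoo']
    # Expected: ['-define=FOO', '-include=/inc', '-lfoo']
    return CcrxCompiler.unix_args_to_native(gcc_style)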
|
the-stack_106_19309
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Author: Pauli Virtanen, 2016
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import operator
from .util import inf, nan, is_na
def compute_stats(samples, number):
"""
Statistical analysis of measured samples.
Parameters
----------
samples : list of float
List of total times (y) of benchmarks.
number : int
Repeat number for each sample.
Returns
-------
beta_hat : float
Estimated time per iteration
stats : dict
Information on statistics of the estimator.
"""
if len(samples) < 1:
return None, None
Y = list(samples)
# Median and quantiles
y_50, ci_50 = quantile_ci(Y, 0.5, alpha_min=0.99)
y_25 = quantile(Y, 0.25)
y_75 = quantile(Y, 0.75)
# If nonparametric CI estimation didn't give an estimate,
# use the credible interval of a bayesian posterior distribution.
a, b = ci_50
if (math.isinf(a) or math.isinf(b)) and len(Y) > 1:
# Compute posterior distribution for location, assuming
# exponential noise. The MLE is equal to the median.
c = LaplacePosterior(Y)
# Use the CI from that distribution to extend beyond sample
# bounds
if math.isinf(a):
a = min(c.ppf(0.01/2), min(Y))
if math.isinf(b):
b = max(c.ppf(1 - 0.01/2), max(Y))
ci_50 = (a, b)
# Produce results
result = y_50
stats = {'ci_99_a': ci_50[0],
'ci_99_b': ci_50[1],
'q_25': y_25,
'q_75': y_75,
'repeat': len(Y),
'number': number}
return result, stats
def get_err(result, stats):
"""
Return an 'error measure' suitable for informing the user
about the spread of the measurement results.
"""
a, b = stats['q_25'], stats['q_75']
return (b - a)/2
def get_weight(stats):
"""
Return a data point weight for the result.
"""
if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats:
return None
try:
a = stats['ci_99_a']
b = stats['ci_99_b']
if math.isinf(a) or math.isinf(b):
# Infinite interval is due to too few samples --- consider
# weight as missing
return None
return 2 / abs(b - a)
except ZeroDivisionError:
return None
def is_different(samples_a, samples_b, stats_a, stats_b, p_threshold=0.002):
"""Check whether the samples are statistically different.
If sample data is not provided, or the sample is too small, falls
back to a pessimistic CI-based check. If it returns True, then the
difference is statistically significant. If it returns False, it
might or might not be statistically significant.
Parameters
----------
samples_a, samples_b
Input samples
stats_a, stats_b
Input stats data
"""
if samples_a is not None and samples_b is not None:
# Raw data present: Mann-Whitney U test, but only if there's
# enough data so that the test can return True
a = [x for x in samples_a if not is_na(x)]
b = [x for x in samples_b if not is_na(x)]
p_min = 1 / binom(len(a) + len(b), min(len(a), len(b)))
if p_min < p_threshold:
_, p = mann_whitney_u(a, b)
return p < p_threshold
# If confidence intervals overlap, reject.
# Corresponds to a test with ill-specified threshold p-value,
# which generally can be significantly smaller than p <= 0.01
# depending on the actual data. For normal test (known variance),
# 0.00027 <= p <= 0.01.
ci_a = (stats_a['ci_99_a'], stats_a['ci_99_b'])
ci_b = (stats_b['ci_99_a'], stats_b['ci_99_b'])
if ci_a[1] >= ci_b[0] and ci_a[0] <= ci_b[1]:
return False
return True
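# Illustrative sketch (not part of upstream asv): with enough raw samples the
# Mann-Whitney U path is taken, and clearly separated timings are reported as
# statistically different. The sample values and repeat count are made up.
def _is_different_demo():
    fast = [1.0, 1.1, 0.9, 1.05, 0.95] * 4
    slow = [2.0, 2.1, 1.9, 2.05, 1.95] * 4
    _, stats_fast = compute_stats(fast, 1)
    _, stats_slow = compute_stats(slow, 1)
    return is_different(fast, slow, stats_fast, stats_slow)  # True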
def quantile_ci(x, q, alpha_min=0.01):
"""
Compute a quantile and a confidence interval.
Assumes independence, but otherwise nonparametric.
Parameters
----------
x : list of float
Samples
q : float
Quantile to compute, in [0,1].
alpha_min : float, optional
Limit for coverage.
The result has coverage >= 1 - alpha_min.
Returns
-------
m : float
Quantile of x
ci : tuple of floats
Confidence interval (a, b), of coverage >= alpha_min.
"""
y = sorted(x)
n = len(y)
alpha_min = min(alpha_min, 1 - alpha_min)
pa = alpha_min / 2
pb = 1 - pa
a = -inf
b = inf
# It's known that
#
# Pr[X_{(r)} < m < X_{(s)}] = Pr[r <= K <= s-1], K ~ Bin(n,p)
#
# where cdf(m) = p defines the quantile.
#
# Simplest median CI follows by picking r,s such that
#
# F(r;n,q) <= alpha/2
# F(s;n,q) >= 1 - alpha/2
#
# F(k;n,q) = sum(binom_pmf(n, j, q) for j in range(k))
#
# Then (y[r-1], y[s-1]) is a CI.
# If no such r or s exists, replace by +-inf.
F = 0
for k, yp in enumerate(y):
F += binom_pmf(n, k, q)
# F = F(k+1;n,q)
if F <= pa:
a = yp
if F >= pb:
b = yp
break
m = quantile(y, q)
return m, (a, b)
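# A small worked example (not part of upstream asv): for 20 equally spaced
# samples, the median is 10.5 and the order-statistic construction above gives
# a >= 99% confidence interval spanning the 4th to 17th sorted value.
def _quantile_ci_demo():
    samples = [float(i) for i in range(1, 21)]
    return quantile_ci(samples, 0.5, alpha_min=0.01)  # (10.5, (4.0, 17.0))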
def quantile(x, q):
"""
Compute quantile/percentile of the data
Parameters
----------
x : list of float
Data set
q : float
Quantile to compute, 0 <= q <= 1
"""
if not 0 <= q <= 1:
raise ValueError("Invalid quantile")
y = sorted(x)
n = len(y)
z = (n - 1) * q
j = int(math.floor(z))
z -= j
if j == n - 1:
m = y[-1]
else:
m = (1 - z)*y[j] + z*y[j+1]
return m
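# A small worked example (not part of upstream asv): quantile() linearly
# interpolates between the two nearest order statistics.
def _quantile_demo():
    assert quantile([1.0, 2.0, 3.0, 4.0], 0.5) == 2.5
    assert quantile([1.0, 2.0, 3.0, 4.0, 5.0], 0.25) == 2.0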
_mann_whitney_u_memo = {}
def mann_whitney_u(x, y, method='auto'):
"""
Mann-Whitney U test
Ties are handled conservatively, returning the least significant
tie breaking.
Parameters
----------
x, y : list of float
Samples to test
method : {'auto', 'exact', 'normal'}
Whether to compute p-value exactly of via normal approximation.
The option 'auto' switches to approximation for sample size > 20.
Returns
-------
u : int
U-statistic
p : float
p-value for two-sided alternative
References
----------
.. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
.. [2] Gibbons & Chakraborti, "Nonparametric statistical inference". (2003)
"""
memo = _mann_whitney_u_memo
if len(memo) > 100000:
memo.clear()
m = len(x)
n = len(y)
if method == 'auto':
if max(m, n) > 20:
method = 'normal'
else:
method = 'exact'
u, ties = mann_whitney_u_u(x, y)
# Conservative tie breaking
if u <= m*n//2 and u + ties >= m*n//2:
ties = m*n//2 - u
ux1 = min(u, m*n - u)
ux2 = min(u + ties, m*n - (u + ties))
if ux1 >= ux2:
ux = ux1
else:
u = u + ties
ux = ux2
# Get p-value
if method == 'exact':
p1 = mann_whitney_u_cdf(m, n, ux, memo)
p2 = 1.0 - mann_whitney_u_cdf(m, n, max(m*n//2, m*n - ux - 1), memo)
p = p1 + p2
elif method == 'normal':
N = m + n
var = m*n*(N + 1) / 12
z = (ux - m*n/2) / math.sqrt(var)
cdf = 0.5 * math.erfc(-z / math.sqrt(2))
p = 2 * cdf
else:
raise ValueError("Unknown method {!r}".format(method))
return u, p
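# A small worked example (not part of upstream asv): two fully separated
# samples of size 3 give U = 0 and an exact two-sided p-value of 2/20 = 0.1
# (up to floating point rounding).
def _mann_whitney_u_demo():
    u, p = mann_whitney_u([1.0, 2.0, 3.0], [4.0, 5.0, 6.0])
    return u, p  # (0, ~0.1)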
def mann_whitney_u_u(x, y):
u = 0
ties = 0
for xx in x:
for yy in y:
if xx > yy:
u += 1
elif xx == yy:
ties += 1
return u, ties
def mann_whitney_u_cdf(m, n, u, memo=None):
if memo is None:
memo = {}
cdf = 0
for uu in range(u + 1):
cdf += mann_whitney_u_pmf(m, n, uu, memo)
return cdf
def mann_whitney_u_pmf(m, n, u, memo=None):
if memo is None:
memo = {}
return mann_whitney_u_r(m, n, u, memo) / binom(m + n, m)
def mann_whitney_u_r(m, n, u, memo=None):
"""
Number of orderings in Mann-Whitney U test.
The PMF of U for samples of sizes (m, n) is given by
p(u) = r(m, n, u) / binom(m + n, m).
References
----------
.. [1] Mann & Whitney, Ann. Math. Statist. 18, 50 (1947).
"""
if u < 0:
value = 0
elif m == 0 or n == 0:
value = 1 if u == 0 else 0
else:
# Don't bother figuring out table construction, memoization
# sorts it out
if memo is None:
memo = {}
key = (m, n, u)
value = memo.get(key)
if value is not None:
return value
value = (mann_whitney_u_r(m, n - 1, u, memo)
+ mann_whitney_u_r(m - 1, n, u - n, memo))
memo[key] = value
return value
def binom_pmf(n, k, p):
"""Binomial pmf = (n choose k) p**k (1 - p)**(n - k)"""
if not (0 <= k <= n):
return 0
if p == 0:
return 1.0 * (k == 0)
elif p == 1.0:
return 1.0 * (k == n)
logp = math.log(p)
log1mp = math.log(1 - p)
return math.exp(lgamma(1 + n) - lgamma(1 + n - k) - lgamma(1 + k)
+ k*logp + (n - k)*log1mp)
_BERNOULLI = [1.0, -0.5, 0.166666666667, 0.0, -0.0333333333333, 0.0, 0.0238095238095]
def lgamma(x):
"""
Log gamma function. Exact only at positive integers; for x > 100 an
asymptotic expansion is used, and other values return nan.
"""
if x <= 0:
raise ValueError("Domain error")
if x > 100:
# DLMF 5.11.1
r = 0.5 * math.log(2*math.pi) + (x - 0.5) * math.log(x) - x
for k in range(1, len(_BERNOULLI)//2 + 1):
r += _BERNOULLI[2*k] / (2*k*(2*k - 1) * x**(2*k - 1))
return r
# Fall back to math.factorial
int_x = int(x)
err_int = abs(x - int_x)
if err_int < 1e-12 * abs(x):
return math.log(math.factorial(int_x - 1))
# Would need full implementation
return nan
def binom(n, k):
"""
Binomial coefficient (n over k)
"""
n = operator.index(n)
k = operator.index(k)
if not 0 <= k <= n:
return 0
m = n + 1
num = 1
den = 1
for j in range(1, min(k, n - k) + 1):
num *= m - j
den *= j
return num // den
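# Added sanity-check sketch (not part of the original module): binom_pmf
# should sum to ~1 over k, and binom gives the exact integer coefficient
# used by mann_whitney_u_pmf above. The values of n and p are arbitrary.
def _example_binom_helpers():
    n, p = 10, 0.3
    total = sum(binom_pmf(n, k, p) for k in range(n + 1))  # ~= 1.0
    coeff = binom(10, 3)  # == 120
    return total, coeff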
class LaplacePosterior(object):
"""
Univariate distribution::
p(beta|y) = N [sum(|y_j - beta|)]**(-nu-1)
where N is the normalization factor.
Parameters
----------
y : list of float
Samples
nu : float, optional
Degrees of freedom. Default: len(y)-1
Notes
-----
This is the posterior distribution in the Bayesian model assuming
Laplace distributed noise::
p(y|beta,sigma) = N exp(- sum_j (1/sigma) |y_j - beta|)
p(sigma) ~ 1/sigma
nu = len(y) - 1
The MLE for beta is median(y).
Note that the same approach applied to a Gaussian model::
p(y|beta,sigma) = N exp(- sum_j 1/(2 sigma^2) (y_j - beta)^2)
results in::
p(beta|y) = N T(t, m-1); t = (beta - mean(y)) / (sstd(y) / sqrt(m))
where ``T(t, nu)`` is the Student t-distribution pdf, which then gives
the standard textbook formulas for the mean.
"""
def __init__(self, y, nu=None):
if len(y) == 0:
raise ValueError("empty input")
if nu is None:
self.nu = len(y) - 1
else:
self.nu = nu
# Sort input
y = sorted(y)
# Get location and scale so that data is centered at MLE, and
# the unnormalized PDF at MLE has amplitude ~ 1/nu.
#
# Proper scaling of inputs is important to avoid overflows
# when computing the unnormalized CDF integrals below.
self.mle = quantile(y, 0.5)
self._y_scale = sum(abs(yp - self.mle) for yp in y)
self._y_scale *= self.nu**(1/(self.nu + 1))
# Shift and scale
if self._y_scale != 0:
self.y = [(yp - self.mle)/self._y_scale for yp in y]
else:
self.y = [0 for yp in y]
self._cdf_norm = None
self._cdf_memo = {}
def _cdf_unnorm(self, beta):
"""
Unnormalized CDF of this distribution::
cdf_unnorm(b) = int_{-oo}^{b} [sum_j |y_j - b'|]**(-nu-1) db'
"""
if beta != beta:
return beta
for k, y in enumerate(self.y):
if y > beta:
k0 = k
break
else:
k0 = len(self.y)
cdf = 0
nu = self.nu
# Save some work by memoizing intermediate results
if k0 - 1 in self._cdf_memo:
k_start = k0
cdf = self._cdf_memo[k0 - 1]
else:
k_start = 0
cdf = 0
# Do the integral piecewise, resolving the absolute values
for k in range(k_start, k0 + 1):
c = 2*k - len(self.y)
y = sum(self.y[k:]) - sum(self.y[:k])
if k == 0:
a = -inf
else:
a = self.y[k-1]
if k == k0:
b = beta
else:
b = self.y[k]
if c == 0:
term = (b - a) / y**(nu+1)
else:
term = 1/(nu*c) * ((a*c + y)**(-nu) - (b*c + y)**(-nu))
cdf += max(0, term) # avoid rounding error
if k != k0:
self._cdf_memo[k] = cdf
if beta == inf:
self._cdf_memo[len(self.y)] = cdf
return cdf
def _ppf_unnorm(self, cdfx):
"""
Inverse function for _cdf_unnorm
"""
# Find interval
for k in range(len(self.y) + 1):
if cdfx <= self._cdf_memo[k]:
break
# Invert on interval
c = 2*k - len(self.y)
y = sum(self.y[k:]) - sum(self.y[:k])
nu = self.nu
if k == 0:
term = cdfx
else:
a = self.y[k-1]
term = cdfx - self._cdf_memo[k-1]
if k == 0:
z = -nu*c*term
if z > 0:
beta = (z**(-1/nu) - y) / c
else:
beta = -inf
elif c == 0:
beta = a + term * y**(nu+1)
else:
z = (a*c + y)**(-nu) - nu*c*term
if z > 0:
beta = (z**(-1/nu) - y)/c
else:
beta = inf
if k < len(self.y):
beta = min(beta, self.y[k])
return beta
def pdf(self, beta):
"""
Probability distribution function
"""
return math.exp(self.logpdf(beta))
def logpdf(self, beta):
"""
Logarithm of probability distribution function
"""
if self._y_scale == 0:
return inf if beta == self.mle else -inf
beta = (beta - self.mle) / self._y_scale
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
ws = sum(abs(yp - beta) for yp in self.y)
m = self.nu
return -(m+1)*math.log(ws) - math.log(self._cdf_norm) - math.log(self._y_scale)
def cdf(self, beta):
"""
Cumulative probability distribution function
"""
if self._y_scale == 0:
return 1.0*(beta > self.mle)
beta = (beta - self.mle) / self._y_scale
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
return self._cdf_unnorm(beta) / self._cdf_norm
def ppf(self, cdf):
"""
Percent point function (inverse function for cdf)
"""
if cdf < 0 or cdf > 1.0:
return nan
if self._y_scale == 0:
return self.mle
if self._cdf_norm is None:
self._cdf_norm = self._cdf_unnorm(inf)
cdfx = min(cdf * self._cdf_norm, self._cdf_norm)
beta = self._ppf_unnorm(cdfx)
return beta * self._y_scale + self.mle
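# Added illustrative sketch (not part of the original module): LaplacePosterior
# models the location parameter under Laplace-distributed noise; its MLE is
# the sample median and ppf() reads off credible-interval endpoints. The data
# values below are arbitrary.
def _example_laplace_posterior():
    y = [1.2, 0.9, 1.1, 1.4, 1.0, 1.3]
    post = LaplacePosterior(y)
    lo, hi = post.ppf(0.005), post.ppf(0.995)  # ~99% credible interval
    return post.mle, (lo, hi)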
|
the-stack_106_19310
|
import pip.download
from pip.commands.search import (compare_versions,
highest_version,
transform_hits,
SearchCommand)
from pip.status_codes import NO_MATCHES_FOUND, SUCCESS
from pip.backwardcompat import xmlrpclib, b
from pip.baseparser import create_main_parser
from mock import Mock
from tests.lib import pyversion
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_version_compare():
"""
Test version comparison.
"""
assert compare_versions('1.0', '1.1') == -1
assert compare_versions('1.1', '1.0') == 1
assert compare_versions('1.1a1', '1.1') == -1
assert compare_versions('1.1.1', '1.1a') == -1
assert highest_version(['1.0', '2.0', '0.1']) == '2.0'
assert highest_version(['1.0a1', '1.0']) == '1.0'
def test_pypi_xml_transformation():
"""
Test transformation of data structures (pypi xmlrpc to custom list).
"""
pypi_hits = [{'_pypi_ordering': 100, 'name': 'foo', 'summary': 'foo summary', 'version': '1.0'},
{'_pypi_ordering': 200, 'name': 'foo', 'summary': 'foo summary v2', 'version': '2.0'},
{'_pypi_ordering': 50, 'name': 'bar', 'summary': 'bar summary', 'version': '1.0'}]
expected = [{'score': 200, 'versions': ['1.0', '2.0'], 'name': 'foo', 'summary': 'foo summary v2'},
{'score': 50, 'versions': ['1.0'], 'name': 'bar', 'summary': 'bar summary'}]
assert transform_hits(pypi_hits) == expected
def test_invalid_pypi_transformation():
"""
Test transformation of pypi when ordering None
"""
pypi_hits = [{'_pypi_ordering': None, 'name': 'bar', 'summary': 'bar summary', 'version': '1.0'},
{'_pypi_ordering': 100, 'name': 'foo', 'summary': 'foo summary', 'version': '1.0'}]
expected = [{'score': 100, 'versions': ['1.0'], 'name': 'foo', 'summary': 'foo summary'},
{'score': 0, 'versions': ['1.0'], 'name': 'bar', 'summary': 'bar summary'}]
assert transform_hits(pypi_hits) == expected
def test_search(script):
"""
End to end test of search command.
"""
output = script.pip('search', 'pip')
assert 'A tool for installing and managing Python packages' in output.stdout
def test_multiple_search(script):
"""
Test searching for multiple packages at once.
"""
output = script.pip('search', 'pip', 'INITools')
assert 'A tool for installing and managing Python packages' in output.stdout
assert 'Tools for parsing and using INI-style files' in output.stdout
def test_search_missing_argument(script):
"""
Test missing required argument for search
"""
result = script.pip('search', expect_error=True)
assert 'ERROR: Missing required argument (search query).' in result.stdout
def test_run_method_should_return_success_when_find_packages():
"""
Test SearchCommand.run for found package
"""
options_mock = Mock()
options_mock.index = 'http://pypi.python.org/pypi'
search_cmd = SearchCommand(create_main_parser())
status = search_cmd.run(options_mock, ('pip',))
assert status == SUCCESS
def test_run_method_should_return_no_matches_found_when_does_not_find_packages():
"""
Test SearchCommand.run for no matches
"""
options_mock = Mock()
options_mock.index = 'https://pypi.python.org/pypi'
search_cmd = SearchCommand(create_main_parser())
status = search_cmd.run(options_mock, ('non-existent-package',))
assert status == NO_MATCHES_FOUND, status
def test_search_should_exit_status_code_zero_when_find_packages(script):
"""
Test search exit status code for package found
"""
result = script.pip('search', 'pip')
assert result.returncode == SUCCESS
def test_search_exit_status_code_when_finds_no_package(script):
"""
Test search exit status code for no matches
"""
result = script.pip('search', 'non-existent-package', expect_error=True)
assert result.returncode == NO_MATCHES_FOUND, result.returncode
|
the-stack_106_19311
|
# coding:utf8
'''
Created on August 3, 2016
@author: zhangq
'''
from django.conf.urls import url
from Ts_app import views
urlpatterns = [
url(r'^$', views.homeview),
url(r'^ts_app/$', views.indexview),
url(r'^ajax_radio/', views.radioview),
url(r'^equipment/$', views.rentview),
url(r'^testlink/$', views.testlinkview),
url(r'^testcase/(.+)', views.test_case_view),
url(r'^testreport/(.+)', views.test_report_view),
url(r'^testbuild/(.+)', views.test_build_view),
url(r'^editcase/', views.edit_case_view),
url(r'^editsuite/', views.edit_suite_view),
url(r'^ajax_list/', views.ajaxview),
url(r'^ajax_dic/', views.ajaxdicview),
]
|
the-stack_106_19312
|
#!/usr/bin/env python3
import pytablewriter as ptw
import pytablereader as ptr
# prepare data ---
file_path = "sample_data.csv"
csv_text = "\n".join([
'"attr_a","attr_b","attr_c"',
'1,4,"a"',
'2,2.1,"bb"',
'3,120.9,"ccc"',
])
with open(file_path, "w") as f:
f.write(csv_text)
# load from a csv file ---
loader = ptr.CsvTableFileLoader(file_path)
for table_data in loader.load():
print("\n".join([
"load from file",
"==============",
f"{ptw.dumps_tabledata(table_data):s}",
]))
# load from a csv text ---
loader = ptr.CsvTableTextLoader(csv_text)
for table_data in loader.load():
print("\n".join([
"load from text",
"==============",
f"{ptw.dumps_tabledata(table_data):s}",
]))
|
the-stack_106_19313
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import mlflow
from mlflow.exceptions import MlflowException
from mlflow.entities import ViewType
import os, logging
from pathlib import Path
from contextlib import contextmanager
from typing import Optional, Text
from .exp import MLflowExperiment, Experiment
from ..config import C
from .recorder import Recorder
from ..log import get_module_logger
logger = get_module_logger("workflow", logging.INFO)
class ExpManager:
"""
This is the `ExpManager` class for managing experiments. The API is designed similar to mlflow.
(The link: https://mlflow.org/docs/latest/python_api/mlflow.html)
"""
def __init__(self, uri: Text, default_exp_name: Optional[Text]):
self._current_uri = uri
self._default_exp_name = default_exp_name
self.active_experiment = None # only one experiment can active each time
def __repr__(self):
return "{name}(current_uri={curi})".format(name=self.__class__.__name__, curi=self._current_uri)
def start_exp(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
**kwargs,
):
"""
Start an experiment. This method first gets or creates an experiment, and then
sets it to be active.
Parameters
----------
experiment_id : str
id of the active experiment.
experiment_name : str
name of the active experiment.
recorder_id : str
id of the recorder to be started.
recorder_name : str
name of the recorder to be started.
uri : str
the current tracking URI.
resume : boolean
whether to resume the experiment and recorder.
Returns
-------
An active experiment.
"""
raise NotImplementedError(f"Please implement the `start_exp` method.")
def end_exp(self, recorder_status: Text = Recorder.STATUS_S, **kwargs):
"""
End an active experiment.
Parameters
----------
experiment_name : str
name of the active experiment.
recorder_status : str
the status of the active recorder of the experiment.
"""
raise NotImplementedError(f"Please implement the `end_exp` method.")
def create_exp(self, experiment_name: Optional[Text] = None):
"""
Create an experiment.
Parameters
----------
experiment_name : str
the experiment name, which must be unique.
Returns
-------
An experiment object.
"""
raise NotImplementedError(f"Please implement the `create_exp` method.")
def search_records(self, experiment_ids=None, **kwargs):
"""
Get a pandas DataFrame of records that fit the search criteria of the experiment.
Inputs are the search criteria the user wants to apply.
Returns
-------
A pandas.DataFrame of records, where each metric, parameter, and tag
are expanded into their own columns named metrics.*, params.*, and tags.*
respectively. For records that don't have a particular metric, parameter, or tag, their
value will be (NumPy) NaN, None, or None respectively.
"""
raise NotImplementedError(f"Please implement the `search_records` method.")
def get_exp(self, experiment_id=None, experiment_name=None, create: bool = True, start: bool = False):
"""
Retrieve an experiment. This method includes getting an active experiment, and get_or_create a specific experiment.
When the user specifies an experiment id or name, the method will try to return that specific experiment.
When the user does not provide an experiment id or name, the method will try to return the current active experiment.
The `create` argument determines whether the method will automatically create a new experiment according
to the user's specification if the experiment hasn't been created before.
* If `create` is True:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name. If `start` is set to be True, the experiment is set to be active.
* If `active experiment` not exists:
* no id or name specified, create a default experiment.
* if id or name is specified, return the specified experiment. If no such exp found, create a new experiment with given id or name. If `start` is set to be True, the experiment is set to be active.
* Else If `create` is False:
* If `active experiment` exists:
* no id or name specified, return the active experiment.
* if id or name is specified, return the specified experiment. If no such exp found, raise Error.
* If `active experiment` not exists:
* no id or name specified. If the default experiment exists, return it, otherwise, raise Error.
* if id or name is specified, return the specified experiment. If no such exp found, raise Error.
Parameters
----------
experiment_id : str
id of the experiment to return.
experiment_name : str
name of the experiment to return.
create : boolean
create the experiment it if hasn't been created before.
start : boolean
start the new experiment if one is created.
Returns
-------
An experiment object.
"""
# special case of getting experiment
if experiment_id is None and experiment_name is None:
if self.active_experiment is not None:
return self.active_experiment
# No active experiment and no id/name given: fall back to the default experiment name.
experiment_name = self._default_exp_name
if create:
exp, is_new = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
else:
exp, is_new = (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
if is_new and start:
self.active_experiment = exp
# start the recorder
self.active_experiment.start()
return exp
def _get_or_create_exp(self, experiment_id=None, experiment_name=None) -> (object, bool):
"""
Method for getting or creating an experiment. It first tries to get a valid experiment; if that fails, it
automatically creates a new experiment based on the given id and name.
"""
try:
return (
self._get_exp(experiment_id=experiment_id, experiment_name=experiment_name),
False,
)
except ValueError:
if experiment_name is None:
experiment_name = self._default_exp_name
logger.info(f"No valid experiment found. Create a new experiment with name {experiment_name}.")
return self.create_exp(experiment_name), True
def _get_exp(self, experiment_id=None, experiment_name=None) -> Experiment:
"""
Get specific experiment by name or id. If it does not exist, raise ValueError.
Parameters
----------
experiment_id :
The id of experiment
experiment_name :
The name of experiment
Returns
-------
Experiment:
The searched experiment
Raises
------
ValueError
"""
raise NotImplementedError(f"Please implement the `_get_exp` method")
def delete_exp(self, experiment_id=None, experiment_name=None):
"""
Delete an experiment.
Parameters
----------
experiment_id : str
the experiment id.
experiment_name : str
the experiment name.
"""
raise NotImplementedError(f"Please implement the `delete_exp` method.")
@property
def default_uri(self):
"""
Get the default tracking URI from qlib.config.C
"""
if "kwargs" not in C.exp_manager or "uri" not in C.exp_manager["kwargs"]:
raise ValueError("The default URI is not set in qlib.config.C")
return C.exp_manager["kwargs"]["uri"]
@property
def uri(self):
"""
Get the default tracking URI or current URI.
Returns
-------
The tracking URI string.
"""
return self._current_uri or self.default_uri
def set_uri(self, uri: Optional[Text] = None):
"""
Set the current tracking URI and the corresponding variables.
Parameters
----------
uri : str
"""
if uri is None:
logger.info("No tracking URI is provided. Use the default tracking URI.")
self._current_uri = self.default_uri
else:
# Temporarily re-set the current uri as the uri argument.
self._current_uri = uri
# Customized features for subclasses.
self._set_uri()
def _set_uri(self):
"""
Customized features for subclasses' set_uri function.
"""
raise NotImplementedError(f"Please implement the `_set_uri` method.")
def list_experiments(self):
"""
List all the existing experiments.
Returns
-------
A dictionary (name -> experiment) of experiments information that being stored.
"""
raise NotImplementedError(f"Please implement the `list_experiments` method.")
class MLflowExpManager(ExpManager):
"""
Use mlflow to implement ExpManager.
"""
def __init__(self, uri: Text, default_exp_name: Optional[Text]):
super(MLflowExpManager, self).__init__(uri, default_exp_name)
self._client = None
def _set_uri(self):
self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
logger.info("{:}".format(self._client))
@property
def client(self):
# Delay the creation of mlflow client in case of creating `mlruns` folder when importing qlib
if self._client is None:
self._client = mlflow.tracking.MlflowClient(tracking_uri=self.uri)
return self._client
def start_exp(
self,
*,
experiment_id: Optional[Text] = None,
experiment_name: Optional[Text] = None,
recorder_id: Optional[Text] = None,
recorder_name: Optional[Text] = None,
uri: Optional[Text] = None,
resume: bool = False,
):
# Set the tracking uri
self.set_uri(uri)
# Create experiment
if experiment_name is None:
experiment_name = self._default_exp_name
experiment, _ = self._get_or_create_exp(experiment_id=experiment_id, experiment_name=experiment_name)
# Set up active experiment
self.active_experiment = experiment
# Start the experiment
self.active_experiment.start(recorder_id=recorder_id, recorder_name=recorder_name, resume=resume)
return self.active_experiment
def end_exp(self, recorder_status: Text = Recorder.STATUS_S):
if self.active_experiment is not None:
self.active_experiment.end(recorder_status)
self.active_experiment = None
# When an experiment end, we will release the current uri.
self._current_uri = None
def create_exp(self, experiment_name: Optional[Text] = None):
assert experiment_name is not None
# init experiment
experiment_id = self.client.create_experiment(experiment_name)
experiment = MLflowExperiment(experiment_id, experiment_name, self.uri)
experiment._default_name = self._default_exp_name
return experiment
def _get_exp(self, experiment_id=None, experiment_name=None):
"""
Method for getting a specific experiment by id or name. If no valid experiment
is found, it raises ValueError.
"""
assert (
experiment_id is not None or experiment_name is not None
), "Please input at least one of experiment/recorder id or name before retrieving experiment/recorder."
if experiment_id is not None:
try:
exp = self.client.get_experiment(experiment_id)
if exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
return experiment
except MlflowException:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment id is correct."
)
elif experiment_name is not None:
try:
exp = self.client.get_experiment_by_name(experiment_name)
if exp is None or exp.lifecycle_stage.upper() == "DELETED":
raise MlflowException("No valid experiment has been found.")
experiment = MLflowExperiment(exp.experiment_id, experiment_name, self.uri)
return experiment
except MlflowException as e:
raise ValueError(
"No valid experiment has been found, please make sure the input experiment name is correct."
)
def search_records(self, experiment_ids, **kwargs):
filter_string = "" if kwargs.get("filter_string") is None else kwargs.get("filter_string")
run_view_type = 1 if kwargs.get("run_view_type") is None else kwargs.get("run_view_type")
max_results = 100000 if kwargs.get("max_results") is None else kwargs.get("max_results")
order_by = kwargs.get("order_by")
return self.client.search_runs(experiment_ids, filter_string, run_view_type, max_results, order_by)
def delete_exp(self, experiment_id=None, experiment_name=None):
assert (
experiment_id is not None or experiment_name is not None
), "Please input a valid experiment id or name before deleting."
try:
if experiment_id is not None:
self.client.delete_experiment(experiment_id)
else:
experiment = self.client.get_experiment_by_name(experiment_name)
if experiment is None:
raise MlflowException("No valid experiment has been found.")
self.client.delete_experiment(experiment.experiment_id)
except MlflowException as e:
raise Exception(
f"Error: {e}. Something went wrong when deleting experiment. Please check if the name/id of the experiment is correct."
)
def list_experiments(self):
# retrieve all the existing experiments
exps = self.client.list_experiments(view_type=ViewType.ACTIVE_ONLY)
experiments = dict()
for exp in exps:
experiment = MLflowExperiment(exp.experiment_id, exp.name, self.uri)
experiments[exp.name] = experiment
return experiments
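# Added illustrative sketch (not part of qlib): the intended call pattern of
# MLflowExpManager. The URI and experiment name are assumptions chosen for the
# example, and running it requires mlflow to be installed.
def _example_mlflow_exp_manager():
    manager = MLflowExpManager(uri="./mlruns", default_exp_name="Experiment")
    exp = manager.start_exp(experiment_name="demo_exp", uri="./mlruns")
    try:
        pass  # training / metric logging would happen here via the recorder
    finally:
        manager.end_exp()
    return exp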
|
the-stack_106_19315
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, [email protected]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from datetime import datetime
from ethereumetl.atomic_counter import AtomicCounter
# Thread safe progress logger.
class ProgressLogger:
def __init__(self, name='work', logger=None, log_percentage_step=10, log_item_step=5000):
self.name = name
self.total_items = None
self.start_time = None
self.end_time = None
self.counter = AtomicCounter()
self.log_percentage_step = log_percentage_step
self.log_items_step = log_item_step
if logger is not None:
self.logger = logger
else:
self.logger = logging.getLogger('ProgressLogger')
def start(self, total_items=None):
self.total_items = total_items
self.start_time = datetime.now()
start_message = 'Started {}.'.format(self.name)
if self.total_items is not None:
start_message = start_message + ' Items to process: {}.'.format(self.total_items)
self.logger.info(start_message)
# A race condition is possible where a message for the same percentage is printed twice, but it's a minor issue
def track(self, item_count=1):
processed_items = self.counter.increment(item_count)
processed_items_before = processed_items - item_count
track_message = None
if self.total_items is None:
if int(processed_items_before / self.log_items_step) != int(processed_items / self.log_items_step):
track_message = '{} items processed.'.format(processed_items)
else:
percentage = processed_items * 100 / self.total_items
percentage_before = processed_items_before * 100 / self.total_items
if int(percentage_before / self.log_percentage_step) != int(percentage / self.log_percentage_step):
track_message = '{} items processed. Progress is {}%'.format(processed_items, int(percentage)) + \
('!!!' if int(percentage) > 100 else '.')
if track_message is not None:
self.logger.info(track_message)
def finish(self):
duration = None
if self.start_time is not None:
self.end_time = datetime.now()
duration = self.end_time - self.start_time
finish_message = 'Finished {}. Total items processed: {}.'.format(self.name, self.counter.increment() - 1)
if duration is not None:
finish_message = finish_message + ' Took {}.'.format(str(duration))
self.logger.info(finish_message)
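# Added illustrative sketch (not part of ethereum-etl): the intended lifecycle
# of ProgressLogger -- start() with an optional total, track() as items are
# processed (possibly from several threads), then finish(). The item count is
# arbitrary.
def _example_progress_logger():
    progress = ProgressLogger(name='export blocks')
    progress.start(total_items=100)
    for _ in range(100):
        progress.track(1)
    progress.finish()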
|
the-stack_106_19316
|
"""
@author: Ming Ming Zhang, [email protected]
Detections
"""
import tensorflow as tf
import utils
def select_top_scoring(
anchors,
probs,
offsets,
confidence_threshold=0.05,
num_top_scoring=1000,
window=[0,0,512,512],
batch_size=2,
offsets_mean=None,
offsets_std=None
):
"""
Selects top-scoring refined anchors.
Parameters
----------
anchors : tf tensor, [batch_size, num_anchors, 4]
Anchors.
probs : tf tensor, [batch_size, num_anchors, num_object_classes]
Anchors' probabilities to contain object.
offsets : tf tensor, [batch_size, num_anchors, 4]
Anchors' offsets from gt boxes.
confidence_threshold : float, optional
The minimum probability required to keep a selection. The default is 0.05.
num_top_scoring : integer, optional
The number of top-scoring selections. The default is 1000.
window : list, optional
The corner coordinates (y1, x1, y2, x2) used when clipping refined
anchors (after applied offsets). The default is [0,0,512,512].
batch_size : integer, optional
The batch size of anchors, probs or offsets. The default is 2.
offsets_mean, offsets_std : float
The mean and std of anchor offsets for a given dataset. If offsets are
normalized, they will be used to de-normalize offsets.
Returns
-------
anchor_idxes : tf tensor, [(batch_size * num_anchors)_filtered, 2]
Selected top-scoring indices where
(batch_size * num_anchors)_filtered <= batch_size * num_anchors and
2 is (batch_idx, anchor_idx).
anchors : tf tensor, [(batch_size * num_anchors)_filtered, 4]
Flitered anchors.
class_ids : tf tensor, [(batch_size * num_anchors)_filtered, ]
Filtered anchors' class ids.
scores : tf tensor, [(batch_size * num_anchors)_filtered, ]
Filtered anchors' scores.
"""
num_anchors = tf.shape(probs)[1]
num_classes = tf.shape(probs)[2]
# class_ids, [batch_size, num_anchors]
class_ids = tf.argmax(probs, axis=-1, output_type=tf.int32)
# reshape class_ids, [batch_size * num_anchors, ]
class_ids_reshaped = tf.reshape(class_ids, (batch_size * num_anchors,))
# reshape probs, [batch_size * num_anchors, num_classes]
probs_reshaped = tf.reshape(probs, (-1, num_classes))
# scores, [batch_size * num_anchors,]
scores = tf.gather_nd(
probs_reshaped,
tf.stack([tf.range(batch_size*num_anchors), class_ids_reshaped], axis=1)
)
# reshape scores, [batch_size, num_anchors]
scores = tf.reshape(scores, (batch_size, num_anchors))
# filter low confidence, [(batch_size*num_anchors)_filtered1, 2] where 2 is
# (batch_idx, anchor_idx)
threshold_idxes = tf.where(scores > confidence_threshold)
# select top-scoring indices, [(batch_size*num_anchors)_filtered2, 2] where
# 2 is (batch_idx, anchor_idx)
anchor_idxes = []
for b in range(batch_size):
batch_idxes = tf.where(tf.gather(threshold_idxes, 0, axis=1) == b)[:,0]
num_anchors_per_img = tf.shape(batch_idxes)[0]
k = tf.minimum(num_anchors_per_img, num_top_scoring)
top_idxes = tf.math.top_k(tf.gather(scores, b), k)[1]
anchor_idxes_per_img = tf.stack([tf.repeat(b, k), top_idxes], axis=1)
anchor_idxes.append(anchor_idxes_per_img)
anchor_idxes = tf.concat(anchor_idxes, axis=0)
# filter class_ids & scores, [(batch_size*num_anchors)_filtered2, ]
class_ids = tf.gather_nd(class_ids, anchor_idxes)
scores = tf.gather_nd(scores, anchor_idxes)
# select top-scoring anchors and then,
# refine by applying offsets and clipping,
# resulting in shape [(batch_size*num_anchors)_filtered2, 4]
anchors = tf.gather_nd(anchors, anchor_idxes)
offsets = tf.gather_nd(offsets, anchor_idxes)
# de-normalize offsets if needed
if offsets_mean is not None and offsets_std is not None:
offsets = tf.add(tf.multiply(offsets, offsets_std), offsets_mean)
anchors = utils.apply_offsets(anchors, offsets)
anchors = utils.clip_boxes(anchors, window)
return anchor_idxes, anchors, class_ids, scores
class SelectTopScoring(tf.keras.layers.Layer):
"""
Defines top-scoring selection as a subclass of tf.keras.layers.Layer.
Parameters
----------
inputs : list
Includes anchors, probs, offsets and window, where the first three are
the same as in select_top_scoring(), but
* window : tf tensor, [1, 4]
used when clipping refined anchors (after applied offsets), where
1 is the batch_idx assuming that all images in the batch share the
same 4 corner coordinates (y1, x1, y2, x2).
"""
def __init__(
self,
confidence_threshold=0.05,
num_top_scoring=1000,
batch_size=2,
offsets_mean=None,
offsets_std=None,
**kwarg
):
super(SelectTopScoring, self).__init__(**kwarg)
self.confidence_threshold = confidence_threshold
self.num_top_scoring = num_top_scoring
self.batch_size = batch_size
self.offsets_mean = offsets_mean
self.offsets_std = offsets_std
def call(self, inputs):
anchors, probs, offsets = inputs[0], inputs[1], inputs[2]
# window, [1, 4]
window = inputs[3]
return select_top_scoring(
anchors,
probs,
offsets,
self.confidence_threshold,
self.num_top_scoring,
window,
self.batch_size,
self.offsets_mean,
self.offsets_std)
def nms_fpn(
list_anchor_idxes,
list_anchors,
list_class_ids,
list_scores,
max_objects_per_class_per_img=100,
iou_threshold=0.5,
batch_size=2
):
"""
Applies non-maximum suppression (NMS) to all FPN levels.
Parameters
----------
list_anchor_idxes : list
Set of anchors' indices at each FPN level, each is
[batch_size * num_anchors_fmap, 2] where 2 is (batch_idx, anchor_idx).
list_anchors : list
Set of anchors at each FPN level, each is
[batch_size * num_anchors_fmap, 4].
list_class_ids : list
Set of anchors' class ids at each FPN level, each is
[batch_size * num_anchors_fmap, ].
list_scores : list
Set of anchors' scores at each FPN level, each is
[batch_size * num_anchors_fmap, ].
max_objects_per_class_per_img : integer, optional
The maximum number of detections kept per image for a particular class.
The default is 100.
iou_threshold : float, optional
An iou threshold for NMS. The default is 0.5.
batch_size : integer, optional
The batch size of each FPN level's anchor indices, anchors, class ids
or scores. The default is 2.
Returns
-------
anchors_batch : list
Set of anchors after NMS for each image, each has shape
[num_anchors_per_img_filtered * num_fmaps, 4].
class_ids_batch : list
Set of corresponding class ids after NMS for each image, each has shape
[num_anchors_per_img_filtered * num_fmaps, ]
scores_batch : list
Set of corresponding scores after NMS for each image, each has shape
[num_anchors_per_img_filtered * num_fmaps, ].
"""
# merge all FPN levels
# [batch_size * num_anchors_fmap * num_fmaps, 2] where 2 is
# (batch_idx, anchor_idx)
anchor_idxes = tf.concat(list_anchor_idxes, axis=0)
# [batch_size * num_anchors_fmap * num_fmaps, 4]
anchors = tf.concat(list_anchors, axis=0)
# [batch_size * num_anchors_fmap * num_fmaps, ]
class_ids = tf.concat(list_class_ids, axis=0)
# [batch_size * num_anchors_fmap * num_fmaps, ]
scores = tf.concat(list_scores, axis=0)
# unique classes, [num_classes, ]
ids = tf.unique(class_ids)[0]
# batch indicators, [batch_size * num_anchors_fmap * num_fmaps, ], each
# indicates which batch where the image belongs to
batch_indicators = tf.gather(anchor_idxes, 0, axis=1)
# max number of objects in a class for the batch of images
max_objects_per_class = batch_size * max_objects_per_class_per_img
def nms_per_class(class_id):
"""
Applies NMS to a given class.
Parameters
----------
class_id : integer
The object class id.
Returns
-------
select_idxes : tf tensor, [max_objects_per_class, ]
Selected indices after NMS, padded with -1 if needed.
"""
idxes = tf.where(class_ids == class_id)[:,0]
idxes = tf.cast(idxes, ids.dtype)
nms_idxes = tf.image.non_max_suppression(
boxes=tf.gather(anchors, idxes),
scores=tf.gather(scores, idxes),
max_output_size=max_objects_per_class,
iou_threshold=iou_threshold)
# [(batch_size * num_anchors_fmap * num_fmaps)_per_class_filtered, ]
select_idxes = tf.gather(idxes, nms_idxes)
# pad with -1 to have same shape for all classes,
# [max_objects_per_class, ]
gap = max_objects_per_class - tf.shape(select_idxes)[0]
select_idxes = tf.pad(select_idxes, [[0,gap]], constant_values=-1)
return select_idxes
# parallel computing applied to all classes,
# [num_classes, max_objects_per_class]
select_idxes = tf.map_fn(nms_per_class, ids)
# remove -1 paddings,
# [(batch_size * num_anchors_fmap * num_fmaps)_filtered, ]
select_idxes = tf.reshape(select_idxes, [-1])
select_idxes = tf.gather(select_idxes, tf.where(select_idxes > -1)[:,0])
# [(batch_size * num_anchors_fmap * num_fmaps)_filtered, ]
batch_indicators = tf.gather(batch_indicators, select_idxes)
# [(batch_size * num_anchors_fmap * num_fmaps)_filtered, 4]
anchors = tf.gather(anchors, select_idxes)
# [(batch_size * num_anchors_fmap * num_fmaps)_filtered, ]
class_ids = tf.gather(class_ids, select_idxes)
scores = tf.gather(scores, select_idxes)
# get detections for each image
anchors_batch, class_ids_batch, scores_batch = [], [], []
for b in range(batch_size):
idxes = tf.where(batch_indicators == b)[:,0]
# [(num_anchors_fmap * num_fmaps)_per_img_filtered, 4]
anchors_per_img = tf.gather(anchors, idxes)
# [(num_anchors_fmap * num_fmaps)_per_img_filtered, ]
class_ids_per_img = tf.gather(class_ids, idxes)
scores_per_img = tf.gather(scores, idxes)
anchors_batch.append(anchors_per_img)
class_ids_batch.append(class_ids_per_img)
scores_batch.append(scores_per_img)
return anchors_batch, class_ids_batch, scores_batch
class NMS_FPN(tf.keras.layers.Layer):
"""
Defines per-class NMS across FPN levels as a subclass of tf.keras.layers.Layer.
"""
def __init__(
self,
max_objects_per_class_per_img=100,
iou_threshold=0.5,
batch_size=2,
**kwarg
):
super(NMS_FPN, self).__init__(**kwarg)
self.max_objects_per_class_per_img = max_objects_per_class_per_img
self.iou_threshold = iou_threshold
self.batch_size = batch_size
def call(self, inputs):
list_anchor_idxes = inputs[0]
list_anchors = inputs[1]
list_class_ids = inputs[2]
list_scores = inputs[3]
return nms_fpn(
list_anchor_idxes,
list_anchors,
list_class_ids,
list_scores,
self.max_objects_per_class_per_img,
self.iou_threshold,
self.batch_size)
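# Added illustrative sketch (not part of the original module): wires
# SelectTopScoring and NMS_FPN together on random tensors purely to exercise
# the expected shapes. It still depends on the local `utils` module imported
# above, and every size, threshold and coordinate below is an arbitrary
# assumption rather than a recommended setting.
def _example_detection_postprocessing(batch_size=2, num_anchors=100, num_classes=3):
    anchors = tf.random.uniform((batch_size, num_anchors, 4), 0.0, 512.0)
    probs = tf.random.uniform((batch_size, num_anchors, num_classes), 0.0, 1.0)
    offsets = tf.zeros((batch_size, num_anchors, 4))
    window = tf.constant([[0.0, 0.0, 512.0, 512.0]])
    select = SelectTopScoring(batch_size=batch_size)
    anchor_idxes, boxes, class_ids, scores = select([anchors, probs, offsets, window])
    # A single "FPN level" here; real use would pass one list entry per level.
    nms = NMS_FPN(batch_size=batch_size)
    boxes_b, ids_b, scores_b = nms([[anchor_idxes], [boxes], [class_ids], [scores]])
    return boxes_b, ids_b, scores_b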
|
the-stack_106_19317
|
import math
from typing import List


class Solution:
    # Maximize the distance to the closest occupied seat: scan runs of empty
    # seats; an interior gap of length d contributes ceil(d / 2), while the
    # leading and trailing runs of zeros contribute their full lengths.
def maxDistToClosest(self, seats: List[int]) -> int:
maxDistance = -1
n = len(seats)
first1 = seats.index(1)
last1 = seats[::-1].index(1)
i = first1 + 1
while i < n-last1:
if seats[i] == 0:
distance = 0
beg_i = i
while i<n-last1 and seats[i] == 0:
distance += 1
i += 1
if i!=n and beg_i != 0:
distance = math.ceil(distance/2)
maxDistance = max(maxDistance, distance)
else:
i += 1
return max(maxDistance, first1, last1)
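# Added usage sketch: for seats [1, 0, 0, 0, 1, 0, 1] the best choice is
# index 2, two seats away from the nearest occupied seat on either side.
def _example_max_dist_to_closest():
    return Solution().maxDistToClosest([1, 0, 0, 0, 1, 0, 1])  # expected: 2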
|
the-stack_106_19320
|
#
# Copyright © 2021 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. No copyright is claimed
# in the United States under Title 17, U.S. Code. All Other Rights Reserved.
#
# SPDX-License-Identifier: NASA-1.3
#
from astropy import units as u
import numpy as np
from .core import igrf as _igrf, aep8 as _aep8
from .paths import IGRF_DATA_PATH, AEP8_DATA_PATH
from .util import working_directory
@working_directory(IGRF_DATA_PATH)
@np.vectorize
def igrf(lon, lat, height, year):
return _igrf(lon, lat, height, year)
@working_directory(AEP8_DATA_PATH)
@np.vectorize
def aep8(energy, lvalue, bb0, particle, solar):
if particle not in ('e', 'p'):
raise ValueError('particle must be "e" or "p"')
if solar not in ('min', 'max'):
raise ValueError('solar must be "min" or "max"')
modelnum = {
('e', 'min'): 1,
('e', 'max'): 2,
('p', 'min'): 3,
('p', 'max'): 4
}[(particle, solar)]
return _aep8(energy, lvalue, bb0, modelnum)
def get_flux(coords, time, energy, particle, solar):
"""Calculate the flux of trapped particles at a specific location and time.
Parameters
----------
coords : astropy.coordinates.EarthLocation
The position relative to the Earth.
time : astropy.time.Time
The time (needed to account for drift of the Earth's magnetic field).
energy : astropy.units.Quantity
The minimum energy.
particle : {'e', 'p'}
The particle species: 'e' for electrons, 'p' for protons.
solar : {'min', 'max'}
The solar activity: solar minimum or solar maximum.
Returns
-------
flux : astropy.units.Quantity
The flux of particles above the given energy, in units of cm^-2 s^-1.
Example
-------
>>> from radbelt import get_flux
>>> from astropy import units as u
>>> from astropy.coordinates import EarthLocation
>>> from astropy.time import Time
>>> coords = EarthLocation(-45 * u.deg, -30 * u.deg, 500 * u.km)
>>> time = Time('2021-03-01')
>>> energy = 20 * u.MeV
>>> get_flux(coords, time, energy, 'p', 'max')
<Quantity 2642.50268555 1 / (cm2 s)>
"""
lvalue, bb0 = igrf(coords.geodetic.lon.deg,
coords.geodetic.lat.deg,
coords.geodetic.height.to_value(u.km),
time.utc.decimalyear)
flux = aep8(energy.to_value(u.MeV), lvalue, bb0, particle, solar)
return flux * u.cm**-2 * u.s**-1
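# Added illustrative sketch (not part of radbelt): the low-level wrappers can
# also be called directly -- igrf() maps a geodetic position and decimal year
# to (L, B/B0), which aep8() turns into an integral flux in cm^-2 s^-1. The
# coordinates, epoch and energy are arbitrary example values.
def _example_low_level_flux():
    lvalue, bb0 = igrf(-45.0, -30.0, 500.0, 2021.2)
    flux = aep8(20.0, lvalue, bb0, 'p', 'max')
    return flux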
|