the-stack_106_26230
"""
yxf: Convert from XLSForm to YAML and back.
To convert an XLSForm to a YAML file: `python -m yxf form.xlsx`.
By default, the result will be called `form.yaml`, in other words, the same name
as the input file with the extension changed to `.yaml`. You can specify a
different output file name using the `--output othername.yaml` option.
To convert a YAML file to an XLSForm: `python -m yxf form.yaml`.
"""
import argparse
import collections
import logging
import pathlib
import re
import markdown_it
import markdown_it.tree
import openpyxl
import openpyxl.styles
import openpyxl.utils
import strictyaml
from . import xlsform
log = logging.getLogger("yxf.__main__")
def _row_to_dict(headers, values):
row_dict = collections.OrderedDict()
for h, v in zip(headers, values):
if v is None:
continue
if h is None:
raise ValueError(f"Cell with no column header: {v}")
row_dict[h] = v
return row_dict
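# A minimal illustration (hypothetical values) of what _row_to_dict produces:
# empty cells (None) are skipped and every other cell is keyed by its column header.
#
#   _row_to_dict(["type", "name", "label"], ["text", "q1", None])
#   -> OrderedDict([("type", "text"), ("name", "q1")])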
def _convert_sheet(sheet):
headers = xlsform.headers(sheet)
result = []
for row in xlsform.content_rows(sheet, values_only=True):
values = xlsform.truncate_row(row)
values = [xlsform.stringify_value(v) for v in values]
row_dict = _row_to_dict(headers, values)
if row_dict:
result.append(row_dict)
return result
def _convert_to_sheet(sheet, rows, keys):
key_set = set(keys)
for i, key in enumerate(keys):
sheet.cell(row=1, column=i + 1, value=key)
next_row = 2
previous_list_name = rows[0].get("list_name") if rows else None
for row in rows:
if row.get("type") == "begin_group":
next_row += 1
if row.get("list_name") != previous_list_name:
previous_list_name = row.get("list_name")
next_row += 1
if not all(k in key_set for k in row.keys()):
missing_key = next(k for k in row.keys() if k not in key_set)
raise ValueError(
f'Invalid key "{missing_key}" in row "{row.get("name", "(unnamed)")}". '
f"Add it to yxf.headers.{sheet.title} in the YAML file."
)
for i, key in enumerate(keys):
if key in row:
sheet.cell(row=next_row, column=i + 1, value=row[key])
next_row += 1
return sheet
def _write_to_xlsform(form, target):
wb = openpyxl.Workbook()
for sheet_name in form:
if sheet_name == "yxf":
continue
_convert_to_sheet(
wb.create_sheet(sheet_name),
form[sheet_name],
form["yxf"]["headers"][sheet_name],
)
wb.remove(wb.active)
xlsform.make_pretty(wb)
wb.save(target)
def _check_existing_output(filename, force):
if filename.exists() and not force:
raise ValueError(f"File already exists (use --force to override): {filename}")
def _ensure_yxf_comment(form, name, file_format):
desired_comment = f"Converted by yxf, from {name}. Edit the {file_format} file instead of the Excel file."
first_line = form["survey"][0]
if "#" not in first_line or not first_line["#"].startswith("Converted by yxf,"):
form["survey"].insert(0, {"#": desired_comment})
else:
form["survey"][0]["#"] = desired_comment
if "#" not in form["yxf"]["headers"]["survey"]:
form["yxf"]["headers"]["survey"].insert(0, "#")
def _validate_sheet_name(sheet_name, filename, line):
if sheet_name not in ["survey", "choices", "settings"]:
raise ValueError(
f"{filename}:{line}: Invalid sheet name (must be survey, choices, or settings): {sheet_name}"
)
def _load_workbook(filename):
wb = openpyxl.load_workbook(filename, read_only=True)
result = collections.OrderedDict()
headers = collections.OrderedDict()
for sheet_name in ["survey", "choices", "settings"]:
if sheet_name in wb:
result[sheet_name] = _convert_sheet(wb[sheet_name])
headers[sheet_name] = xlsform.headers(wb[sheet_name])
if headers[sheet_name] and headers[sheet_name][0] != "#":
if "#" in headers[sheet_name]:
raise ValueError(
f"The comment column must come first in sheet {sheet_name}."
)
if "survey" not in result:
raise ValueError('An XLSForm must have a "survey" sheet.')
result["yxf"] = {"headers": headers}
return result
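# Sketch of the structure returned by _load_workbook (which sheets are present
# depends on the workbook; "yxf" records the column order of each sheet so the
# XLSForm can be reconstructed later):
#
#   {
#       "survey":   [{"type": "text", "name": "q1", ...}, ...],
#       "choices":  [...],   # only if the sheet exists
#       "settings": [...],   # only if the sheet exists
#       "yxf": {"headers": {"survey": [...], ...}},
#   }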
def xlsform_to_yaml(filename: pathlib.Path, target: pathlib.Path):
"""Convert XLSForm file `filename` to YAML file `target`."""
log.info("xlsform_to_yaml: %s -> %s", filename, target)
form = _load_workbook(filename)
_ensure_yxf_comment(form, filename.name, "YAML")
with open(target, "w", encoding="utf-8") as f:
f.write(strictyaml.as_document(form).as_yaml())
def xlsform_to_markdown(filename: pathlib.Path, target: pathlib.Path):
"""Convert XLSForm file `filename` to Markdown file `target`."""
log.info("xlsform_to_markdown: %s -> %s", filename, target)
form = _load_workbook(filename)
_ensure_yxf_comment(form, filename.name, "Markdown")
md = []
for sheet_name in ["survey", "choices", "settings"]:
if sheet_name not in form:
continue
md.append(f"## {sheet_name}")
md.append("")
sheet = form[sheet_name]
headers = form["yxf"]["headers"][sheet_name]
header_indices = dict(zip(headers, range(len(headers))))
# Before we render the table, look for comments and render those.
# We simply put them as paragraphs in the Markdown file.
for row in sheet:
if "#" in row:
if row["#"]:
md.append(row["#"])
md.append("")
del row["#"]
if headers[0] == "#":
headers.pop(0)
del header_indices["#"]
header_indices = {k: v - 1 for (k, v) in header_indices.items()}
for i, row in enumerate(sheet):
for k, v in row.items():
# Markdown does not support multi-line entries in cells. Check
# and complain if needed.
if "\n" in v:
log.warning(
f"{filename.name}:{i + 2} Multi-line value for column {k}.\n"
"Markdown does not support multi-line values. Use YAML instead."
)
v = v.replace("\n", " ")
                # Markdown uses "|" as a table cell separator, so escape it if it
                # occurs in one of the values. Backslashes are doubled first so
                # that existing escape characters are preserved.
row[k] = v.replace("\\", "\\\\").replace("|", "\\|")
# Find column widths
widths = [len(h) for h in headers]
for row in sheet:
for k, v in row.items():
i = header_indices[k]
widths[i] = max(widths[i], len(v))
# Render the table
header_row = [h.ljust(w) for (h, w) in zip(headers, widths)]
md.append(f"| {' | '.join(header_row)} |")
separator_row = ["-" * w for w in widths]
md.append(f"| {' | '.join(separator_row)} |")
for row in sheet:
if not row:
continue
formatted_row = [row.get(h, "").ljust(w) for (h, w) in zip(headers, widths)]
md.append(f"| {' | '.join(formatted_row)} |")
md.append("")
with open(target, "w", encoding="utf-8") as f:
f.write("\n".join(md))
def yaml_to_xlsform(filename: pathlib.Path, target: pathlib.Path):
"""Convert YAML file `filename` to XLSForm file `target`."""
log.info("yaml_to_xlsform: %s -> %s", filename, target)
with open(filename, encoding="utf-8") as f:
form = strictyaml.load(f.read()).data
if "yxf" not in form:
raise ValueError('YAML file must have a "yxf" entry.')
if "survey" not in form:
raise ValueError('YAML file must have a "survey" entry.')
_ensure_yxf_comment(form, filename.name, "YAML")
_write_to_xlsform(form, target)
def markdown_to_xlsform(filename: pathlib.Path, target: pathlib.Path):
"""Convert Markdown file `filename` to XLSForm file `target`."""
log.info("markdown_to_xlsform: %s -> %s", filename, target)
with open(filename, encoding="utf-8") as f:
md = f.read()
parser = markdown_it.MarkdownIt("js-default")
ast = markdown_it.tree.SyntaxTreeNode(parser.parse(md))
form = collections.OrderedDict()
form_headers = collections.OrderedDict()
sheet_name = None
for node in ast:
if node.tag == "h2":
sheet_name = node.children[0].content
_validate_sheet_name(sheet_name, filename.name, node.map[0])
result = []
elif node.tag == "p":
content = node.children[0].content
match = re.match(r"%%\s*(.*)", content)
if match:
sheet_name = match.group(1)
_validate_sheet_name(sheet_name, filename.name, node.map[0])
else:
# Other paragraphs are treated as comments and added to the
# beginning of the current sheet.
result.append({"#": content})
elif node.tag == "table":
if not sheet_name:
raise ValueError(
f"{filename.name}:{node.map[0]}: No sheet name specified for table."
)
thead, tbody = node.children
headers = [c.children[0].content for c in thead.children[0].children]
add_comment_column = headers[0] != "#" and result and "#" in result[0]
if add_comment_column:
headers.insert(0, "#")
rows = tbody.children
rows = [[c.children[0].content for c in row.children] for row in rows]
for values in rows:
if add_comment_column:
values.insert(0, "")
row_dict = _row_to_dict(headers, values)
if row_dict:
result.append(row_dict)
form[sheet_name] = result
form_headers[sheet_name] = headers
form["yxf"] = {"headers": form_headers}
_ensure_yxf_comment(form, filename.name, "Markdown")
_write_to_xlsform(form, target)
def main():
"""yxf: Convert from XLSForm to YAML and back."""
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("markdown_it").setLevel(logging.INFO)
parser = argparse.ArgumentParser(
description="Convert from XLSForm to YAML and back"
)
parser.add_argument("file", type=pathlib.Path, help="a file to be converted")
parser.add_argument(
"--markdown",
action="store_true",
help="use Markdown instead of YAML",
)
parser.add_argument(
"-o",
"--output",
type=pathlib.Path,
help="output file name (default: same as input, with extension changed)",
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="allow overwriting existing output files",
)
args = parser.parse_args()
if args.file.suffix == ".xlsx":
if args.markdown or (args.output and args.output.suffix == ".md"):
args.output = args.output or args.file.with_suffix(".md")
_check_existing_output(args.output, args.force)
xlsform_to_markdown(args.file, args.output)
else:
args.output = args.output or args.file.with_suffix(".yaml")
_check_existing_output(args.output, args.force)
xlsform_to_yaml(args.file, args.output)
elif args.file.suffix == ".yaml":
args.output = args.output or args.file.with_suffix(".xlsx")
_check_existing_output(args.output, args.force)
yaml_to_xlsform(args.file, args.output)
elif args.file.suffix == ".md":
args.output = args.output or args.file.with_suffix(".xlsx")
_check_existing_output(args.output, args.force)
markdown_to_xlsform(args.file, args.output)
else:
raise ValueError(f"Unrecognized file extension: {args.file}")
if __name__ == "__main__":
main()
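# Typical invocations (assuming the package is installed and run as a module):
#
#   python -m yxf form.xlsx                # -> form.yaml
#   python -m yxf form.xlsx --markdown     # -> form.md
#   python -m yxf form.yaml                # -> form.xlsx
#   python -m yxf form.md -o custom.xlsx   # explicit output name
#   python -m yxf form.xlsx --force        # overwrite an existing output file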

the-stack_106_26232
import sys, os
sys.path.append(os.pardir)
from Common.math_functions import *
from Common.gradient import numerical_gradient
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size,
weight_init_std=0.01):
        # Initialize weights
self.params = {}
self.params['W1'] = weight_init_std * \
np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = weight_init_std * \
np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def predict(self, x):
W1, W2 = self.params['W1'], self.params['W2']
b1, b2 = self.params['b1'], self.params['b2']
        a1 = np.dot(x, W1) + b1  # first layer
        z1 = sigmoid(a1)  # output of the first layer
        a2 = np.dot(z1, W2) + b2  # second layer
y = softmax(a2)
return y
def loss(self, x, t):
        ''' Loss function
        :param x: input data
        :param t: supervision (label) data
        :return: value of the loss function
        '''
y = self.predict(x)
return cross_entropy_error(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads
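# Minimal usage sketch (hypothetical sizes; assumes one-hot encoded labels t):
#
#   net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
#   x = np.random.rand(100, 784)
#   t = np.eye(10)[np.random.randint(0, 10, 100)]
#   grads = net.numerical_gradient(x, t)
#   for key in ('W1', 'b1', 'W2', 'b2'):
#       net.params[key] -= 0.1 * grads[key]   # one gradient-descent step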

the-stack_106_26233
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_query_table_async.py
DESCRIPTION:
These samples demonstrate the following: querying a table for entities.
USAGE:
python sample_query_table_async.py
Set the environment variables with your own values before running the sample:
    1) AZURE_TABLES_CONNECTION_STRING - the connection string to your storage account
"""
import os
import copy
import random
import asyncio
class SampleTablesQuery(object):
connection_string = os.getenv("AZURE_TABLES_CONNECTION_STRING")
table_name = "OfficeSupplies"
entity_name = "marker"
name_filter = "Name eq '{}'".format(entity_name)
async def _insert_random_entities(self):
from azure.data.tables.aio import TableClient
brands = ["Crayola", "Sharpie", "Chameleon"]
colors = ["red", "blue", "orange", "yellow"]
names = ["marker", "pencil", "pen"]
entity_template = {
"PartitionKey": "pk",
"RowKey": "row",
}
table_client = TableClient.from_connection_string(self.connection_string, self.table_name)
async with table_client:
await table_client.create_table()
for i in range(10):
e = copy.deepcopy(entity_template)
e["RowKey"] += str(i)
e["Name"] = random.choice(names)
e["Brand"] = random.choice(brands)
e["Color"] = random.choice(colors)
await table_client.create_entity(entity=e)
async def sample_query_entities(self):
await self._insert_random_entities()
from azure.data.tables.aio import TableClient
from azure.core.exceptions import HttpResponseError
table_client = TableClient.from_connection_string(self.connection_string, self.table_name)
# [START query_entities]
async with table_client:
try:
entity_name = "marker"
name_filter = "Name eq '{}'".format(entity_name)
async for entity_chosen in table_client.query_entities(filter=name_filter, select=["Brand","Color"]):
print(entity_chosen)
except HttpResponseError as e:
pass
# [END query_entities]
finally:
await table_client.delete_table()
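    # Other OData filter strings accepted by query_entities (illustrative; the
    # property names come from the entities created in _insert_random_entities):
    #
    #   "PartitionKey eq 'pk'"
    #   "Color eq 'red' and Brand eq 'Crayola'"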
async def main():
stq = SampleTablesQuery()
await stq.sample_query_entities()
if __name__ == '__main__':
asyncio.run(main())

the-stack_106_26234
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
from pathlib import Path
import functools
import re
import sysconfig
import typing as T
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import (
DependencyException, DependencyMethods, ExternalDependency,
PkgConfigDependency, CMakeDependency, ConfigToolDependency,
factory_methods, DependencyFactory,
)
if T.TYPE_CHECKING:
from ..environment import Environment, MachineChoice
from .base import DependencyType # noqa: F401
@factory_methods({DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE})
def netcdf_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List['DependencyType']:
language = kwargs.get('language', 'c')
if language not in ('c', 'cpp', 'fortran'):
raise DependencyException('Language {} is not supported with NetCDF.'.format(language))
candidates = [] # type: T.List['DependencyType']
if DependencyMethods.PKGCONFIG in methods:
if language == 'fortran':
pkg = 'netcdf-fortran'
else:
pkg = 'netcdf'
candidates.append(functools.partial(PkgConfigDependency, pkg, env, kwargs, language=language))
if DependencyMethods.CMAKE in methods:
candidates.append(functools.partial(CMakeDependency, 'NetCDF', env, kwargs, language=language))
return candidates
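# For context, a meson.build file would typically reach this factory through the
# dependency() function, e.g. (illustrative):
#
#   netcdf_dep = dependency('netcdf', language : 'fortran')
#
# which arrives here with kwargs['language'] == 'fortran' and tries pkg-config
# ('netcdf-fortran') before CMake, in the order the candidates are appended.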
class OpenMPDependency(ExternalDependency):
# Map date of specification release (which is the macro value) to a version.
VERSIONS = {
'201811': '5.0',
'201611': '5.0-revision1', # This is supported by ICC 19.x
'201511': '4.5',
'201307': '4.0',
'201107': '3.1',
'200805': '3.0',
'200505': '2.5',
'200203': '2.0',
'199810': '1.0',
}
def __init__(self, environment, kwargs):
language = kwargs.get('language')
super().__init__('openmp', environment, kwargs, language=language)
self.is_found = False
if self.clib_compiler.get_id() == 'pgi':
# through at least PGI 19.4, there is no macro defined for OpenMP, but OpenMP 3.1 is supported.
self.version = '3.1'
self.is_found = True
self.compile_args = self.link_args = self.clib_compiler.openmp_flags()
return
try:
openmp_date = self.clib_compiler.get_define(
'_OPENMP', '', self.env, self.clib_compiler.openmp_flags(), [self], disable_cache=True)[0]
except mesonlib.EnvironmentException as e:
mlog.debug('OpenMP support not available in the compiler')
mlog.debug(e)
openmp_date = None
if openmp_date:
self.version = self.VERSIONS[openmp_date]
# Flang has omp_lib.h
header_names = ('omp.h', 'omp_lib.h')
for name in header_names:
if self.clib_compiler.has_header(name, '', self.env, dependencies=[self], disable_cache=True)[0]:
self.is_found = True
self.compile_args = self.link_args = self.clib_compiler.openmp_flags()
break
if not self.is_found:
mlog.log(mlog.yellow('WARNING:'), 'OpenMP found but omp.h missing.')
class ThreadDependency(ExternalDependency):
def __init__(self, name: str, environment, kwargs):
super().__init__(name, environment, kwargs)
self.is_found = True
# Happens if you are using a language with threads
# concept without C, such as plain Cuda.
if self.clib_compiler is None:
self.compile_args = []
self.link_args = []
else:
self.compile_args = self.clib_compiler.thread_flags(environment)
self.link_args = self.clib_compiler.thread_link_flags(environment)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CMAKE]
class BlocksDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('blocks', environment, kwargs)
self.name = 'blocks'
self.is_found = False
if self.env.machines[self.for_machine].is_darwin():
self.compile_args = []
self.link_args = []
else:
self.compile_args = ['-fblocks']
self.link_args = ['-lBlocksRuntime']
if not self.clib_compiler.has_header('Block.h', '', environment, disable_cache=True) or \
not self.clib_compiler.find_library('BlocksRuntime', environment, []):
mlog.log(mlog.red('ERROR:'), 'BlocksRuntime not found.')
return
source = '''
int main(int argc, char **argv)
{
int (^callback)(void) = ^ int (void) { return 0; };
return callback();
}'''
with self.clib_compiler.compile(source, extra_args=self.compile_args + self.link_args) as p:
if p.returncode != 0:
mlog.log(mlog.red('ERROR:'), 'Compiler does not support blocks extension.')
return
self.is_found = True
class Python3DependencySystem(ExternalDependency):
def __init__(self, name, environment, kwargs):
super().__init__(name, environment, kwargs)
if not environment.machines.matches_build_machine(self.for_machine):
return
if not environment.machines[self.for_machine].is_windows():
return
self.name = 'python3'
self.static = kwargs.get('static', False)
# We can only be sure that it is Python 3 at this point
self.version = '3'
self._find_libpy3_windows(environment)
@staticmethod
def get_windows_python_arch():
pyplat = sysconfig.get_platform()
if pyplat == 'mingw':
pycc = sysconfig.get_config_var('CC')
if pycc.startswith('x86_64'):
return '64'
elif pycc.startswith(('i686', 'i386')):
return '32'
else:
                mlog.log('MinGW Python built with unknown CC {!r}, please file '
'a bug'.format(pycc))
return None
elif pyplat == 'win32':
return '32'
elif pyplat in ('win64', 'win-amd64'):
return '64'
mlog.log('Unknown Windows Python platform {!r}'.format(pyplat))
return None
def get_windows_link_args(self):
pyplat = sysconfig.get_platform()
if pyplat.startswith('win'):
vernum = sysconfig.get_config_var('py_version_nodot')
if self.static:
libpath = Path('libs') / 'libpython{}.a'.format(vernum)
else:
comp = self.get_compiler()
if comp.id == "gcc":
libpath = 'python{}.dll'.format(vernum)
else:
libpath = Path('libs') / 'python{}.lib'.format(vernum)
lib = Path(sysconfig.get_config_var('base')) / libpath
elif pyplat == 'mingw':
if self.static:
libname = sysconfig.get_config_var('LIBRARY')
else:
libname = sysconfig.get_config_var('LDLIBRARY')
lib = Path(sysconfig.get_config_var('LIBDIR')) / libname
if not lib.exists():
mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
return None
return [str(lib)]
def _find_libpy3_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = self.get_windows_python_arch()
if pyarch is None:
self.is_found = False
return
arch = detect_cpu_family(env.coredata.compilers.host)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log('Unknown architecture {!r} for'.format(arch),
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch:
mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
'found {}-bit'.format(arch, pyarch))
self.is_found = False
return
# This can fail if the library is not found
largs = self.get_windows_link_args()
if largs is None:
self.is_found = False
return
self.link_args = largs
# Compile args
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.compile_args = ['-I' + inc]
if inc != platinc:
self.compile_args.append('-I' + platinc)
self.version = sysconfig.get_config_var('py_version')
self.is_found = True
@staticmethod
def get_methods():
if mesonlib.is_windows():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
elif mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG]
def log_tried(self):
return 'sysconfig'
class PcapDependencyConfigTool(ConfigToolDependency):
tools = ['pcap-config']
tool_name = 'pcap-config'
@staticmethod
def finish_init(self) -> None:
self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
self.link_args = self.get_config_value(['--libs'], 'link_args')
self.version = self.get_pcap_lib_version()
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
def get_pcap_lib_version(self):
# Since we seem to need to run a program to discover the pcap version,
# we can't do that when cross-compiling
if not self.env.machines.matches_build_machine(self.for_machine):
return None
v = self.clib_compiler.get_return_value('pcap_lib_version', 'string',
'#include <pcap.h>', self.env, [], [self])
v = re.sub(r'libpcap version ', '', v)
v = re.sub(r' -- Apple version.*$', '', v)
return v
class CupsDependencyConfigTool(ConfigToolDependency):
tools = ['cups-config']
tool_name = 'cups-config'
@staticmethod
def finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--ldflags', '--libs'], 'link_args')
@staticmethod
def get_methods():
if mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE]
else:
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.CMAKE]
class LibWmfDependencyConfigTool(ConfigToolDependency):
tools = ['libwmf-config']
tool_name = 'libwmf-config'
@staticmethod
def finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class LibGCryptDependencyConfigTool(ConfigToolDependency):
tools = ['libgcrypt-config']
tool_name = 'libgcrypt-config'
@staticmethod
def finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
ctdep.version = ctdep.get_config_value(['--version'], 'version')[0]
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class GpgmeDependencyConfigTool(ConfigToolDependency):
tools = ['gpgme-config']
    tool_name = 'gpgme-config'
@staticmethod
def finish_init(ctdep):
ctdep.compile_args = ctdep.get_config_value(['--cflags'], 'compile_args')
ctdep.link_args = ctdep.get_config_value(['--libs'], 'link_args')
ctdep.version = ctdep.get_config_value(['--version'], 'version')[0]
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL]
class ShadercDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('shaderc', environment, kwargs)
static_lib = 'shaderc_combined'
shared_lib = 'shaderc_shared'
libs = [shared_lib, static_lib]
if self.static:
libs.reverse()
cc = self.get_compiler()
for lib in libs:
self.link_args = cc.find_library(lib, environment, [])
if self.link_args is not None:
self.is_found = True
if self.static and lib != static_lib:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(static_lib, self.name))
break
def log_tried(self):
return 'system'
@staticmethod
def get_methods():
return [DependencyMethods.SYSTEM, DependencyMethods.PKGCONFIG]
@factory_methods({DependencyMethods.PKGCONFIG})
def curses_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List['DependencyType']:
candidates = [] # type: T.List['DependencyType']
if DependencyMethods.PKGCONFIG in methods:
pkgconfig_files = ['ncurses', 'ncursesw']
for pkg in pkgconfig_files:
candidates.append(functools.partial(PkgConfigDependency, pkg, env, kwargs))
return candidates
@factory_methods({DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM})
def shaderc_factory(env: 'Environment', for_machine: 'MachineChoice',
kwargs: T.Dict[str, T.Any], methods: T.List[DependencyMethods]) -> T.List['DependencyType']:
"""Custom DependencyFactory for ShaderC.
    ShaderC's oddity of providing three different libraries from the same build
    is easier to represent as a separate function than by twisting
    DependencyFactory even more.
"""
candidates = [] # type: T.List['DependencyType']
if DependencyMethods.PKGCONFIG in methods:
# ShaderC packages their shared and static libs together
# and provides different pkg-config files for each one. We
# smooth over this difference by handling the static
# keyword before handing off to the pkg-config handler.
shared_libs = ['shaderc']
static_libs = ['shaderc_combined', 'shaderc_static']
if kwargs.get('static', False):
c = [functools.partial(PkgConfigDependency, name, env, kwargs)
for name in static_libs + shared_libs]
else:
c = [functools.partial(PkgConfigDependency, name, env, kwargs)
for name in shared_libs + static_libs]
candidates.extend(c)
if DependencyMethods.SYSTEM in methods:
candidates.append(functools.partial(ShadercDependency, env, kwargs))
return candidates
cups_factory = DependencyFactory(
'cups',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE],
configtool_class=CupsDependencyConfigTool,
cmake_name='Cups',
)
gpgme_factory = DependencyFactory(
'gpgme',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=GpgmeDependencyConfigTool,
)
libgcrypt_factory = DependencyFactory(
'libgcrypt',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=LibGCryptDependencyConfigTool,
)
libwmf_factory = DependencyFactory(
'libwmf',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=LibWmfDependencyConfigTool,
)
pcap_factory = DependencyFactory(
'pcap',
[DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL],
configtool_class=PcapDependencyConfigTool,
pkgconfig_name='libpcap',
)
python3_factory = DependencyFactory(
'python3',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM, DependencyMethods.EXTRAFRAMEWORK],
system_class=Python3DependencySystem,
    # The macOS framework is simply named 'Python', without a version number
framework_name='Python',
    # There is a python in /System/Library/Frameworks, but that's python 2.x,
# Python 3 will always be in /Library
extra_kwargs={'paths': ['/Library/Frameworks']},
)
threads_factory = DependencyFactory(
'threads',
[DependencyMethods.SYSTEM, DependencyMethods.CMAKE],
cmake_name='Threads',
system_class=ThreadDependency,
)

the-stack_106_26235
r"""
This module contains classes for working with sparse matrices.
"""
from __future__ import division
from copy import deepcopy
from collections.abc import Mapping, MutableMapping
from numbers import Number, Integral
import numpy as np
import sympy as sp
from scipy.sparse import bmat, dia_matrix, kron, diags as sp_diags
from scipy.sparse.linalg import spsolve
from mpi4py import MPI
from .utilities import integrate_sympy
__all__ = ['SparseMatrix', 'SpectralMatrix', 'extract_diagonal_matrix',
'extract_bc_matrices', 'check_sanity', 'get_dense_matrix',
'TPMatrix', 'BlockMatrix', 'BlockMatrices', 'Identity',
'get_dense_matrix_sympy', 'get_dense_matrix_quadpy']
comm = MPI.COMM_WORLD
class SparseMatrix(MutableMapping):
r"""Base class for sparse matrices.
The data is stored as a dictionary, where keys and values are, respectively,
the offsets and values of the diagonals. In addition, each matrix is stored
with a coefficient that is used as a scalar multiple of the matrix.
Parameters
----------
d : dict
Dictionary, where keys are the diagonal offsets and values the
diagonals
shape : two-tuple of ints
scale : number, optional
Scale matrix with this number
Note
----
The dictionary can use a function to generate its values. See, e.g.,
the ASDSDmat matrix, where diagonals for keys > 2 are functions that return
the proper diagonal when looked up.
Examples
--------
A tridiagonal matrix of shape N x N could be created as
>>> from shenfun import SparseMatrix
>>> import numpy as np
>>> N = 4
>>> d = {-1: 1, 0: -2, 1: 1}
>>> S = SparseMatrix(d, (N, N))
>>> dict(S)
{-1: 1, 0: -2, 1: 1}
In case of variable values, store the entire diagonal. For an N x N
matrix use
>>> d = {-1: np.ones(N-1),
... 0: -2*np.ones(N),
... 1: np.ones(N-1)}
>>> S = SparseMatrix(d, (N, N))
>>> dict(S)
{-1: array([1., 1., 1.]), 0: array([-2., -2., -2., -2.]), 1: array([1., 1., 1.])}
"""
# pylint: disable=redefined-builtin, missing-docstring
def __init__(self, d, shape, scale=1.0):
self._storage = dict(d)
self.shape = shape
self._diags = dia_matrix((1, 1))
self.scale = scale
self._matvec_methods = []
def matvec(self, v, c, format='dia', axis=0):
"""Matrix vector product
Returns c = dot(self, v)
Parameters
----------
v : array
Numpy input array of ndim>=1
c : array
Numpy output array of same shape as v
format : str, optional
Choice for computation
- csr - Compressed sparse row format
- dia - Sparse matrix with DIAgonal storage
- python - Use numpy and vectorization
- self - To be implemented in subclass
- cython - Cython implementation that may be implemented in subclass
- numba - Numba implementation that may be implemented in subclass
axis : int, optional
The axis over which to take the matrix vector product
"""
N, M = self.shape
c.fill(0)
# Roll relevant axis to first
if axis > 0:
v = np.moveaxis(v, axis, 0)
c = np.moveaxis(c, axis, 0)
if format == 'python':
for key, val in self.items():
if np.ndim(val) > 0: # broadcasting
val = val[(slice(None), ) + (np.newaxis,)*(v.ndim-1)]
if key < 0:
c[-key:min(N, M-key)] += val*v[:min(M, N+key)]
else:
c[:min(N, M-key)] += val*v[key:min(M, N+key)]
c *= self.scale
else:
if format not in ('csr', 'dia'): # Fallback on 'csr'. Should probably throw warning
format = 'csr'
diags = self.diags(format=format)
P = int(np.prod(v.shape[1:]))
y = diags.dot(v[:M].reshape(M, P)).squeeze()
d = tuple([slice(0, m) for m in y.shape])
c[d] = y.reshape(c[d].shape)
if axis > 0:
c = np.moveaxis(c, 0, axis)
v = np.moveaxis(v, 0, axis)
return c
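    # Usage sketch, continuing the tridiagonal example from the class docstring:
    #
    #   S = SparseMatrix({-1: 1, 0: -2, 1: 1}, (4, 4))
    #   v = np.ones(4)
    #   c = np.zeros(4)
    #   c = S.matvec(v, c)   # c == [-1., 0., 0., -1.]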
def diags(self, format='dia'):
"""Return a regular sparse matrix of specified format
Parameters
----------
format : str, optional
Choice of matrix type (see scipy.sparse.diags)
- dia - Sparse matrix with DIAgonal storage
- csr - Compressed sparse row
- csc - Compressed sparse column
Note
----
This method returns the matrix scaled by self.scale.
"""
#if self._diags.shape != self.shape or self._diags.format != format:
self._diags = sp_diags(list(self.values()), list(self.keys()),
shape=self.shape, format=format)
scale = self.scale
if isinstance(scale, np.ndarray):
scale = np.atleast_1d(scale).item()
self._diags = self._diags*scale
return self._diags
def __getitem__(self, key):
v = self._storage[key]
if hasattr(v, '__call__'):
return v(key)
return v
def __delitem__(self, key):
del self._storage[key]
def __setitem__(self, key, val):
self._storage[key] = val
def __iter__(self):
return iter(self._storage)
def __len__(self):
return len(self._storage)
def __quasi__(self, Q):
return Q.diags('csc')*self.diags('csc')
def __eq__(self, a):
if self.shape != a.shape:
return False
if not self.same_keys(a):
return False
if (np.abs(self.diags('csr') - a.diags('csr')) >= 2e-8).nnz > 0:
return False
if self.scale != a.scale:
return False
return True
def __neq__(self, a):
return not self.__eq__(a)
def __imul__(self, y):
"""self.__imul__(y) <==> self*=y"""
assert isinstance(y, Number)
self.scale *= y
return self
def __mul__(self, y):
"""Returns copy of self.__mul__(y) <==> self*y"""
if isinstance(y, Number):
return SparseMatrix(deepcopy(dict(self)), self.shape,
scale=self.scale*y)
elif isinstance(y, np.ndarray):
c = np.empty_like(y)
c = self.matvec(y, c)
return c
elif isinstance(y, SparseMatrix):
return self.diags('csc')*y.diags('csc')
raise RuntimeError
def __rmul__(self, y):
"""Returns copy of self.__rmul__(y) <==> y*self"""
return self.__mul__(y)
def __div__(self, y):
"""Returns elementwise division if `y` is a Number, or a linear algebra
solve if `y` is an array.
Parameters
----------
y : Number or array
"""
if isinstance(y, Number):
return SparseMatrix(deepcopy(dict(self)), self.shape,
scale=self.scale/y)
elif isinstance(y, np.ndarray):
b = np.zeros_like(y)
b = self.solve(y, b)
return b
else:
raise NotImplementedError
def __truediv__(self, y):
"""Returns copy self.__div__(y) <==> self/y"""
return self.__div__(y)
def __add__(self, d):
"""Return copy of self.__add__(y) <==> self+d"""
if self == d:
if abs(self.scale+d.scale) < 1e-15:
f = SparseMatrix({0: 0}, self.shape)
else:
f = SparseMatrix(deepcopy(dict(self)), self.shape,
self.scale+d.scale)
else:
if abs(self.scale) < 1e-15 and abs(d.scale) < 1e-15:
f = SparseMatrix({0: 0}, self.shape)
elif abs(self.scale) < 1e-15:
f = SparseMatrix(deepcopy(dict(d)), d.shape, d.scale)
else:
f = SparseMatrix(deepcopy(dict(self)), self.shape, self.scale)
assert isinstance(d, Mapping)
for key, val in d.items():
if key in f:
# Check if symmetric and make copy if necessary
if -key in f:
if id(f[key]) == id(f[-key]):
f[-key] = deepcopy(f[key])
f[key] = f[key] + d.scale/self.scale*val
else:
f[key] = d.scale/f.scale*val
return f
def __iadd__(self, d):
"""self.__iadd__(d) <==> self += d"""
assert isinstance(d, Mapping)
assert d.shape == self.shape
#if self == d:
# self.scale += d.scale
if abs(d.scale) < 1e-16:
pass
elif abs(self.scale) < 1e-16:
self.clear()
for key, val in d.items():
self[key] = val
self.scale = d.scale
else:
for key, val in d.items():
if key in self:
# Check if symmetric and make copy if necessary
#self[key] = self[key]*self.scale
if -key in self:
if id(self[key]) == id(self[-key]):
self[-key] = deepcopy(self[key])
self[key] = self[key] + d.scale/self.scale*val
else:
self[key] = d.scale/self.scale*val
return self
def __sub__(self, d):
"""Return copy of self.__sub__(d) <==> self-d"""
assert isinstance(d, Mapping)
if self == d:
f = SparseMatrix(deepcopy(dict(self)), self.shape,
self.scale-d.scale)
elif abs(self.scale) < 1e-16:
f = SparseMatrix(deepcopy(dict(d)), d.shape, -d.scale)
else:
f = SparseMatrix(deepcopy(dict(self)), self.shape, self.scale)
for key, val in d.items():
if key in f:
# Check if symmetric and make copy if necessary
if -key in f:
if id(f[key]) == id(f[-key]):
f[-key] = deepcopy(f[key])
f[key] = f[key] - d.scale/self.scale*val
else:
f[key] = -d.scale/self.scale*val
return f
def __isub__(self, d):
"""self.__isub__(d) <==> self -= d"""
assert isinstance(d, Mapping)
assert d.shape == self.shape
if self == d:
self.scale -= d.scale
elif abs(self.scale) < 1e-16:
self.clear()
for key, val in d.items():
self[key] = val
self.scale = -d.scale
else:
for key, val in d.items():
if key in self:
#self[key] = self[key]*self.scale
# Check if symmetric and make copy if necessary
if -key in self:
if id(self[key]) == id(self[-key]):
self[-key] = deepcopy(self[key])
                    self[key] = self[key] - d.scale/self.scale*val
else:
self[key] = -d.scale/self.scale*val
#self.scale = 1
return self
def __neg__(self):
"""self.__neg__() <==> self *= -1"""
self.scale *= -1
return self
def __hash__(self):
return hash(frozenset(self))
def get_key(self):
return self.__hash__()
def same_keys(self, a):
return self.__hash__() == a.__hash__()
def scale_array(self, c, sc):
assert isinstance(sc, Number)
if abs(sc-1) > 1e-8:
c *= sc
def incorporate_scale(self):
if abs(self.scale-1) < 1e-8:
return
if hasattr(self, '_keyscale'):
self._keyscale *= self.scale
else:
for key, val in self.items():
self[key] = val*self.scale
self.scale = 1
def solve(self, b, u=None, axis=0, use_lu=False):
"""Solve matrix system Au = b
where A is the current matrix (self)
Parameters
----------
b : array
Array of right hand side on entry and solution on exit unless
u is provided.
u : array, optional
Output array
axis : int, optional
The axis over which to solve for if b and u are multi-
dimensional
use_lu : bool, optional
Look for already computed LU-matrix
Note
----
Vectors may be one- or multidimensional.
"""
assert self.shape[0] == self.shape[1]
assert self.shape[0] == b.shape[axis]
if u is None:
u = b
else:
assert u.shape == b.shape
# Roll relevant axis to first
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
if b.ndim == 1:
if use_lu:
if b.dtype.char in 'FDG' and self._lu.U.dtype.char in 'fdg':
u.real[:] = self._lu.solve(b.real)
u.imag[:] = self._lu.solve(b.imag)
else:
u[:] = self._lu.solve(b)
else:
u[:] = spsolve(self.diags('csc'), b)
else:
N = b.shape[0]
P = np.prod(b.shape[1:])
if use_lu:
if b.dtype.char in 'FDG' and self._lu.U.dtype.char in 'fdg':
u.real[:] = self._lu.solve(b.real.reshape((N, P))).reshape(u.shape)
u.imag[:] = self._lu.solve(b.imag.reshape((N, P))).reshape(u.shape)
else:
u[:] = self._lu.solve(b.reshape((N, P))).reshape(u.shape)
else:
u[:] = spsolve(self.diags('csc'), b.reshape((N, P))).reshape(u.shape)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, 0, axis)
return u
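    # Usage sketch: solving S u = b for the tridiagonal matrix above (a minimal
    # sketch; when no output array u is given, b is overwritten with the solution):
    #
    #   b = np.random.rand(4)
    #   u = np.zeros_like(b)
    #   u = S.solve(b, u)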
def isdiagonal(self):
if len(self) == 1:
if (0 in self):
return True
return False
def isidentity(self):
if not len(self) == 1:
return False
if (0 not in self):
return False
d = self[0]
if np.all(d == 1):
return True
return False
@property
def issymmetric(self):
M = self.diags()
return (abs(M-M.T) > 1e-8).nnz == 0
def clean_diagonals(self, reltol=1e-8):
"""Eliminate essentially zerovalued diagonals
Parameters
----------
reltol : number
Relative tolerance
"""
a = self * np.ones(self.shape[1])
relmax = abs(a).max() / self.shape[1]
list_keys = []
for key, val in self.items():
if abs(np.linalg.norm(val))/relmax < reltol:
list_keys.append(key)
for key in list_keys:
del self[key]
return self
class SpectralMatrix(SparseMatrix):
r"""Base class for inner product matrices.
Parameters
----------
d : dict
Dictionary, where keys are the diagonal offsets and values the
diagonals
trial : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the trial function
should be differentiated. Representing matrix column.
test : 2-tuple of (basis, int)
As trial, but representing matrix row.
scale : number, optional
Scale matrix with this number
Examples
--------
Mass matrix for Chebyshev Dirichlet basis:
.. math::
(\phi_k, \phi_j)_w = \int_{-1}^{1} \phi_k(x) \phi_j(x) w(x) dx
Stiffness matrix for Chebyshev Dirichlet basis:
.. math::
(\phi_k'', \phi_j)_w = \int_{-1}^{1} \phi_k''(x) \phi_j(x) w(x) dx
The matrices can be automatically created using, e.g., for the mass
matrix of the Dirichlet space::
SD = ShenDirichlet
N = 16
M = SpectralMatrix({}, (SD(N), 0), (SD(N), 0))
where the first (SD(N), 0) represents the test function and
the second the trial function. The stiffness matrix can be obtained as::
A = SpectralMatrix({}, (SD(N), 0), (SD(N), 2))
where (SD(N), 2) signals that we use the second derivative of this trial
function. The number N is the number of quadrature points used for the
basis.
The automatically created matrices may be overloaded with more exactly
computed diagonals.
Note that matrices with the Neumann basis are stored using index space
:math:`k = 0, 1, ..., N-2`, i.e., including the zero index for a nonzero
average value.
"""
def __init__(self, d, test, trial, scale=1.0, measure=1):
assert isinstance(test[1], (int, np.integer))
assert isinstance(trial[1], (int, np.integer))
self.testfunction = test
self.trialfunction = trial
self.measure = measure
shape = (test[0].dim(), trial[0].dim())
if d == {}:
D = get_dense_matrix(test, trial, measure)[:shape[0], :shape[1]]
#D = get_denser_matrix(test, trial, measure)[:shape[0], :shape[1]]
#D = get_dense_matrix_sympy(test, trial, measure)[:shape[0], :shape[1]]
d = extract_diagonal_matrix(D)
SparseMatrix.__init__(self, d, shape, scale)
if shape[0] == shape[1]:
from shenfun.la import Solve
self.solver = Solve(self, test[0])
def matvec(self, v, c, format='csr', axis=0):
u = self.trialfunction[0]
ss = [slice(None)]*len(v.shape)
ss[axis] = u.slice()
c = super(SpectralMatrix, self).matvec(v[tuple(ss)], c, format=format, axis=axis)
if self.testfunction[0].use_fixed_gauge:
ss[axis] = 0
c[tuple(ss)] = 0
return c
def solve(self, b, u=None, axis=0, use_lu=False):
"""Solve matrix system Au = b
where A is the current matrix (self)
Parameters
----------
b : array
Array of right hand side on entry and solution on exit unless
u is provided.
u : array, optional
Output array
axis : int, optional
The axis over which to solve for if b and u are multidimensional
use_lu : bool, optional
Look for already computed LU-matrix
Note
----
Vectors may be one- or multidimensional.
"""
u = self.solver(b, u=u, axis=axis, use_lu=use_lu)
return u
@property
def tensorproductspace(self):
"""Return the :class:`.TensorProductSpace` this matrix has been
computed for"""
return self.testfunction[0].tensorproductspace
@property
def axis(self):
"""Return the axis of the :class:`.TensorProductSpace` this matrix is
created for"""
return self.testfunction[0].axis
def __hash__(self):
return hash(((self.testfunction[0].__class__, self.testfunction[1]),
(self.trialfunction[0].__class__, self.trialfunction[1])))
def get_key(self):
if self.__class__.__name__.endswith('mat'):
return self.__class__.__name__
return self.__hash__()
def simplify_diagonal_matrices(self):
if self.isdiagonal():
self.scale = self.scale*self[0]
self[0] = 1
def __eq__(self, a):
if isinstance(a, Number):
return False
if not isinstance(a, SparseMatrix):
return False
if self.shape != a.shape:
return False
if self.get_key() != a.get_key():
return False
sl = np.array(list(self.keys()))
sa = np.array(list(a.keys()))
if not sl.shape == sa.shape:
return False
sl.sort()
sa.sort()
if not np.linalg.norm((sl-sa)**2) == 0:
return False
if np.linalg.norm(sl[0] - sa[0]) > 1e-8:
return False
return True
def __mul__(self, y):
"""Returns copy of self.__mul__(y) <==> self*y"""
if isinstance(y, Number):
f = SpectralMatrix(deepcopy(dict(self)), self.testfunction,
self.trialfunction, self.scale*y)
elif isinstance(y, np.ndarray):
f = SparseMatrix.__mul__(self, y)
elif isinstance(y, SparseMatrix):
f = self.diags('csc')*y.diags('csc')
return f
def __div__(self, y):
"""Returns copy self.__div__(y) <==> self/y"""
if isinstance(y, Number):
f = SpectralMatrix(deepcopy(dict(self)), self.testfunction,
self.trialfunction, self.scale/y)
elif isinstance(y, np.ndarray):
f = SparseMatrix.__div__(self, y)
return f
def __add__(self, y):
"""Return copy of self.__add__(y) <==> self+y"""
assert isinstance(y, Mapping)
if self == y:
f = SpectralMatrix(deepcopy(dict(self)), self.testfunction,
self.trialfunction, self.scale+y.scale)
else:
f = SparseMatrix.__add__(self, y)
return f
def __iadd__(self, d):
"""self.__iadd__(d) <==> self += d"""
SparseMatrix.__iadd__(self, d)
if self == d:
return self
else: # downcast
return SparseMatrix(dict(self), self.shape, self.scale)
def __sub__(self, y):
"""Return copy of self.__sub__(y) <==> self-y"""
assert isinstance(y, Mapping)
if self == y:
f = SpectralMatrix(deepcopy(dict(self)), self.testfunction,
self.trialfunction, self.scale-y.scale)
else:
f = SparseMatrix.__sub__(self, y)
return f
def __isub__(self, y):
"""self.__isub__(d) <==> self -= y"""
SparseMatrix.__isub__(self, y)
if self == y:
return self
else: # downcast
return SparseMatrix(dict(self), self.shape, self.scale)
class Identity(SparseMatrix):
"""The identity matrix in :class:`.SparseMatrix` form
Parameters
----------
shape : 2-tuple of ints
The shape of the matrix
scale : number, optional
Scalar multiple of the matrix, defaults to unity
"""
def __init__(self, shape, scale=1):
SparseMatrix.__init__(self, {0:1}, shape, scale)
self.measure = 1
def solve(self, b, u=None, axis=0):
if u is None:
u = b
else:
assert u.shape == b.shape
u[:] = b
u *= (1/self.scale)
return u
def BlockMatrices(tpmats):
"""Return two instances of the :class:`.BlockMatrix` class.
Parameters
----------
tpmats : sequence of :class:`.TPMatrix`'es
There should be both boundary matrices from inhomogeneous Dirichlet
or Neumann conditions, as well as regular matrices.
Note
----
Use :class:`.BlockMatrix` directly if you do not have any inhomogeneous
boundary conditions.
"""
bc_mats = extract_bc_matrices([tpmats])
assert len(bc_mats) > 0, 'No boundary matrices - use BlockMatrix'
return BlockMatrix(tpmats), BlockMatrix(bc_mats)
class BlockMatrix:
r"""A class for block matrices
Parameters
----------
tpmats : sequence of :class:`.TPMatrix` or :class:`.SparseMatrix`
The individual blocks for the matrix
Note
----
The tensor product matrices must be either boundary
matrices or regular matrices, not both. If your problem contains
inhomogeneous boundary conditions, then create two BlockMatrices,
one for the implicit terms and one for the boundary terms. To this
end you can use :class:`.BlockMatrices`.
Example
-------
Stokes equations, periodic in x and y-directions
.. math::
-\nabla^2 u - \nabla p &= 0 \\
\nabla \cdot u &= 0 \\
u(x, y, z=\pm 1) &= 0
We use for the z-direction a Dirichlet basis (SD) and a regular basis with
no boundary conditions (ST). This is combined with Fourier in the x- and
y-directions (K0, K1), such that we get two TensorProductSpaces (TD, TT)
that are the Cartesian product of these bases
.. math::
TD &= K0 \times K1 \times SD \\
TT &= K0 \times K1 \times ST
We choose trialfunctions :math:`u \in [TD]^3` and :math:`p \in TT`, and then
solve the weak problem
.. math::
\left( \nabla v, \nabla u\right) + \left(\nabla \cdot v, p \right) = 0\\
\left( q, \nabla \cdot u\right) = 0
for all :math:`v \in [TD]^3` and :math:`q \in TT`.
To solve the problem we need to assemble a block matrix
.. math::
\begin{bmatrix}
\left( \nabla v, \nabla u\right) & \left(\nabla \cdot v, p \right) \\
\left( q, \nabla \cdot u\right) & 0
\end{bmatrix}
This matrix is assemble below
>>> from shenfun import *
>>> from mpi4py import MPI
>>> comm = MPI.COMM_WORLD
>>> N = (24, 24, 24)
>>> K0 = FunctionSpace(N[0], 'Fourier', dtype='d')
>>> K1 = FunctionSpace(N[1], 'Fourier', dtype='D')
>>> SD = FunctionSpace(N[2], 'Legendre', bc=(0, 0))
>>> ST = FunctionSpace(N[2], 'Legendre')
>>> TD = TensorProductSpace(comm, (K0, K1, SD), axes=(2, 1, 0))
>>> TT = TensorProductSpace(comm, (K0, K1, ST), axes=(2, 1, 0))
>>> VT = VectorSpace(TD)
>>> Q = CompositeSpace([VT, TD])
>>> up = TrialFunction(Q)
>>> vq = TestFunction(Q)
>>> u, p = up
>>> v, q = vq
>>> A00 = inner(grad(v), grad(u))
>>> A01 = inner(div(v), p)
>>> A10 = inner(q, div(u))
>>> M = BlockMatrix(A00+A01+A10)
"""
def __init__(self, tpmats):
assert isinstance(tpmats, (list, tuple))
tpmats = [tpmats] if not isinstance(tpmats[0], (list, tuple)) else tpmats
self.mixedbase = mixedbase = tpmats[0][0].mixedbase
self.dims = dims = mixedbase.num_components()
self.mats = np.zeros((dims, dims), dtype=int).tolist()
tps = mixedbase.flatten() if hasattr(mixedbase, 'flatten') else [mixedbase]
offset = [np.zeros(tps[0].dimensions, dtype=int)]
for i, tp in enumerate(tps):
dims = tp.dim() if not hasattr(tp, 'dims') else tp.dims() # 1D basis does not have dims()
offset.append(np.array(dims + offset[i]))
self.offset = offset
self.global_shape = self.offset[-1]
self += tpmats
def __add__(self, a):
"""Return copy of self.__add__(a) <==> self+a"""
return BlockMatrix(self.get_mats()+a.get_mats())
def __iadd__(self, a):
"""self.__iadd__(a) <==> self += a
Parameters
----------
a : :class:`.BlockMatrix` or list of :class:`.TPMatrix` instances
"""
if isinstance(a, BlockMatrix):
tpmats = a.get_mats()
elif isinstance(a, (list, tuple)):
tpmats = a
for mat in tpmats:
if not isinstance(mat, list):
mat = [mat]
for m in mat:
assert isinstance(m, (TPMatrix, SparseMatrix))
i, j = m.global_index
m0 = self.mats[i][j]
if isinstance(m0, int):
self.mats[i][j] = [m]
else:
found = False
for n in m0:
if m == n:
n += m
found = True
continue
if not found:
self.mats[i][j].append(m)
def get_mats(self, return_first=False):
"""Return flattened list of matrices in self"""
tpmats = []
for mi in self.mats:
for mij in mi:
if isinstance(mij, (list, tuple)):
for m in mij:
if isinstance(m, (TPMatrix, SparseMatrix)):
if return_first:
return m
else:
tpmats.append(m)
return tpmats
def matvec(self, v, c):
"""Compute matrix vector product
c = self * v
Parameters
----------
v : :class:`.Function`
c : :class:`.Function`
Returns
-------
c : :class:`.Function`
"""
assert v.function_space() == self.mixedbase
assert c.function_space() == self.mixedbase
c.v.fill(0)
z = np.zeros_like(c.v[0])
for i, mi in enumerate(self.mats):
for j, mij in enumerate(mi):
if isinstance(mij, Number):
if abs(mij) > 1e-8:
c.v[i] += mij*v.v[j]
else:
for m in mij:
z.fill(0)
z = m.matvec(v.v[j], z)
c.v[i] += z
return c
def __getitem__(self, ij):
return self.mats[ij[0]][ij[1]]
def get_offset(self, i, axis=0):
return self.offset[i][axis]
def diags(self, it=(0,), format='csr'):
"""Return global block matrix in scipy sparse format
For multidimensional forms the returned matrix is constructed for
given indices in the periodic directions.
Parameters
----------
it : n-tuple of ints
where n is dimensions. These are the indices into the scale arrays
of the TPMatrices in various blocks. Should be zero along the non-
periodic direction.
format : str
The format of the returned matrix. See `Scipy sparse matrices <https://docs.scipy.org/doc/scipy/reference/sparse.html>`_
"""
from .spectralbase import MixedFunctionSpace
bm = []
for mi in self.mats:
bm.append([])
for mij in mi:
if isinstance(mij, Number):
bm[-1].append(None)
else:
m = mij[0]
if isinstance(self.mixedbase, MixedFunctionSpace):
d = m.diags(format)
for mj in mij[1:]:
d = d + mj.diags(format)
elif len(m.naxes) == 2:
# 2 non-periodic directions
assert len(m.mats) == 2, "Only implemented without periodic directions"
d = m.scale.item()*kron(m.mats[0].diags(format), m.mats[1].diags(format))
for mj in mij[1:]:
d = d + mj.scale.item()*kron(mj.mats[0].diags(format), mj.mats[1].diags(format))
else:
iit = np.where(np.array(m.scale.shape) == 1, 0, it) # if shape is 1 use index 0, else use given index (shape=1 means the scale is constant in that direction)
sc = m.scale[tuple(iit)]
d = sc*m.pmat.diags(format)
for mj in mij[1:]:
iit = np.where(np.array(mj.scale.shape) == 1, 0, it)
sc = mj.scale[tuple(iit)]
d = d + sc*mj.pmat.diags(format)
bm[-1].append(d)
return bmat(bm, format=format)
def solve(self, b, u=None, constraints=(), return_system=False, Alu=None, BM=None):
r"""
Solve matrix system Au = b
where A is the current :class:`.BlockMatrix` (self)
Parameters
----------
b : array
Array of right hand side
u : array, optional
Output array
constraints : sequence of 3-tuples of (int, int, number)
Any 3-tuple describe a dof to be constrained. The first int
represents the block number of the function to be constrained. The
second int gives which degree of freedom to constrain and the number
gives the value it should obtain. For example, for the global
restriction that
.. math::
\frac{1}{V}\int p dx = number
where we have
.. math::
p = \sum_{k=0}^{N-1} \hat{p}_k \phi_k
it is sufficient to fix the first dof of p, \hat{p}_0, since
the bases are created such that all basis functions except the
first integrates to zero. So in this case the 3-tuple can be
(2, 0, 0) if p is found in block 2 of the mixed basis.
The constraint can only be applied to bases with no given
explicit boundary condition, like the pure Chebyshev or Legendre
bases.
Other Parameters
----------------
return_system : bool, optional
If True then return the assembled block matrix as well as the
solution in a 2-tuple (solution, matrix). This is helpful for
repeated solves, because the returned matrix may then be
factorized once and reused.
Only for non-periodic problems
Alu : pre-factorized matrix, optional
Computed with Alu = splu(self), where self is the assembled block
matrix. Only for non-periodic problems.
"""
from .forms.arguments import Function
import scipy.sparse as sp
space = b.function_space()
if u is None:
u = Function(space)
else:
assert u.shape == b.shape
if BM: # Add contribution to right hand side due to inhomogeneous boundary conditions
u.set_boundary_dofs()
w0 = np.zeros_like(u)
b -= BM.matvec(u, w0)
tpmat = self.get_mats(True)
axis = tpmat.naxes[0] if isinstance(tpmat, TPMatrix) else 0
tp = space.flatten() if hasattr(space, 'flatten') else [space]
nvars = b.shape[0] if len(b.shape) > space.dimensions else 1
u = u.reshape(1, *u.shape) if nvars == 1 else u
b = b.reshape(1, *b.shape) if nvars == 1 else b
for con in constraints:
assert len(con) == 3
assert isinstance(con[0], Integral)
assert isinstance(con[1], Integral)
assert isinstance(con[2], Number)
N = self.global_shape[axis]
gi = np.zeros(N, dtype=b.dtype)
go = np.zeros(N, dtype=b.dtype)
if space.dimensions == 1:
s = [0, 0]
Ai = self.diags((0,))
for k in range(nvars):
s[0] = k
s[1] = tp[k].slice()
gi[self.offset[k][axis]:self.offset[k+1][axis]] = b[tuple(s)]
if Alu is not None:
go[:] = Alu.solve(gi)
else:
go[:] = sp.linalg.spsolve(Ai, gi)
for k in range(nvars):
s[0] = k
s[1] = tp[k].slice()
u[tuple(s)] = go[self.offset[k][axis]:self.offset[k+1][axis]]
if return_system:
return u, Ai
elif space.dimensions == 2:
if len(tpmat.naxes) == 2: # 2 non-periodic axes
s = [0, 0, 0]
if Alu is None:
Ai = self.diags(format='csr')
gi = np.zeros(space.dim(), dtype=b.dtype)
go = np.zeros(space.dim(), dtype=b.dtype)
start = 0
for k in range(nvars):
s[0] = k
s[1] = tp[k].bases[0].slice()
s[2] = tp[k].bases[1].slice()
gi[start:(start+tp[k].dim())] = b[tuple(s)].ravel()
start += tp[k].dim()
for con in constraints:
dim = 0
for i in range(con[0]):
dim += tp[i].dim()
if Alu is None:
Ai, gi = self.apply_constraint(Ai, gi, dim, 0, con)
else:
gi[dim] = con[2]
if Alu is not None:
go[:] = Alu.solve(gi)
else:
go[:] = sp.linalg.spsolve(Ai, gi)
start = 0
for k in range(nvars):
s[0] = k
s[1] = tp[k].bases[0].slice()
s[2] = tp[k].bases[1].slice()
u[tuple(s)] = go[start:(start+tp[k].dim())].reshape((1, tp[k].bases[0].dim(), tp[k].bases[1].dim()))
start += tp[k].dim()
if return_system:
return u, Ai
else:
s = [0]*3
ii, jj = {0:(2, 1), 1:(1, 2)}[axis]
d0 = [0, 0]
for i in range(b.shape[ii]):
d0[(axis+1)%2] = i
Ai = self.diags(d0)
s[ii] = i
for k in range(nvars):
s[0] = k
s[jj] = tp[k].bases[axis].slice()
gi[self.offset[k][axis]:self.offset[k+1][axis]] = b[tuple(s)]
for con in constraints:
Ai, gi = self.apply_constraint(Ai, gi, self.offset[con[0]][axis], i, con)
go[:] = sp.linalg.spsolve(Ai, gi)
for k in range(nvars):
s[0] = k
s[jj] = tp[k].bases[axis].slice()
u[tuple(s)] = go[self.offset[k][axis]:self.offset[k+1][axis]]
elif space.dimensions == 3:
s = [0]*4
ii, jj = {0:(2, 3), 1:(1, 3), 2:(1, 2)}[axis]
d0 = [0, 0, 0]
for i in range(b.shape[ii]):
for j in range(b.shape[jj]):
d0[ii-1], d0[jj-1] = i, j
Ai = self.diags(d0)
s[ii], s[jj] = i, j
for k in range(nvars):
s[0] = k
s[axis+1] = tp[k].bases[axis].slice()
gi[self.offset[k][axis]:self.offset[k+1][axis]] = b[tuple(s)]
for con in constraints:
Ai, gi = self.apply_constraint(Ai, gi, self.offset[con[0]][axis], (i, j), con)
go[:] = sp.linalg.spsolve(Ai, gi)
for k in range(nvars):
s[0] = k
s[axis+1] = tp[k].bases[axis].slice()
u[tuple(s)] = go[self.offset[k][axis]:self.offset[k+1][axis]]
u = u.reshape(u.shape[1:]) if nvars == 1 else u
b = b.reshape(b.shape[1:]) if nvars == 1 else b
return u
@staticmethod
def apply_constraint(A, b, offset, i, constraint):
if constraint is None or comm.Get_rank() > 0:
return A, b
if isinstance(i, int):
if i > 0:
return A, b
if isinstance(i, tuple):
if np.sum(np.array(i)) > 0:
return A, b
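        # Enforce the scalar constraint by pinning one row of the system: the row is
        # zeroed out, the diagonal entry is set to 1 and the right hand side is set
        # to the constraint value.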
        assert isinstance(constraint, tuple)
        assert len(constraint) == 3
        row = offset + constraint[1]
        val = constraint[2]
        #print('Applying constraint row %d con (%d, %d, %2.5f)' %(row, *constraint))
        b[row] = val
r = A.getrow(row).nonzero()
#rp = A.getrow(row-1).nonzero()
A[(row, r[1])] = 0
#A[(row, rp[1])] = 0
#A[row, offset] = 1
A[row, row] = 1
#A[offset, row] = 1
#A[row-1, row-1] = 1
return A, b
class TPMatrix:
"""Tensor product matrix
A :class:`.TensorProductSpace` is the tensor product of ``D`` univariate
function spaces. A normal matrix (a second order tensor) is assembled from
bilinear forms (i.e., forms containing both test and trial functions) on
one univariate function space. A bilinear form on a tensor product space
will assemble to ``D`` outer products of such univariate matrices. That is,
for a two-dimensional tensor product you get fourth order tensors (outer
product of two matrices), and three-dimensional tensor product spaces leads
to a sixth order tensor (outer product of three matrices). This class
contains ``D`` second order matrices. The complete matrix is as such the
outer product of these ``D`` matrices.
Note that the outer product of two matrices often is called the Kronecker
product.
Parameters
----------
mats : sequence, or sequence of sequence of matrices
Instances of :class:`.SpectralMatrix` or :class:`.SparseMatrix`
The length of ``mats`` is the number of dimensions of the
:class:`.TensorProductSpace`
testspace : Function space
The test :class:`.TensorProductSpace`
trialspace : Function space
The trial :class:`.TensorProductSpace`
scale : array, optional
Scalar multiple of matrices. Must have ndim equal to the number of
dimensions in the :class:`.TensorProductSpace`, and the shape must be 1
along any directions with a nondiagonal matrix.
global_index : 2-tuple, optional
Indices (test, trial) into mixed space :class:`.CompositeSpace`.
mixedbase : :class:`.CompositeSpace`, optional
Instance of the base space
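    Example
    -------
    A minimal sketch of the Kronecker structure, assuming ``tpmat`` is a
    two-dimensional TPMatrix with component matrices ``A0, A1 = tpmat.mats``::

        import scipy.sparse as sp
        A = sp.kron(A0.diags(format='csr'), A1.diags(format='csr'))
        # numerically the same as tpmat.diags(format='csr'); the scale array
        # is applied separately in matvec/solve.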
"""
def __init__(self, mats, testspace, trialspace, scale=1.0, global_index=None, mixedbase=None):
assert isinstance(mats, (list, tuple))
assert len(mats) == len(testspace)
self.mats = mats
self.space = testspace
self.trialspace = trialspace
self.scale = scale
self.pmat = 1
self.naxes = []
self.global_index = global_index
self.mixedbase = mixedbase
self._issimplified = False
def simplify_diagonal_matrices(self):
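        """Fold the diagonal component matrices into ``self.scale`` and replace
        them by identity matrices, so that only non-diagonal axes keep real matrices."""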
self.naxes = []
for axis, mat in enumerate(self.mats):
if not mat:
continue
#if mat.isdiagonal():
if axis not in self.space.get_nondiagonal_axes():
if self.dimensions == 1: # Don't bother with the 1D case
continue
else:
                    d = mat[0] # get the diagonal
if np.ndim(d):
d = self.space[axis].broadcast_to_ndims(d)
d = d*mat.scale
self.scale = self.scale*d
self.mats[axis] = Identity(mat.shape)
else:
self.naxes.append(axis)
# Decomposition
if len(self.space) > 1:
s = self.scale.shape
ss = [slice(None)]*self.space.dimensions
ls = self.space.local_slice()
for axis, shape in enumerate(s):
if shape > 1:
ss[axis] = ls[axis]
self.scale = (self.scale[tuple(ss)]).copy()
# If only one non-diagonal matrix, then make a simple link to
# this matrix.
if len(self.naxes) == 1:
self.pmat = self.mats[self.naxes[0]]
elif len(self.naxes) == 2: # 2 nondiagonal
self.pmat = self.mats
self._issimplified = True
def solve(self, b, u=None):
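        """Solve ``self * u = b`` and return ``u``, choosing the strategy from the
        number of non-diagonal axes (pure scaling, a single 1D solve, or SolverGeneric2ND)."""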
if len(self.naxes) == 0:
sl = tuple([s.slice() for s in self.trialspace.bases])
d = self.scale
with np.errstate(divide='ignore'):
d = 1./self.scale
d = np.where(np.isfinite(d), d, 0)
if u is None:
from .forms.arguments import Function
u = Function(self.space)
u[sl] = b[sl] * d[sl]
elif len(self.naxes) == 1:
axis = self.naxes[0]
u = self.pmat.solve(b, u=u, axis=axis)
with np.errstate(divide='ignore'):
u /= self.scale
u[:] = np.where(np.isfinite(u), u, 0)
elif len(self.naxes) == 2:
from shenfun.la import SolverGeneric2ND
H = SolverGeneric2ND([self])
u = H(b, u)
return u
def matvec(self, v, c):
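        """Compute the matrix-vector product ``c = self * v`` (including the scale
        array), store it in ``c`` and return ``c``."""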
c.fill(0)
if len(self.naxes) == 0:
c[:] = self.scale*v
elif len(self.naxes) == 1:
axis = self.naxes[0]
rank = v.rank if hasattr(v, 'rank') else 0
if rank == 0:
c = self.pmat.matvec(v, c, axis=axis)
else:
c = self.pmat.matvec(v[self.global_index[1]], c, axis=axis)
c = c*self.scale
elif len(self.naxes) == 2:
# 2 non-periodic directions (may be non-aligned in second axis, hence transfers)
npaxes = deepcopy(self.naxes)
space = self.space
if space.forward.input_array.shape != space.forward.output_array.shape:
space = space.get_unplanned(True) # in case self.space is padded
pencilA = space.forward.output_pencil
subcomms = [s.Get_size() for s in pencilA.subcomm]
axis = pencilA.axis
assert subcomms[axis] == 1
npaxes.remove(axis)
second_axis = npaxes[0]
pencilB = pencilA.pencil(second_axis)
transAB = pencilA.transfer(pencilB, c.dtype.char)
cB = np.zeros(transAB.subshapeB, dtype=c.dtype)
cC = np.zeros(transAB.subshapeB, dtype=c.dtype)
bb = self.mats[axis]
c = bb.matvec(v, c, axis=axis)
# align in second non-periodic axis
transAB.forward(c, cB)
bb = self.mats[second_axis]
cC = bb.matvec(cB, cC, axis=second_axis)
transAB.backward(cC, c)
c *= self.scale
return c
def get_key(self):
"""Return key of the one nondiagonal matrix in the TPMatrix
Note
----
        Raises an error if there is more than one nondiagonal matrix
in TPMatrix.
"""
naxis = self.space.get_nondiagonal_axes()
assert len(naxis) == 1
return self.mats[naxis[0]].get_key()
def isidentity(self):
return np.all([m.isidentity() for m in self.mats])
def isdiagonal(self):
return np.all([m.isdiagonal() for m in self.mats])
def is_bc_matrix(self):
for m in self.mats:
if hasattr(m, 'trialfunction'):
if m.trialfunction[0].boundary_condition() == 'Apply':
return True
return False
@property
def dimensions(self):
"""Return dimension of TPMatrix"""
return len(self.mats)
def __mul__(self, a):
"""Returns copy of self.__mul__(a) <==> self*a"""
if isinstance(a, Number):
            return TPMatrix(self.mats, self.space, self.trialspace, self.scale*a,
self.global_index, self.mixedbase)
elif isinstance(a, np.ndarray):
c = np.empty_like(a)
c = self.matvec(a, c)
return c
def __rmul__(self, a):
"""Returns copy of self.__rmul__(a) <==> a*self"""
if isinstance(a, Number):
return self.__mul__(a)
else:
raise NotImplementedError
def __imul__(self, a):
"""Returns self.__imul__(a) <==> self*=a"""
if isinstance(a, Number):
self.scale *= a
elif isinstance(a, np.ndarray):
self.scale = self.scale*a
return self
def __div__(self, a):
"""Returns copy self.__div__(a) <==> self/a"""
if isinstance(a, Number):
return TPMatrix(self.mats, self.space, self.trialspace, self.scale/a,
self.global_index, self.mixedbase)
elif isinstance(a, np.ndarray):
b = np.zeros_like(a)
b = self.solve(a, b)
return b
else:
raise NotImplementedError
def __neg__(self):
"""self.__neg__() <==> self *= -1"""
self.scale *= -1
return self
def __eq__(self, a):
"""Check if matrices and global_index are the same.
Note
----
The attribute scale may still be different
"""
assert isinstance(a, TPMatrix)
if not self.global_index == a.global_index:
return False
for m0, m1 in zip(self.mats, a.mats):
if not m0.get_key() == m1.get_key():
return False
if not m0 == m1:
return False
return True
def __ne__(self, a):
return not self.__eq__(a)
def __add__(self, a):
"""Return copy of self.__add__(a) <==> self+a"""
assert isinstance(a, TPMatrix)
assert self == a
return TPMatrix(self.mats, self.space, self.trialspace, self.scale+a.scale,
self.global_index, self.mixedbase)
def __iadd__(self, a):
"""self.__iadd__(a) <==> self += a"""
assert isinstance(a, TPMatrix)
assert self == a
self.scale = self.scale + a.scale
return self
def __sub__(self, a):
"""Return copy of self.__sub__(a) <==> self-a"""
assert isinstance(a, TPMatrix)
assert self == a
return TPMatrix(self.mats, self.space, self.trialspace, self.scale-a.scale,
self.global_index, self.mixedbase)
def __isub__(self, a):
"""self.__isub__(a) <==> self -= a"""
assert isinstance(a, TPMatrix)
assert self == a
self.scale = self.scale - a.scale
return self
def diags(self, format='csr'):
if self.dimensions == 2:
return kron(self.mats[0].diags(format=format), self.mats[1].diags(format=format))
elif self.dimensions == 3:
return kron(self.mats[0].diags(format=format), kron(self.mats[1].diags(format=format), self.mats[2].diags(format=format)))
def check_sanity(A, test, trial, measure=1):
"""Sanity check for matrix.
Test that automatically created matrix agrees with overloaded one
Parameters
----------
A : matrix
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : sympy function of coordinate, optional
"""
N, M = A.shape
if measure == 1:
D = get_dense_matrix(test, trial, measure)[:N, :M]
else:
D = get_denser_matrix(test, trial, measure)
Dsp = extract_diagonal_matrix(D)
for key, val in A.items():
assert np.allclose(val*A.scale, Dsp[key])
def get_dense_matrix(test, trial, measure=1):
"""Return dense matrix automatically computed from basis
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : Sympy expression of coordinate, or number, optional
Additional weight to integral. For example, in cylindrical
coordinates an additional measure is the radius `r`.
"""
K0 = test[0].slice().stop - test[0].slice().start
K1 = trial[0].slice().stop - trial[0].slice().start
N = test[0].N
x = test[0].mpmath_points_and_weights(N, map_true_domain=False)[0]
ws = test[0].get_measured_weights(N, measure)
v = test[0].evaluate_basis_derivative_all(x=x, k=test[1])[:, :K0]
u = trial[0].evaluate_basis_derivative_all(x=x, k=trial[1])[:, :K1]
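    # Quadrature assembly: A[i, j] = sum_k conj(v[k, i]) * ws[k] * u[k, j]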
A = np.dot(np.conj(v.T)*ws[np.newaxis, :], u)
if A.dtype.char in 'FDG':
if np.linalg.norm(A.real) / np.linalg.norm(A.imag) > 1e14:
A = A.real.copy()
return A
def get_denser_matrix(test, trial, measure=1):
"""Return dense matrix automatically computed from basis
Use slightly more quadrature points than usual N
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : Sympy expression of coordinate, or number, optional
Additional weight to integral. For example, in cylindrical
coordinates an additional measure is the radius `r`.
"""
test2 = test[0].get_refined((test[0].N*3)//2)
K0 = test[0].slice().stop - test[0].slice().start
K1 = trial[0].slice().stop - trial[0].slice().start
N = test2.N
x = test2.mpmath_points_and_weights(N, map_true_domain=False)[0]
ws = test2.get_measured_weights(N, measure)
v = test[0].evaluate_basis_derivative_all(x=x, k=test[1])[:, :K0]
u = trial[0].evaluate_basis_derivative_all(x=x, k=trial[1])[:, :K1]
return np.dot(np.conj(v.T)*ws[np.newaxis, :], u)
def extract_diagonal_matrix(M, abstol=1e-10, reltol=1e-10):
"""Return SparseMatrix version of dense matrix ``M``
Parameters
----------
M : Numpy array of ndim=2
    abstol : float
        Absolute tolerance. Only diagonals with max(:math:`|d|`) > abstol are
        kept in the returned SparseMatrix, where :math:`d` is the
        diagonal
reltol : float
Relative tolerance. Only diagonals with
max(:math:`|d|`)/max(:math:`|M|`) > reltol are kept in the
returned SparseMatrix
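    Example
    -------
    A small sketch with arbitrary values; only diagonals passing both
    tolerances are kept::

        M = np.array([[2., 0., 1.],
                      [0., 2., 0.],
                      [0., 0., 2.]])
        A = extract_diagonal_matrix(M)  # keeps the main diagonal and diagonal +2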
"""
d = {}
relmax = abs(M).max()
dtype = float if M.dtype == 'O' else M.dtype # For mpf object
for i in range(M.shape[1]):
u = M.diagonal(i).copy()
if abs(u).max() > abstol and abs(u).max()/relmax > reltol:
d[i] = np.array(u, dtype=dtype)
for i in range(1, M.shape[0]):
l = M.diagonal(-i).copy()
if abs(l).max() > abstol and abs(l).max()/relmax > reltol:
d[-i] = np.array(l, dtype=dtype)
return SparseMatrix(d, M.shape)
def extract_bc_matrices(mats):
"""Extract boundary matrices from list of ``mats``
Parameters
----------
    mats : list of lists of :class:`.TPMatrix` or :class:`.SparseMatrix`
Returns
-------
list
list of boundary matrices.
Note
----
The ``mats`` list is modified in place since boundary matrices are
extracted.
"""
bc_mats = []
for a in mats:
for b in a.copy():
if isinstance(b, SparseMatrix):
if b.trialfunction[0].boundary_condition() == 'Apply':
bc_mats.append(b)
a.remove(b)
elif isinstance(b, TPMatrix):
if b.is_bc_matrix():
bc_mats.append(b)
a.remove(b)
return bc_mats
def get_dense_matrix_sympy(test, trial, measure=1):
"""Return dense matrix automatically computed from basis
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : Sympy expression of coordinate, or number, optional
Additional weight to integral. For example, in cylindrical
coordinates an additional measure is the radius `r`.
"""
N = test[0].slice().stop - test[0].slice().start
M = trial[0].slice().stop - trial[0].slice().start
V = np.zeros((N, M), dtype=test[0].forward.output_array.dtype)
x = sp.Symbol('x', real=True)
if not measure == 1:
if isinstance(measure, sp.Expr):
s = measure.free_symbols
assert len(s) == 1
x = s.pop()
xm = test[0].map_true_domain(x)
measure = measure.subs(x, xm)
else:
assert isinstance(measure, Number)
# Weight of weighted space
measure *= test[0].weight()
for i in range(test[0].slice().start, test[0].slice().stop):
pi = np.conj(test[0].sympy_basis(i, x=x))
for j in range(trial[0].slice().start, trial[0].slice().stop):
pj = trial[0].sympy_basis(j, x=x)
integrand = sp.simplify(measure*pi.diff(x, test[1])*pj.diff(x, trial[1]))
V[i, j] = integrate_sympy(integrand,
(x, test[0].sympy_reference_domain()[0], test[0].sympy_reference_domain()[1]))
return V
def get_dense_matrix_quadpy(test, trial, measure=1):
"""Return dense matrix automatically computed from basis
Using quadpy to compute the integral adaptively with high accuracy.
This should be equivalent to integrating analytically with sympy,
as long as the integrand is smooth enough and the integral can be
found with quadrature.
Parameters
----------
test : 2-tuple of (basis, int)
The basis is an instance of a class for one of the bases in
- :mod:`.legendre.bases`
- :mod:`.chebyshev.bases`
- :mod:`.fourier.bases`
- :mod:`.laguerre.bases`
- :mod:`.hermite.bases`
- :mod:`.jacobi.bases`
The int represents the number of times the test function
should be differentiated. Representing matrix row.
trial : 2-tuple of (basis, int)
As test, but representing matrix column.
measure : Sympy expression of coordinate, or number, optional
Additional weight to integral. For example, in cylindrical
coordinates an additional measure is the radius `r`.
"""
import quadpy
N = test[0].slice().stop - test[0].slice().start
M = trial[0].slice().stop - trial[0].slice().start
V = np.zeros((N, M), dtype=test[0].forward.output_array.dtype)
x = sp.Symbol('x', real=True)
if not measure == 1:
if isinstance(measure, sp.Expr):
s = measure.free_symbols
assert len(s) == 1
x = s.pop()
xm = test[0].map_true_domain(x)
measure = measure.subs(x, xm)
else:
assert isinstance(measure, Number)
# Weight of weighted space
measure *= test[0].weight()
for i in range(test[0].slice().start, test[0].slice().stop):
pi = np.conj(test[0].sympy_basis(i, x=x))
for j in range(trial[0].slice().start, trial[0].slice().stop):
pj = trial[0].sympy_basis(j, x=x)
integrand = sp.simplify(measure*pi.diff(x, test[1])*pj.diff(x, trial[1]))
if not integrand == 0:
V[i, j] = quadpy.c1.integrate_adaptive(sp.lambdify(x, integrand),
test[0].reference_domain())[0]
return V
|
the-stack_106_26237 | import queue
import consts
import logging
import requests
import threading
from domain.Table import Table
from domain.Waiters import Waiters
logger = logging.getLogger(__name__)
lock = threading.Lock()
class DinningHall:
def __init__(self, config):
self.config = config
self.id_ = config["restaurant_id"]
self.name = f'DH-{self.id_}'
self.done_orders = []
self.orders = []
self.tables = [Table(self, i) for i in range(config['tables_no'])]
self.waiters = [Waiters(self, i) for i in range(config['waiters_no'])]
self.orders_q = queue.Queue()
self.rating_stars = []
self.avg_rating = config['rating']
self.TIME_UNIT = 1
self.free_tables = queue.Queue()
def start_dinning_workers(self):
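        """Mark every table as free and start one daemon thread per waiter to look for orders."""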
for table in self.tables:
self.free_tables.put_nowait(table)
for waiter in self.waiters:
threading.Thread(target=waiter.search_order, args=(self.free_tables, ), daemon=True).start()
def get_restaurant_data(self):
return {'config': self.config}
def get_menu(self):
return {'menu': self.config['menu'], 'restaurant_name': self.config['name']}
def order(self, data):
logger.info(f'{self.name}, NEW order: {data["order_id"]} | request to kitchen PORT: {self.config["kitchen_port"]}\n')
self.orders.append(data)
res = requests.post(f'http://{consts.KH_HOST}:{self.config["kitchen_port"]}/order', json=data)
return res.json()
def get_order(self, order_id):
logger.info(f'{self.name}, client requested for order: {order_id}\n')
order = next((x for x in self.done_orders if x['order_id'] == order_id), None)
if order is not None:
logger.info(f'{self.name}, client received order: {order_id}\n')
return {**order, 'is_ready': True}
else:
logger.info(f'{self.name}, client order: {order_id} is not ready!\n')
return {'order_id': order_id, 'is_ready': False, 'estimated_waiting_time': 3}
def distribution(self, order):
self.done_orders.append(order)
if order['table_id'] is not None:
# serve the order to table
logger.info(f'{self.name} NEW distribution for table: {order["table_id"]}')
waiter = next((w for w in self.waiters if w.id == order['waiter_id']), None)
waiter.serve_order(order)
else:
# keep the order, so client can request it
logger.info(f'{self.name} NEW distribution for client service')
return {'isSuccess': True}
def update_rating(self, data):
lock.acquire()
self.rating_stars.append(data['stars'])
avg = float(sum(s for s in self.rating_stars)) / len(self.rating_stars)
self.avg_rating = avg
lock.release()
logger.info(f'{self.name} order_id: {data["order_id"]} | updated RATING: {self.avg_rating}')
return {'updated_rating': self.avg_rating}
|
the-stack_106_26238 | import logging
from gaphor import UML
from gaphor.core.format import format
from gaphor.core.modeling.properties import attribute
from gaphor.core.styling import (
FontStyle,
FontWeight,
TextAlign,
TextDecoration,
VerticalAlign,
)
from gaphor.diagram.presentation import (
Classified,
ElementPresentation,
from_package_str,
)
from gaphor.diagram.shapes import (
Box,
EditableText,
Text,
draw_border,
draw_top_separator,
)
from gaphor.diagram.support import represents
from gaphor.UML.classes.stereotype import stereotype_compartments
log = logging.getLogger(__name__)
@represents(UML.Class)
@represents(UML.Stereotype)
class ClassItem(ElementPresentation[UML.Class], Classified):
"""This item visualizes a Class instance.
A ClassItem contains two compartments: one for attributes and one
for operations.
"""
def __init__(self, diagram, id=None):
super().__init__(diagram, id=id)
self.watch("show_stereotypes", self.update_shapes).watch(
"show_attributes", self.update_shapes
).watch("show_operations", self.update_shapes).watch(
"subject[NamedElement].name"
).watch(
"subject[NamedElement].namespace.name"
).watch(
"subject[Classifier].isAbstract", self.update_shapes
)
attribute_watches(self, "Class")
operation_watches(self, "Class")
stereotype_watches(self)
show_stereotypes: attribute[int] = attribute("show_stereotypes", int)
show_attributes: attribute[int] = attribute("show_attributes", int, default=True)
show_operations: attribute[int] = attribute("show_operations", int, default=True)
def additional_stereotypes(self):
if isinstance(self.subject, UML.Stereotype):
return ["stereotype"]
elif UML.model.is_metaclass(self.subject):
return ["metaclass"]
else:
return ()
def update_shapes(self, event=None):
self.shape = Box(
Box(
Text(
text=lambda: UML.model.stereotypes_str(
self.subject, self.additional_stereotypes()
),
),
EditableText(
text=lambda: self.subject.name or "",
style={
"font-weight": FontWeight.BOLD,
"font-style": FontStyle.ITALIC
if self.subject and self.subject.isAbstract
else FontStyle.NORMAL,
},
),
Text(
text=lambda: from_package_str(self),
style={"font-size": "x-small"},
),
style={"padding": (12, 4, 12, 4)},
),
*(
self.show_attributes
and self.subject
and [attributes_compartment(self.subject)]
or []
),
*(
self.show_operations
and self.subject
and [operations_compartment(self.subject)]
or []
),
*(self.show_stereotypes and stereotype_compartments(self.subject) or []),
style={
"vertical-align": VerticalAlign.TOP,
},
draw=draw_border,
)
def attribute_watches(presentation, cast):
presentation.watch(
f"subject[{cast}].ownedAttribute", presentation.update_shapes
).watch(
f"subject[{cast}].ownedAttribute.association", presentation.update_shapes
).watch(
f"subject[{cast}].ownedAttribute.name"
).watch(
f"subject[{cast}].ownedAttribute.isStatic", presentation.update_shapes
).watch(
f"subject[{cast}].ownedAttribute.isDerived"
).watch(
f"subject[{cast}].ownedAttribute.visibility"
).watch(
f"subject[{cast}].ownedAttribute.lowerValue"
).watch(
f"subject[{cast}].ownedAttribute.upperValue"
).watch(
f"subject[{cast}].ownedAttribute.defaultValue"
).watch(
f"subject[{cast}].ownedAttribute.type"
).watch(
f"subject[{cast}].ownedAttribute.typeValue"
)
def operation_watches(presentation, cast):
presentation.watch(
f"subject[{cast}].ownedOperation", presentation.update_shapes
).watch(f"subject[{cast}].ownedOperation.name").watch(
f"subject[{cast}].ownedOperation.isAbstract", presentation.update_shapes
).watch(
f"subject[{cast}].ownedOperation.isStatic", presentation.update_shapes
).watch(
f"subject[{cast}].ownedOperation.visibility"
).watch(
f"subject[{cast}].ownedOperation.returnResult.lowerValue"
).watch(
f"subject[{cast}].ownedOperation.returnResult.upperValue"
).watch(
f"subject[{cast}].ownedOperation.returnResult.typeValue"
).watch(
f"subject[{cast}].ownedOperation.formalParameter.lowerValue"
).watch(
f"subject[{cast}].ownedOperation.formalParameter.upperValue"
).watch(
f"subject[{cast}].ownedOperation.formalParameter.typeValue"
).watch(
f"subject[{cast}].ownedOperation.formalParameter.defaultValue"
)
def stereotype_watches(presentation):
presentation.watch("subject.appliedStereotype", presentation.update_shapes).watch(
"subject.appliedStereotype.classifier.name"
).watch("subject.appliedStereotype.slot", presentation.update_shapes).watch(
"subject.appliedStereotype.slot.definingFeature.name"
).watch(
"subject.appliedStereotype.slot.value", presentation.update_shapes
)
def attributes_compartment(subject):
# We need to fix the attribute value, since the for loop changes it.
def lazy_format(attribute):
return lambda: format(attribute)
return Box(
*(
Text(
text=lazy_format(attribute),
style={
"text-align": TextAlign.LEFT,
"text-decoration": TextDecoration.UNDERLINE
if attribute.isStatic
else TextDecoration.NONE,
},
)
for attribute in subject.ownedAttribute
if not attribute.association
),
style={"padding": (4, 4, 4, 4)},
draw=draw_top_separator,
)
def operations_compartment(subject):
def lazy_format(operation):
return lambda: format(
operation, visibility=True, type=True, multiplicity=True, default=True
)
return Box(
*(
Text(
text=lazy_format(operation),
style={
"text-align": TextAlign.LEFT,
"font-style": FontStyle.ITALIC
if operation.isAbstract
else FontStyle.NORMAL,
"text-decoration": TextDecoration.UNDERLINE
if operation.isStatic
else TextDecoration.NONE,
},
)
for operation in subject.ownedOperation
),
style={"padding": (4, 4, 4, 4)},
draw=draw_top_separator,
)
|
the-stack_106_26240 | # Copyright 2020 EraO Prosopagnosia Helper Dev Team, Liren Pan, Yixiao Hong, Hongzheng Xu, Stephen Huang, Tiancong Wang
#
# Supervised by Prof. Steve Mann (http://www.eecg.toronto.edu/~mann/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mysql.connector
import time
from app.sql.config.DbConfig import db_config
from flask import request, redirect, url_for, send_from_directory, render_template, session, g
from werkzeug.utils import secure_filename
from app import webapp
from app.S3Helper import store_file, get_file_path_by_key, create_presigned_url_expanded, delete_file
# The function used to establish connection to sql database
from app.util.AWSHelper import compare_faces
def connect_to_database():
'''
    Function used to connect to the MySQL database.
    :return: a new database connection object
'''
return mysql.connector.connect(user=db_config['user'], password=db_config['password'], host=db_config['host'],
database=db_config['database'], use_pure=True)
def get_database():
'''
Description:
    Return the per-request database connection, creating one via connect_to_database() if needed.
:return: connected database object
'''
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_to_database()
return db
# UPLOAD_FOLDER = '/home/ubuntu/ece1779_projects/img/'
# UPLOAD_FOLDER = '/Users/fredpan/Desktop/output/'
# UPLOAD_FOLDER = '/home/yixiao/Desktop/after/'
UPLOAD_FOLDER = '/'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
webapp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
'''
Description:
    This function checks whether the file has an allowed extension type.
    :param filename: The file name to be checked
    :return: True if the file extension is allowed and False if it is not
'''
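    # e.g. allowed_file("photo.JPG") -> True, allowed_file("notes.txt") -> False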
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# after user click the upload button
@webapp.route('/name_tag_modify', methods=['POST'])
def name_tag_modify():
'''
This function helps modify the name tag of the photo stored in the database
    :return: redirect to file management
'''
name_tag = request.form.get('nameTag', "")
imageName = request.form.get('imageName', "")
# update database
# connect to database and create the record
cnx = get_database()
cursor = cnx.cursor()
sql = "UPDATE file_info SET person_name = %s WHERE cloud_image_name = %s"
val = (name_tag, imageName)
cursor.execute(sql, val)
cnx.commit()
session['info'] = "Name Tag Updated!"
return redirect(url_for('file_management'))
# after user click the upload button
@webapp.route('/delete_image', methods=['POST'])
def delete_image():
'''
This function deletes a stored reference face and also deletes the name tag in the database
    :return: redirect to file management
'''
deleteImageName = request.form.get('deleteImageName', "")
# delete image from s3
    result1 = delete_file(deleteImageName)
    # delete image from database
    result2 = True
try:
cnx = get_database()
cursor = cnx.cursor()
sql = "DELETE FROM file_info WHERE cloud_image_name = %s"
val = (deleteImageName,)
cursor.execute(sql, val)
cnx.commit()
except Exception as ex:
        result2 = False
    if not (result1 and result2):
session['error'] = "A problem occurred while deleting file!"
else:
session['info'] = "File deleted!"
return redirect(url_for('file_management'))
# after user click the upload button
@webapp.route('/which_face', methods=['POST'])
def which_face():
'''
    This function is called from the web page to perform the face comparison.
    :return: the rendered process_image.html page
'''
try:
        if request.method == 'POST':
            # check if the post request has the file part
            if 'img' not in request.files:
                raise Exception("No file upload in the request!")
            img_file = request.files['img']
# test if file too large:
# if user does not select file, browser also
# submit an empty part without filename
if img_file.filename == '':
raise Exception("No file selected!")
if len(img_file.filename) >= 50:
raise Exception("File name too long")
if img_file and allowed_file(img_file.filename):
# ===================================================#
# ======Till this step the file is good to process===#
# ===================================================#
# img_bytes = img_file.read()
store_file('temp_image.jpg', img_file)
match_result, match_output = compare_faces('temp_image.jpg')
temp_image_path = create_presigned_url_expanded('temp_image.jpg')
error_msg = None
info_msg = None
if match_result == False:
info_msg = match_output
return render_template("process_image.html", uploadImagePath=temp_image_path,
match_result=str(match_output), info_msg=info_msg, error_msg=error_msg)
else:
info_msg = "Match succeed, result as follows:"
return render_template("process_image.html", uploadImagePath=temp_image_path,
match_result=str(match_output), info_msg=info_msg, error_msg=error_msg)
else:
raise Exception("Not a Correct File Type!")
except Exception as ex:
print("problem is:", str(ex))
return render_template("process_image.html", error_msg=str(ex))
# after user click the upload button
@webapp.route('/upload', methods=['POST'])
def upload_file():
'''
Description:
    This function is called when the user uploads an image, and it first checks that the upload is valid.
    It raises an exception for any of the following problems: no file selected; filename too long;
    wrong extension type; file too large.
    For a valid upload we connect to the database and create a record. The image is given a systematic
    cloud name built from the user id and a timestamp, the file is stored to the cloud, and a row with
    the original file name, upload path, cloud name, creation time and person name tag is inserted into
    the file_info table.
:return: upload_management.html
'''
try:
        if request.method == 'POST':
            # check if the post request has the file part
            if 'file' not in request.files:
                raise Exception("No file upload in the request!")
            file = request.files['file']
# test if file too large:
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
raise Exception("No file selected!")
if len(file.filename) >= 50:
raise Exception("File name too long")
if file and allowed_file(file.filename):
# ===================================================#
# ======Till this step the file is good to process===#
# ===================================================#
# connect to database and create the record
cnx = get_database()
cursor = cnx.cursor()
                # rename the uploaded img as: uid_timestamp_imagename.extension
userFileName = secure_filename(file.filename) # example: example.jpg
cloudSaveFilename = str(session["uid"]) + "_" + str(time.time()).replace('.',
'') + "_" + userFileName # example: 12_1_example.jpg
store_file(cloudSaveFilename, file)
new_file = get_file_path_by_key(cloudSaveFilename)
# prepare for values for sql
fileName = userFileName
uploadImagePath = UPLOAD_FOLDER + cloudSaveFilename
ts = time.time()
timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
personName = str(time.time()).replace('.', '')
input_name = request.form.get('nameTag', "")
if input_name.strip() != '':
personName = input_name
# update file_name table
query = "INSERT INTO file_info (file_name, upload_image_path, cloud_image_name, create_time, person_name) VALUES (%s, %s, %s, %s, %s)"
data = (fileName, uploadImagePath, cloudSaveFilename, timeStamp, personName)
cursor.execute(query, data)
cnx.commit()
# get the image path for both image_before and image_after
info_msg = "Photo Uploaded Successfully!"
return render_template("upload_management.html",
uploadImagePath=create_presigned_url_expanded(cloudSaveFilename),
fileName=fileName, person_name=personName, info_msg=info_msg)
else:
raise Exception("Not a Correct File Type!")
except Exception as ex:
print("problem is:", str(ex))
return render_template("upload_management.html", error_msg=str(ex))
@webapp.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(webapp.config['UPLOAD_FOLDER'], filename)
@webapp.route('/file_management')
def file_management():
'''
    This function allows the user to view the uploaded images when the URL '/file_management' is requested.
    If the session information is valid, we connect to the database, fetch all stored image records and
    render them in the template.
:return: "file_management.html"
'''
if ('authenticated' in session) and ('username' in session):
# check if the cookie includes username and authenticated flag
if session['authenticated'] == True:
# ==========prepare the loop for flexable amont of images===========#
# connect to database and create the record
cnx = get_database()
cursor = cnx.cursor()
query = "SELECT file_name, cloud_image_name, person_name FROM file_info"
cursor.execute(query)
results = cursor.fetchall()
# if there is no uploaded image:
if len(results) == 0:
return render_template("file_management.html", fileNumber=0, dictList=[])
# if there exists uploaded image:
else:
# need following args for render html template : dictList, filenumber>0
# for each dictionary in dictList, 5 elements:
# modelName: ex. model1
# cloudSaveFilename: ex. 07_2_example.jpg
# cloudProcessedFileName ex. p_07_2_example.jpg
# userFileName: ex. example.jpg
# processedUserFileName: ex. processed_example.jpg
dictList = []
fileNumber = len(results)
# build the dictList
for i in range(fileNumber):
newdict = dict()
newdict["userFileName"] = results[i][0]
newdict["cloudSaveFilename"] = create_presigned_url_expanded(results[i][1])
newdict["cloudImageName"] = results[i][1]
newdict["personName"] = results[i][2]
newdict["modalID"] = "modal" + str(i)
newdict["buttonID"] = "button" + str(i)
newdict["closeID"] = "close" + str(i)
dictList.append(newdict)
info_msg = None
if ('info' in session):
info_msg = session['info']
session.pop('info')
error_msg = None
if ('error' in session):
error_msg = session['error']
session.pop('error')
return render_template("file_management.html", fileNumber=fileNumber, dictList=dictList,
info_msg=info_msg, error_msg=error_msg)
else:
return redirect(url_for('user_login'))
|
the-stack_106_26242 | """
The main module containing functions for parsing text strings into crs objects.
"""
# possible use module: https://github.com/rockdoc/grabbag/wiki/CRS-WKT-Parser
# also note some paramter descriptions: http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html
# and see gdal source code: http://gis.stackexchange.com/questions/129764/how-are-esri-wkt-projections-different-from-ogc-wkt-projections
# especially: http://fossies.org/windows/misc/saga_2.1.4_x64.zip/saga_2.1.4_x64/saga_prj.dic
# also: http://saga.sourcearchive.com/documentation/2.0.7plus-pdfsg-2/crs__base_8cpp_source.html
from .elements import datums
from .elements import ellipsoids
from .elements import parameters
from .elements import containers
from .elements import units
from .elements import projections
from . import utils
def from_epsg_code(code):
"""
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- CRS object.
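    Example (requires network access to spatialreference.org):
    - crs = from_epsg_code(4326)  # WGS 84 longitude-latitude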
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("epsg", code, "proj4")
crs = from_proj4(proj4)
return crs
def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- CRS object.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("esri", code, "proj4")
crs = from_proj4(proj4)
return crs
def from_sr_code(code):
"""
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- CRS object.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("sr-org", code, "proj4")
crs = from_proj4(proj4)
return crs
def from_ogc_wkt(string, strict=False):
"""
Parse crs as ogc wkt formatted string and return the resulting crs object.
Arguments:
- *string*: The OGC WKT representation as a string.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
# parse arguments into components
# use args to create crs
return _from_wkt(string, "ogc", strict)
def from_esri_wkt(string, strict=False):
"""
Parse crs as esri wkt formatted string and return the resulting crs object.
Arguments:
- *string*: The ESRI WKT representation as a string.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
# parse arguments into components
# use args to create crs
return _from_wkt(string, "esri", strict)
def from_unknown_wkt(string, strict=False):
"""
Given an unknown wkt string, detect if uses ogc or esri flavor, and parse the crs accordingly.
Arguments:
- *string*: The unknown WKT representation as a string.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
# parse arguments into components
# use args to create crs
return _from_wkt(string, None, strict)
def _from_wkt(string, wkttype=None, strict=False):
"""
Internal method for parsing wkt, with minor differences depending on ogc or esri style.
Arguments:
- *string*: The OGC or ESRI WKT representation as a string.
- *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default).
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
# TODO
# - Make function for finding next elemt by name, instead of knowing its arg index position
# - Maybe verify elem arg name
# make sure valid wkttype
if wkttype: wkttype = wkttype.lower()
assert wkttype in ("ogc","esri",None)
# remove newlines and multi spaces
string = " ".join(string.split())
# parse arguments into components
def _consume_bracket(chars, char):
"char must be the opening bracket"
consumed = ""
depth = 1
while char and depth > 0:
consumed += char
char = next(chars, None)
# update depth level
if char == "[":
depth += 1
elif char == "]":
depth -= 1
consumed += char # consume the last closing char too
return consumed
def _consume_quote(chars, char, quotechar):
"char and quotechar must be the opening quote char"
consumed = ""
# consume the first opening char
consumed += char
char = next(chars, None)
# consume inside
while char and char != quotechar:
consumed += char
char = next(chars, None)
# consume the last closing char too
consumed += char
return consumed
def _next_elem(chars, char):
"char must be the first char of the text that precedes brackets"
header = ""
# skip until next header
while not char.isalpha():
char = next(chars, None)
# first consume the element text header
while char.isalpha():
header += char
char = next(chars, None)
# skip until next brackets (in case of spaces)
while char != "[":
char = next(chars, None)
# then consume the element bracket contents
if char == "[":
content = _consume_bracket(chars, char)
char = next(chars, None)
# split content into args list
content = content[1:-1] # remove enclosing brackets
content = _split_except(content)
# recursively load all subelems
for i,item in enumerate(content):
if isinstance(item, str) and "[" in item:
chars = (char for char in item)
char = next(chars)
item = _next_elem(chars, char)
content[i] = item
return header, content
def _clean_value(string):
string = string.strip()
try: string = float(string)
except: pass
return string
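    # e.g. _split_except('PARAMETER["x",1],UNIT["Meter",1.0]') -> ['PARAMETER["x",1]', 'UNIT["Meter",1.0]']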
def _split_except(string):
"split the string on every comma, except not while inside quotes or square brackets"
chars = (char for char in string)
char = next(chars)
items = []
consumed = ""
while char:
# dont split on quotes, just consume it
if char in ("'", '"'):
consumed += _consume_quote(chars, char, char)
# dont split inside brackets, just consume it
elif char == "[":
consumed += _consume_bracket(chars, char)
# new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar
elif char == ",":
consumed = _clean_value(consumed)
items.append(consumed)
consumed = ""
# consume normal char
elif char:
consumed += char
# next
char = next(chars, None)
# append last item too
consumed = _clean_value(consumed)
items.append(consumed)
return items
# load into nested tuples and arglists
crstuples = []
chars = (char for char in string)
char = next(chars)
while char:
header,content = _next_elem(chars, char)
crstuples.append((header, content))
char = next(chars, None)
# autodetect wkttype if not specified
if not wkttype:
topheader,topcontent = crstuples[0]
if topheader == "PROJCS":
geogcsheader,geogcscontent = topcontent[1]
elif topheader == "GEOGCS":
geogcsheader,geogcscontent = topheader,topcontent
# datum elem should be second under geogcs
datumheader, datumcontent = geogcscontent[1]
datumname = datumcontent[0].upper().strip('"')
# esri wkt datums all use "D_" before the datum name
if datumname.startswith("D_"):
wkttype = "esri"
else:
wkttype = "ogc"
# parse into actual crs objects
def _parse_top(header, content):
"procedure for parsing the toplevel crs element and all its children"
if header.upper() == "PROJCS":
# find name
name = content[0].strip('"')
# find geogcs elem (by running parse again)
subheader, subcontent = content[1]
geogcs = _parse_top(subheader, subcontent)
# find projection elem
subheader, subcontent = content[2]
projname = subcontent[0].strip('"')
projclass = projections.find(projname, "%s_wkt" % wkttype, strict)
if projclass:
projdef = projclass()
proj = containers.Projection(projdef)
else:
raise Exception("The specified projection name could not be found")
# find params
params = []
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PARAMETER":
name, value = subcontent[0].strip('"'), subcontent[1]
itemclass = parameters.find(name, "%s_wkt" % wkttype, strict)
if itemclass:
item = itemclass(value)
params.append(item)
# find unit
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "UNIT":
break
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
unittype = parameters.UnitType(unit)
else:
unit = units.Unknown()
unittype = parameters.UnitType(unit)
metmult = parameters.MeterMultiplier(value)
linunit = parameters.Unit(unittype, metmult)
# find twin axis maybe
## if len(content) >= 6:
## twinax = (parameters.Axis(
## else:
## twinax = None
# put it all together
projcs = containers.ProjCS("Unknown", geogcs, proj, params, linunit) #, twinax)
return projcs
elif header.upper() == "GEOGCS":
# name
name = content[0].strip('"')
# datum
subheader, subcontent = content[1]
## datum name
datumname = subcontent[0].strip('"')
datumclass = datums.find(datumname, "%s_wkt" % wkttype, strict)
if datumclass:
datumdef = datumclass()
else:
datumdef = datums.Unknown()
## datum ellipsoid
subsubheader, subsubcontent = subcontent[1]
ellipsname = subsubcontent[0].strip('"')
ellipsclass = ellipsoids.find(ellipsname, "%s_wkt" % wkttype, strict)
if ellipsclass:
ellipsdef = ellipsclass()
else:
ellipsdef = ellipsoids.Unknown()
ellipsoid = containers.Ellipsoid(ellipsdef, subsubcontent[1], subsubcontent[2])
## datum shift
if wkttype == "ogc":
for subsubheader,subsubcontent in subcontent[1:]:
if subsubheader == "TOWGS84":
datumshift = parameters.DatumShift(subsubcontent)
break
else:
datumshift = None
elif wkttype == "esri":
# not used in esri wkt
datumshift = None
            ## put it all together
datum = containers.Datum(datumdef, ellipsoid, datumshift)
# prime mer
subheader, subcontent = content[2]
prime_mer = parameters.PrimeMeridian(subcontent[1])
# angunit
subheader, subcontent = content[3]
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
unittype = parameters.UnitType(unit)
else:
unit = units.Unknown()
unittype = parameters.UnitType(unit)
metmult = parameters.MeterMultiplier(value)
angunit = parameters.AngularUnit(unittype, metmult)
# twin axis
# ...
# put it all together
geogcs = containers.GeogCS(name, datum, prime_mer, angunit, twin_ax=None)
return geogcs
# toplevel collection
header, content = crstuples[0]
toplevel = _parse_top(header, content)
crs = containers.CRS(toplevel)
# use args to create crs
return crs
def from_proj4(proj4, strict=False):
"""
Parse crs as proj4 formatted string or dict and return the resulting crs object.
Arguments:
- *proj4*: The proj4 representation as a string or dict.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
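    Example:
    - crs = from_proj4("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")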
"""
# parse arguments into components
# use args to create crs
    # TODO: SLIGHTLY MESSY STILL, CLEANUP..
params = []
if isinstance(proj4, dict):
# add leading + sign as expected below, proj4 dicts do not have that
partdict = dict([('+'+k,v) for k,v in proj4.items()])
else:
partdict = dict([part.split("=") for part in proj4.split()
if len(part.split("=")) == 2 ])
# INIT CODES
# eg, +init=EPSG:1234
if "+init" in partdict:
# first, get the default proj4 string of the +init code
codetype, code = partdict["+init"].split(":")
if codetype == "EPSG":
initproj4 = utils.crscode_to_string("epsg", code, "proj4")
elif codetype == "ESRI":
initproj4 = utils.crscode_to_string("esri", code, "proj4")
# make the default into param dict
initpartdict = dict([part.split("=") for part in initproj4.split()
if len(part.split("=")) == 2 ])
# override the default with any custom params specified along with the +init code
initpartdict.update(partdict)
# rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code
del initpartdict["+init"]
string = " ".join("%s=%s" % (key,val) for key,val in initpartdict.items())
return from_proj4(string)
# DATUM
# datum param is required
if "+datum" in partdict:
# get predefined datum def
datumname = partdict["+datum"]
datumclass = datums.find(datumname, "proj4", strict)
if datumclass:
datumdef = datumclass()
else:
datumdef = datums.Unknown()
else:
datumdef = datums.Unknown()
# ELLIPS
# ellipse param is required
if "+ellps" in partdict:
# get predefined ellips def
ellipsname = partdict["+ellps"]
ellipsclass = ellipsoids.find(ellipsname, "proj4", strict)
if ellipsclass:
ellipsdef = ellipsclass
elif "+a" in partdict and "+f" in partdict:
ellipsdef = ellipsoids.Unknown()
elif "+a" in partdict and "+rf" in partdict:
ellipsdef = ellipsoids.Unknown()
else:
raise Exception("The specified ellipsoid name could not be found, and there was no manual specification of the semimajor axis and inverse flattening to use as a substitute.")
elif "+a" in partdict and "+f" in partdict:
# alternatively, it is okay with a missing ellipsoid if +a and +f are specified
# TODO: +f seems to never be specified when +ellps is missing, only +a and +b, look into...
ellipsdef = ellipsoids.Unknown()
elif "+a" in partdict and "+rf" in partdict:
ellipsdef = ellipsoids.Unknown()
else:
raise Exception("Could not find the required +ellps element, nor a manual specification of the +a or +f elements, or +a and +rf elements.")
# TO WGS 84 COEFFS
if "+towgs84" in partdict:
coeffs = partdict["+towgs84"].split(",")
datumshift = parameters.DatumShift(coeffs)
# TODO: if no datum, use ellips + towgs84 params to create the correct datum
# ...??
# COMBINE DATUM AND ELLIPS
## create datum and ellips param objs
if "+rf" in partdict:
inv_flat = partdict.get("+rf")
elif "+f" in partdict:
inv_flat = 1.0 / float(partdict.get("+f"))
elif ellipsdef.inv_flat is not None:
inv_flat = ellipsdef.inv_flat
else:
raise Exception("Could not find the required +ellps element, nor a manual specification of the +f or +rf elements.")
ellips = containers.Ellipsoid(ellipsdef,
semimaj_ax=partdict.get("+a"),
inv_flat=inv_flat)
if "+datum" in partdict:
datum = containers.Datum(datumdef, ellips)
elif "+towgs84" in partdict:
datum = containers.Datum(datumdef, ellips, datumshift)
else:
datum = containers.Datum(datumdef, ellips)
# PRIME MERIDIAN
# set default
prime_mer = parameters.PrimeMeridian(0)
# overwrite with user input
if "+pm" in partdict:
# for now only support longitude, later add name support from below:
## greenwich 0dE
## lisbon 9d07'54.862"W
## paris 2d20'14.025"E
## bogota 74d04'51.3"E
## madrid 3d41'16.48"W
## rome 12d27'8.4"E
## bern 7d26'22.5"E
## jakarta 106d48'27.79"E
## ferro 17d40'W
## brussels 4d22'4.71"E
## stockholm 18d3'29.8"E
## athens 23d42'58.815"E
## oslo 10d43'22.5"E
prime_mer = parameters.PrimeMeridian(partdict["+pm"])
# ANGULAR UNIT
## proj4 cannot set angular unit, so just set to default
metmulti = parameters.MeterMultiplier(0.017453292519943295)
unittype = parameters.UnitType(units.Degree())
angunit = parameters.AngularUnit(unittype, metmulti)
# GEOGCS (note, currently does not load axes)
geogcs = containers.GeogCS("Unknown", datum, prime_mer, angunit) #, twin_ax)
# PROJECTION
if "+proj" in partdict:
# get predefined proj def
projname = partdict["+proj"]
projclass = projections.find(projname, "proj4", strict)
if projclass:
projdef = projclass()
elif projname == "longlat":
# proj4 special case, longlat as projection name means unprojected geogcs
projdef = None
else:
raise Exception("The specified projection name could not be found")
else:
raise Exception("Could not find required +proj element")
if projdef:
# create proj param obj
proj = containers.Projection(projdef)
# Because proj4 has no element hierarchy, using automatic element find() would
# ...would not be very effective, as that would need a try-fail approach for each
# ...element type (parameter, projection, datum, ellipsoid, unit).
# ...Instead load each element individually.
# CENTRAL MERIDIAN
if "+lon_0" in partdict:
val = partdict["+lon_0"]
obj = parameters.CentralMeridian(val)
params.append(obj)
# FALSE EASTING
if "+x_0" in partdict:
val = partdict["+x_0"]
obj = parameters.FalseEasting(val)
params.append(obj)
# FALSE NORTHING
if "+y_0" in partdict:
val = partdict["+y_0"]
obj = parameters.FalseNorthing(val)
params.append(obj)
# SCALING FACTOR
if "+k_0" in partdict or "+k" in partdict:
if "+k_0" in partdict: val = partdict["+k_0"]
elif "+k" in partdict: val = partdict["+k"]
obj = parameters.ScalingFactor(val)
params.append(obj)
# LATITUDE ORIGIN
if "+lat_0" in partdict:
val = partdict["+lat_0"]
obj = parameters.LatitudeOrigin(val)
params.append(obj)
# LATITUDE TRUE SCALE
if "+lat_ts" in partdict:
val = partdict["+lat_ts"]
obj = parameters.LatitudeTrueScale(val)
params.append(obj)
# LONGITUDE CENTER
if "+lonc" in partdict:
val = partdict["+lonc"]
obj = parameters.LongitudeCenter(val)
params.append(obj)
# AZIMUTH
if "+alpha" in partdict:
val = partdict["+alpha"]
obj = parameters.Azimuth(val)
params.append(obj)
# STD PARALLEL 1
if "+lat_1" in partdict:
val = partdict["+lat_1"]
obj = parameters.LatitudeFirstStndParallel(val)
params.append(obj)
# STD PARALLEL 2
if "+lat_2" in partdict:
val = partdict["+lat_2"]
obj = parameters.LatitudeSecondStndParallel(val)
params.append(obj)
# SATELLITE HEIGHT
if "+h" in partdict:
val = partdict["+h"]
obj = parameters.SatelliteHeight(val)
params.append(obj)
# TILT ANGLE
if "+tilt" in partdict:
val = partdict["+tilt"]
obj = parameters.TiltAngle(val)
params.append(obj)
# UNIT
# get values
if "+units" in partdict:
# unit name takes precedence over to_meter
unitname = partdict["+units"]
unitclass = units.find(unitname, "proj4", strict)
if unitclass:
unit = unitclass()
unittype = parameters.UnitType(unit)
metmulti = parameters.MeterMultiplier(unit.to_meter) # takes meter multiplier from name, ignoring any custom meter multiplier
else:
raise Exception("The specified unit name could not be found")
elif "+to_meter" in partdict:
# no unit name specified, only to_meter conversion factor
unittype = parameters.UnitType(units.Unknown())
metmulti = parameters.MeterMultiplier(partdict["+to_meter"])
else:
# if nothing specified, defaults to meter
unittype = parameters.UnitType(units.Meter())
metmulti = parameters.MeterMultiplier(1.0)
## create unitobj
unit = parameters.Unit(unittype, metmulti)
# PROJCS
projcs = containers.ProjCS("Unknown", geogcs, proj, params, unit)
# CRS
crs = containers.CRS(projcs)
else:
# means projdef was None, ie unprojected longlat geogcs
crs = containers.CRS(geogcs)
# FINISHED
return crs
##def from_ogc_urn(string, strict=False):
## # hmmm, seems like ogc urn could be anything incl online link, epsg, etc...
## # if necessary, must go online (or lookup local table) to get details
## # maybe test which of these and run their function?
## # examples urn:ogc:def:crs:OGC:1.3:CRS1
## # or with EPSG instead of OGC
##
## # If OGC, 1.3 is pdf version, and after that is a name from list below
## # as found in pdf: "Definition identifier URNs in OGC namespace"
## # OGC crs definitions
## # URN | CRS name | Definition reference
## # urn:ogc:def:crs:OGC:1.3:CRS1 Map CS B.2 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:CRS84 WGS 84 longitude-latitude B.3 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:CRS83 NAD83 longitude-latitude B.4 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:CRS27 NAD27 longitude-latitude B.5 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:CRS88 NAVD 88 B.6 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:AUTO42001:99:8888 Auto universal transverse mercator B.7 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:AUTO42002:99:8888 Auto transverse mercator B.8 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:AUTO42003:99:8888 Auto orthographic B.9 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:AUTO42004:99:8888 Auto equirectangular B.10 in OGC 06-042
## # urn:ogc:def:crs:OGC:1.3:AUTO42005:99 Auto Mollweide B.11 in OGC 06-042
##
## pass
def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
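    Example:
    - crs = from_unknown_text("EPSG:4326")  # detected and handled as an EPSG code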
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise Exception("Could not detect which type of crs")
return crs
##def from_geotiff_parameters(**params):
## pass
|
the-stack_106_26244 | """
Test module for . . .
"""
# Standard library imports
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import logging
from os.path import abspath, dirname, join, realpath
from sys import path
# Third party imports
import pytest
# Local imports
logger = logging.getLogger(__name__)
test_dir = realpath(dirname(__file__))
src_dir = abspath(join(test_dir, '..'))
path.append(src_dir)
print(path)
import pypiserver
@pytest.mark.parametrize('conf_options', [
{},
{'root': '~/stable_packages'},
{'root': '~/unstable_packages', 'authenticated': 'upload',
'passwords': '~/htpasswd'},
# Verify that the strip parser works properly.
{'authenticated': str('upload')},
])
def test_paste_app_factory(conf_options, monkeypatch):
"""Test the paste_app_factory method"""
monkeypatch.setattr('pypiserver.core.configure',
lambda **x: (x, [x.keys()]))
pypiserver.paste_app_factory({}, **conf_options)
def test_app_factory(monkeypatch):
monkeypatch.setattr('pypiserver.core.configure',
lambda **x: (x, [x.keys()]))
assert pypiserver.app() is not pypiserver.app()
|
the-stack_106_26246 | """
Transaction support for Gaphor
"""
import logging
from typing import List
from gaphor import application
from gaphor.event import TransactionBegin, TransactionCommit, TransactionRollback
log = logging.getLogger(__name__)
def transactional(func):
"""The transactional decorator makes a function transactional. A
Transaction instance is created before the decorated function is called.
If calling the function leads to an exception being raised, the transaction
is rolled-back. Otherwise, it is committed."""
def _transactional(*args, **kwargs):
r = None
event_manager = application.Application.get_service("event_manager")
tx = Transaction(event_manager)
try:
r = func(*args, **kwargs)
except Exception:
log.error(
"Transaction terminated due to an exception, performing a rollback",
exc_info=True,
)
try:
tx.rollback()
except Exception:
log.error("Rollback failed", exc_info=True)
raise
else:
tx.commit()
return r
return _transactional
class TransactionError(Exception):
"""
Errors related to the transaction module.
"""
class Transaction:
"""
The transaction. On start and end of a transaction an event is emitted.
>>> import gaphor.services.eventmanager
>>> event_manager = gaphor.services.eventmanager.EventManager()
Transactions can be nested. If the outermost transaction is committed or
rolled back, an event is emitted.
Events can be handled programmatically:
>>> tx = Transaction(event_manager)
>>> tx.commit()
It can be assigned as decorator:
>>> @transactional
... def foo():
... pass
Or with the ``with`` statement:
>>> with Transaction(event_manager):
... pass
"""
_stack: List = []
def __init__(self, event_manager):
"""Initialize the transaction. If this is the first transaction in
the stack, a TransactionBegin event is emitted."""
self.event_manager = event_manager
self._need_rollback = False
if not self._stack:
self._handle(TransactionBegin())
self._stack.append(self)
def commit(self):
"""Commit the transaction. First, the transaction is closed.
If it needs to be rolled-back, a TransactionRollback event is emitted.
Otherwise, a TransactionCommit event is emitted."""
self._close()
if not self._stack:
if self._need_rollback:
self._handle(TransactionRollback())
else:
self._handle(TransactionCommit())
def rollback(self):
"""Roll-back the transaction. First, the transaction is closed.
Every transaction on the stack is then marked for roll-back. If
the stack is empty, a TransactionRollback event is emitted."""
self._close()
for tx in self._stack:
tx._need_rollback = True
        if not self._stack:
            self._handle(TransactionRollback())
def _close(self):
"""Close the transaction. If the stack is empty, a TransactionError
is raised. If the last transaction on the stack isn't this transaction,
        a TransactionError is raised."""
try:
last = self._stack.pop()
except IndexError:
raise TransactionError("No Transaction on stack.")
if last is not self:
self._stack.append(last)
raise TransactionError(
"Transaction on stack is not the transaction being closed."
)
def _handle(self, event):
try:
event_manager = self.event_manager
except (application.NotInitializedError, application.ComponentLookupError):
log.warning("Could not lookup event_manager. Not emitting events.")
else:
event_manager.handle(event)
def __enter__(self):
"""Provide with-statement transaction support."""
return self
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
"""Provide with-statement transaction support. If an error occurred,
the transaction is rolled back. Otherwise, it is committed."""
if exc_type:
self.rollback()
else:
self.commit()
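
if __name__ == "__main__":
    # Minimal sketch: any object with a ``handle(event)`` method stands in for
    # the real gaphor EventManager here; the class below is an assumption made
    # purely for illustration.
    class _PrintingEventManager:
        def handle(self, event):
            print("handled:", type(event).__name__)

    manager = _PrintingEventManager()
    with Transaction(manager):      # outermost transaction emits TransactionBegin
        with Transaction(manager):  # nested transaction emits nothing extra
            pass
    # leaving the outermost transaction emits TransactionCommit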
|
the-stack_106_26247 | # -*- encoding: utf-8 -*-
import os
from datetime import datetime
from boto3.session import Session
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Backs up PostgreSQL database to AWS S3'
def handle(self, *args, **options):
AWS_ACCESS_KEY_ID = os.environ.get('AWS_S3_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_S3_SECRET_ACCESS_KEY')
AWS_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME')
AWS_BUCKET_NAME = os.environ.get('AWS_S3_BUCKET_NAME')
session = Session(aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
region_name=AWS_REGION_NAME)
s3 = session.resource('s3')
bucket = s3.Bucket(AWS_BUCKET_NAME)
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H%M_UTC')
db_file = "pgbackup_{}.dump".format(timestamp)
db_host = settings.DATABASES['default']['HOST']
db_port = settings.DATABASES['default']['PORT']
db_name = settings.DATABASES['default']['NAME']
db_user = settings.DATABASES['default']['USER']
# See command definitions at http://www.postgresql.org/docs/9.4/static/app-pgdump.html
pg_dump_command = "pg_dump --host={host} --port={port} --user={user} --format=c -O -x --file={file_name} {name}".format(
host=db_host,
port=db_port,
user=db_user,
file_name=db_file,
name=db_name)
self.stdout.write("Enter {}'s psql password".format(db_user))
os.system(pg_dump_command)
bucket.upload_file(db_file, db_file)
os.system("rm {}".format(db_file))
self.stdout.write("{} successfully uploaded to AWS S3".format(db_file))
|
the-stack_106_26248 | import gzip
import itertools
import re
import zlib
from typing import Tuple, List
from .structs.edge import Edge
from .structs.graph import Graph
from .structs.node import Node
from .utils import smiles2mol
verbose = False
def trace(msg):
if (verbose):
print("[parse.py] " + msg)
# Type ALIASES
Atom = str
Bond = Tuple[str, str, str]
Mapping_ID_Graph = Tuple[str, Graph]
def flatmap(func, *iterable):
return itertools.chain.from_iterable(map(func, *iterable))
def decompress(val):
try:
s = zlib.decompress(val, 16 + zlib.MAX_WBITS)
except:
return val
return s
def decode_utf8(binary_str):
try:
return binary_str.decode("utf-8")
except Exception as e:
print(e)
return None
def stripcomments(txt):
    return re.sub(r'//.*?\n|/\*.*?\*/', '', txt, flags=re.S)
def from_abstract(file_path: str) -> List[Graph]:
"""
Abstract method
For signature purpose, do not use
"""
raise NotImplementedError()
# –––––––––––––––––––– PARSE FUNCTIONS ; CONSUME STRINGS
def parse_dimacs(text_content) -> Graph:
directed = False
graph_id = ""
graph_strict = False
lines = text_content.split("\n")
header = lines.pop(0)
while not header.startswith('p '):
header = lines.pop(0)
(vertices, edges) = (int(header.split(' ')[2]), int(header.split(' ')[3]))
ids_nodes = [str(id_node) for id_node in range(1, vertices + 1)]
graph = Graph(id="")
nodes = [Node(str(id_node), ".") for id_node in ids_nodes]
graph.add_nodes(nodes)
edges = []
id_edge = 1
for line in lines:
if len(line.split(' ')) == 3:
tokens = line.split(' ')
id_a = tokens[1]
id_b = tokens[2]
edges.append(Edge(id_edge, graph.V[id_a], graph.V[id_b], 1))
id_edge += 1
graph.add_edges(edges)
return graph
def parse_dot(text_content) -> Graph:
directed = False
graph_id = ""
graph_strict = False
lines = text_content.split("\n")
line = ""
# trim first line
while line == "":
line = lines.pop(0)
# FIRST LINE
tokens = line.split()
token = tokens.pop(0)
if token == "strict":
graph_strict = True
token = tokens.pop(0)
if token == "digraph":
directed = True
token = tokens.pop(0)
elif token == "graph":
token = tokens.pop(0)
else:
raise Exception('graph|digraph token not found')
if token != "{":
graph_id = token
token = tokens.pop(0)
if token != "{":
raise Exception('expected "{" not found')
# BODY
edgeop = "->" if directed else "--"
nodes = []
edges = []
while lines[0].strip() != "}":
line = lines.pop(0)
        if len(line.strip()) == 0:
            continue
line = stripcomments(line)
opts = re.search(r"\[(.*)\]", line)
opts_dict = {}
if opts:
for opt in opts.group(1).split(','):
k = opt.split("=")[0].strip()
v = opt.split("=")[1].strip()
opts_dict[k] = v
tokens = line.split()
if edgeop in tokens: # edge declaration
id_a = tokens.pop(0)
assert tokens.pop(0) == edgeop
id_b = tokens.pop(0)
mod = str(opts_dict["weight"]) if "weight" in opts_dict else "1"
edges.append((id_a, id_b, mod))
elif tokens: # node declaration
id_node = tokens.pop(0)
label = str(opts_dict["label"]) if "label" in opts_dict else ""
nodes.append((id_node, label))
# print(nodes)
# print(edges)
graph = Graph(id=graph_id)
nodes = [Node(str(id_node), label) for (id_node, label) in nodes]
graph.add_nodes(nodes)
edges = [Edge(i, graph.V[a], graph.V[b], mod) for (i, (a, b, mod)) in enumerate(edges, 1)]
graph.add_edges(edges)
# print(graph)
return graph
def parse_smiles(smiles: str, ignore_hydrogens=False) -> Graph:
"""
parse method from_dot
"""
mol_block = "Fake_ID" + smiles2mol.to_mol_block(smiles)
mol = parse_Mol(mol_block, ignore_hydrogens)
return Mol_to_Graph(mol[0], mol[1])
def parse_Mol(mol_content: str, ignore_hydrogens=False) -> (List[Atom], List[Bond]):
"""
parseMol
Parse a Mol block into two lists
- atoms
- bonds
"""
trace("[parse_Mol]")
trace(str(mol_content))
try:
lines = mol_content.split('\n')
while not lines[0]:
lines.pop(0)
trace("""\tMOL HEADER
{0}
{1}
{2}
{3}
""".format(lines[0].split(), lines[1].split(), lines[2].split(), lines[3].split()))
# following the standard, the two first integers on the 4th line are respectively atoms_nb and bonds_nb
(atoms_nb, bonds_nb) = tuple([int(i) for i in lines[3].split() if i.isdigit()][:2])
atoms = lines[4: 4 + atoms_nb]
atoms = [line for line in lines if len(line.split()) >= 4 and line.split()[3].isalpha()]
trace(str(atoms))
bonds = lines[4 + atoms_nb: 4 + atoms_nb + bonds_nb]
# bonds = [ line for line in lines if len(line.split()) == 7 and all([ float(i).is_integer() for i in line.split()])]
trace(str(bonds))
atoms = [atom.split()[3] for atom in atoms]
bonds = [tuple(bond.split()[:3]) for bond in bonds]
trace("""\t{0} ATOMS annouced ; {1} ATOMS found
{2}
\n
        {3} BONDS announced ; {4} BONDS found
{5}
""".format(str(atoms_nb), len(atoms), atoms, str(bonds_nb), len(bonds), bonds))
# assert len(atoms) == atoms_nb
# assert len(bonds) == bonds_nb
if ignore_hydrogens:
H_indexes = [str(i) for (i, atom_symbol) in enumerate(atoms, 1) if str(atom_symbol) == "H"]
atoms = [atom for atom in atoms if atom != "H"]
bonds = [bond for bond in bonds if (bond[0] not in H_indexes and bond[1] not in H_indexes)]
trace("""\t{0} ATOMS != H
{1}
\n
        {2} BONDS != H
{3}
H_indexes : {4}
""".format(len(atoms), atoms, len(bonds), bonds, H_indexes))
return (atoms, bonds)
except Exception as e:
print(str(e))
raise (e)
return (None, None)
def is_Mol(block: str) -> bool:
"""
isMol
    Checks whether an SDF file block is a Mol block.
    If the block contains a ">" (a data-item header), it is not a Mol block.
"""
trace("[isMol]")
trace(str(block))
trace(str((not ">" in block)))
return not ">" in block
def Mol_to_Graph(atoms: List[Atom], bonds: List[Bond]) -> Graph:
"""
    NOTE : enumerate is used with an offset of 1 because, obviously, chemists start indexing from 1
"""
trace("[Mol_to_Graph]")
trace(str(atoms))
trace(str(bonds))
if not atoms:
return None
graph = Graph()
nodes = [Node(str(i), label) for (i, label) in enumerate(atoms, 1)]
graph.add_nodes(nodes)
edges = [Edge(i, graph.V[a], graph.V[b], mod) for (i, (a, b, mod)) in enumerate(bonds, 1)]
graph.add_edges(edges)
return graph
# –––––––––––––––––––– FROM FUNCTIONS ; CONSUME FILES
def from_dimacs(dimacs_content: str = None, file_path: str = None) -> List[Graph]:
"""
parse method from_dot
Parses a dimacs file
"""
if file_path:
if (file_path.endswith('.gz')):
fp = gzip.open(file_path, 'rt', encoding='utf-8')
dimacs_content = fp.read()
else:
with open(file_path, 'r') as content_file:
dimacs_content = content_file.read()
return [parse_dimacs(dimacs_content)]
def from_dot(dot_content: str = None, file_path: str = None) -> List[Graph]:
"""
parse method from_dot
Parses a dot file
"""
if file_path:
if (file_path.endswith('.gz')):
fp = gzip.open(file_path, 'rt', encoding='utf-8')
dot_content = fp.read()
else:
with open(file_path, 'r') as content_file:
dot_content = content_file.read()
dots = dot_content.split('}')
return [parse_dot(dot + '}') for dot in dots[:-1:]]
def from_sdf(sdf_content: str = None, file_path: str = None, ignore_hydrogens=False) -> List[Graph]:
"""
parse graph from_sdf
Read chemical files and parses them into instances of `Graph`.
As this function is not meant to be called in a loop,
inner functions only relative to chemical files parsing are declared.
Type Aliases :
Atom = str
Bond = List[str]
"""
if file_path:
if (file_path.endswith('.gz')):
fp = gzip.open(file_path, 'rt', encoding='utf-8')
sdf_content = fp.read()
else:
with open(file_path, 'r') as content_file:
sdf_content = content_file.read()
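    # Nested comprehensions, read inside out: split the dump on the "$$$$"
    # record separator, split each record on "M END" and keep the leading Mol
    # block, parse every Mol block, then turn each (atoms, bonds) pair into a
    # Graph. (Note: is_Mol() receives the split list here, so that filter is
    # effectively always true.)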
return [
Mol_to_Graph(mol[0], mol[1])
for mol
in [
parse_Mol(mol_file, ignore_hydrogens)
for mol_file
in [
part[0]
for part
in [
compound.split('M END')
for compound
in sdf_content.split("$$$$")
if (compound.strip(' \t\n\r') != '')
]
if is_Mol(part)
]
]
]
def from_pubchem_xml(xml_content: str = None, file_path: str = None, ignore_hydrogens=False,
ensure_uq_covalent_unit=True) -> List[Graph]:
return [t[1] for t in map_pubchem_xml(xml_content, file_path, ignore_hydrogens, ensure_uq_covalent_unit)]
def map_pubchem_xml(xml_content: str = None, file_path: str = None, ignore_hydrogens=False,
ensure_uq_covalent_unit=True) -> List[Mapping_ID_Graph]:
"""
parse graph from pubchem xml
Read chemical files and parses them into instances of `Graph`.
schema = http://www.ncbi.nlm.nih.gov ftp://ftp.ncbi.nlm.nih.gov/pubchem/specifications/pubchem.xsd
"""
import xml.etree.cElementTree as ET
# from lxml import etree as ET
# Type ALIASES
Atom = str
Bond = Tuple[str, str, str]
def parse_pc_compound(pc_compound: "Element '{http://www.ncbi.nlm.nih.gov}PC-Compound'") -> (str, Graph):
"""
        parse_pc_compound
        Parse a single PC-Compound XML element into a tuple of
        - the compound id (cid)
        - a Graph built from its atoms and bonds
"""
try:
atoms = []
bonds = []
h_index = []
pc_id = pc_compound.find('{http://www.ncbi.nlm.nih.gov}PC-Compound_id') \
.find('{http://www.ncbi.nlm.nih.gov}PC-CompoundType') \
.find('{http://www.ncbi.nlm.nih.gov}PC-CompoundType_id') \
.find('{http://www.ncbi.nlm.nih.gov}PC-CompoundType_id_cid')
pc_id = next(pc_id.iter()).text
trace("Cid : #" + pc_id)
covalent_units = pc_compound.find('{http://www.ncbi.nlm.nih.gov}PC-Compound_count') \
.find('{http://www.ncbi.nlm.nih.gov}PC-Count') \
.find('{http://www.ncbi.nlm.nih.gov}PC-Count_covalent-unit')
covalent_units = next(covalent_units.iter()).text
trace(covalent_units + " covalent units")
if ensure_uq_covalent_unit:
assert int(covalent_units) == 1
pc_atoms = pc_compound.find('{http://www.ncbi.nlm.nih.gov}PC-Compound_atoms').find(
'{http://www.ncbi.nlm.nih.gov}PC-Atoms')
atoms_aid = pc_atoms.find('{http://www.ncbi.nlm.nih.gov}PC-Atoms_aid')
atoms_elements = pc_atoms.find('{http://www.ncbi.nlm.nih.gov}PC-Atoms_element')
assert len(atoms_aid) == len(atoms_elements)
trace(str(len(atoms_aid)) + " atoms found")
for (i, id_node) in enumerate(atoms_aid):
if atoms_elements[i].attrib["value"].capitalize() == "H":
h_index.append(id_node.text)
if not ignore_hydrogens or atoms_elements[i].attrib["value"].capitalize() != "H":
atoms.append((id_node.text, atoms_elements[i].attrib["value"].capitalize()))
trace(str(len(atoms)) + " atoms stored")
trace(str(atoms))
trace(str(h_index))
pc_bonds = pc_compound.find('{http://www.ncbi.nlm.nih.gov}PC-Compound_bonds').find(
'{http://www.ncbi.nlm.nih.gov}PC-Bonds')
bonds_aid_a = pc_bonds.find('{http://www.ncbi.nlm.nih.gov}PC-Bonds_aid1')
bonds_aid_b = pc_bonds.find('{http://www.ncbi.nlm.nih.gov}PC-Bonds_aid2')
bonds_order = pc_bonds.find('{http://www.ncbi.nlm.nih.gov}PC-Bonds_order')
assert len(bonds_aid_a) == len(bonds_aid_b) and len(bonds_aid_a) == len(bonds_order)
trace(str(len(bonds_aid_a)) + " bonds found")
for (i, aid_a) in enumerate(bonds_aid_a):
if (not ignore_hydrogens or (aid_a.text not in h_index and bonds_aid_b[i].text not in h_index)):
bonds.append((aid_a.text, bonds_aid_b[i].text, bonds_order[i].text))
trace(str(len(bonds)) + " bonds stored")
trace(str(bonds))
graph = Graph(pc_id)
nodes = [Node(str(id_atom), label) for (id_atom, label) in atoms]
graph.add_nodes(nodes)
edges = [Edge(i, graph.V[a], graph.V[b], mod) for (i, (a, b, mod)) in enumerate(bonds, 1)]
graph.add_edges(edges)
return (pc_id, graph)
except Exception as e:
trace(str(e))
# raise e
return (pc_id, None)
def parse_xml(text):
try:
return ET.XML(text)
except Exception as e:
trace(str(e))
# raise e
return None
# if file is provided, and it might be huge, and thus require smart parsing
if file_path:
trace("parsing " + file_path)
if (file_path.endswith('.gz')):
fp = gzip.open(file_path, 'rt', encoding='utf-8')
xml_content = fp.read()
trace(xml_content)
else:
fp = open(file_path, 'r', encoding='utf-8')
def compound_gen():
fp.seek(0)
# get an iterable
context = ET.iterparse(fp, events=("start", "end"))
# turn it into an iterator
context = iter(context)
# get the root element
event, root = context.__next__()
for event, elem in context:
# print(event)
# print(elem.tag)
if event == "end" and elem.tag == "{http://www.ncbi.nlm.nih.gov}PC-Compound":
yield parse_pc_compound(elem)
root.clear()
return compound_gen()
else: # keep it simple and stupid otherwise
return [parse_pc_compound(pc_compound) for pc_compound in parse_xml(xml_content)]
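
if __name__ == "__main__":
    # Minimal sketch with an illustrative inline DOT graph; run via
    # ``python -m`` so the relative .structs imports resolve.
    demo_dot = """graph demo {
    1 [label=C]
    2 [label=O]
    1 -- 2 [weight=2]
}
"""
    for g in from_dot(dot_content=demo_dot):
        print(g)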
|
the-stack_106_26249 | import plotly.graph_objects as go
import pandas as pd
import numpy as np
from dataset_utils import get_datasets
GRID_COLOR = "#595959"
JOB_TITLES = [
'Business Analyst',
'Data Analyst',
'Data Scientist',
'Data Engineer/DBA',
'Software Engineer',
'Statistician/Research Scientist'
]
PROGRAMMING_LANGUAGE = ['Bash', 'C', 'C++', 'Java', 'Javascript', 'MATLAB',
'Other', 'Python', 'R', 'SQL', 'TypeScript']
TIME_WRITING_CODE = ['< 1 years', '1-2 years',
'3-5 years', '5-10 years', '10-20 years', '20+ years']
COMPANY_SIZE = [
'0-49 employees',
'50-249 employees',
'250-999 employees',
'1000-9,999 employees',
'> 10,000 employees'
]
##################################### datasets #####################################################
kaggle, glassdoor = get_datasets()
##################################### polar chart plotting #########################################
class PolarPlot():
def __init__(self):
self.figure = go.Figure()
self.range = (0, 0)
self.theta = ['Business Analyst', 'Data Analyst', 'Data Scientist', 'Data Engineer/DBA',
'Software Engineer', 'Statistician/Research Scientist', 'Business Analyst']
def update_common_layout(self):
"""
Updates general layout characteristics
"""
self.figure.update_layout(
showlegend=True,
legend_itemclick='toggleothers',
legend_itemdoubleclick='toggle',
plot_bgcolor="rgba(0, 0, 0, 0)",
paper_bgcolor="rgba(0, 0, 0, 0)",
autosize=True,
font_color="white",
uirevision=True,
height=400,
margin=dict(t=10)
)
def update_commom_polar_layout(self):
"""
Updates polar layout characteristics
"""
self.figure.update_layout(
polar_bgcolor='rgba(0, 0, 0, 0)',
polar_radialaxis_visible=True,
polar_radialaxis_showticklabels=True,
polar_radialaxis_tickfont_color='darkgrey',
polar_radialaxis_showline=False,
polar_radialaxis_layer='below traces',
polar_radialaxis_gridcolor=GRID_COLOR,
polar_radialaxis_range=self.range,
# polar_angularaxis_color='gray',
polar_angularaxis_showline=True,
polar_angularaxis_linecolor=GRID_COLOR,
polar_angularaxis_gridcolor=GRID_COLOR,
)
def add_data(self, data, country, hover_template='%{r:0.0f}%'):
"""
Adds a trace to the figure following the same standard for each trace
"""
# add the first element to the end of the list to "close" the polar chart
data.append(data[0])
self.figure.add_trace(
go.Scatterpolar(
r=data,
theta=self.theta,
mode='lines',
name=country,
hoverinfo='name+r',
hovertemplate=hover_template,
showlegend=True,
line_shape='spline',
line_smoothing=0.8,
line_width=3
)
)
# update the max range
self.update_range(data)
def update_range(self, data):
"""
Updates the range to be 110% of maximum value of all traces
"""
max_range = max(data) * 1.1
self.range = (
0, max_range) if max_range > self.range[1] else self.range
def get_figure(self):
"""
Update layouts and shows the figure
"""
self.update_common_layout()
self.update_commom_polar_layout()
return self.figure
def plot_polar(polar_plot, data, traces, x_names, agg_column, group_column, trace_column, hover_template):
data_cp = data.copy()
polar_plot.figure.data = tuple()
for trace_name in traces:
if agg_column in ('JobDescription', 'CloudPlatf'):
data_cp['TempCol'] = data_cp[agg_column].apply(
lambda x: trace_name.lower() in x)
else:
data_cp['TempCol'] = data_cp[agg_column].apply(
lambda x: trace_name in x)
plot_data = data_cp.groupby([group_column], as_index=False).agg({
'TempCol': ['sum', 'count']})
plot_data['TempColPct'] = plot_data['TempCol']['sum'] / \
plot_data['TempCol']['count'] * 100
plot_data = plot_data.TempColPct.tolist()
polar_plot.add_data(plot_data, trace_name, hover_template)
##################################### Line chart plotting ############################################
class LinePlot():
def __init__(self):
self.figure = go.Figure()
self.range = (0, 100)
def update_axis_title(self, x, y):
self.figure.update_layout(
xaxis_title_text=x,
yaxis_title_text=y,
)
def update_layout(self):
"""
Creates a clean layout for ploting, adjusting multiple settings
"""
self.figure.update_layout(
plot_bgcolor="rgba(0, 0, 0, 0)",
paper_bgcolor="rgba(0, 0, 0, 0)",
showlegend=True,
legend_font_color='gray',
legend_itemclick='toggleothers',
legend_itemdoubleclick='toggle',
xaxis={
"visible": True,
"showgrid": False,
"gridwidth": 0.8,
# "color": "white",
},
yaxis={
"showgrid": True,
"gridcolor": GRID_COLOR,
"gridwidth": 0.5,
},
font_color='white'
)
def add_data(self, x_names, y_data, trace_name, hover_template):
"""
Adds a trace to the figure following the same standard for each trace
"""
self.figure.add_trace(
go.Scatter(
x=x_names,
y=y_data,
mode='lines',
name=trace_name,
hoverinfo='name+y',
hovertemplate=hover_template,
line_shape='spline',
line_smoothing=0.8,
line_width=3
)
)
def get_figure(self):
self.update_layout()
return self.figure
def plot_lines(line_plot, data, traces, x_names, agg_column, group_column, trace_column, hover_template):
"""
Creates aggregation to plot
"""
line_plot.figure.data = tuple()
for trace_name in traces:
data_filtered = data[data[trace_column] == trace_name]
plot_data = data_filtered.groupby([group_column], as_index=False).agg({
agg_column: ['mean', 'count']})
plot_data = plot_data[agg_column]['mean'].tolist()
line_plot.add_data(x_names, plot_data, trace_name,
hover_template=hover_template)
########################################## getters ##################################################
job_proportion_polar_plot = PolarPlot()
time_of_coding_line_plot = LinePlot()
salary_line_plot = LinePlot()
job_skills_polar_plot = PolarPlot()
job_desc_polar_plot = PolarPlot()
prog_language_line_plot = LinePlot()
def get_salary_line_plot(job_titles=None):
# salary_line_plot.figure.data = tuple()
traces = job_titles if job_titles is not None else JOB_TITLES
x_names = COMPANY_SIZE
plot_lines(
salary_line_plot,
data=kaggle,
traces=traces,
x_names=x_names,
agg_column='Salary',
group_column='CompanySize',
trace_column='JobTitle',
hover_template='U$%{y:,.2r}'
)
xaxis_title = 'Company size'
yaxis_title = 'Average Salary (USD per Year)'
salary_line_plot.update_axis_title(xaxis_title, yaxis_title)
return salary_line_plot
def get_job_skills_polar_plot(selected_languages=None):
traces = selected_languages if selected_languages is not None else PROGRAMMING_LANGUAGE
x_names = JOB_TITLES
plot_polar(
job_skills_polar_plot,
data=kaggle,
traces=traces,
x_names=x_names,
agg_column='ProgLang',
group_column='JobTitle',
trace_column='ProgLang',
hover_template='%{r:0.0f}%'
)
job_skills_polar_plot.figure.update_layout(
polar_radialaxis_tickvals=[25, 50, 75],
polar_radialaxis_ticktext=['25%', '50%', '75%'],
polar_radialaxis_tickmode='array',
)
return job_skills_polar_plot
def get_job_proportion_polar_plot(countries):
job_proportion_polar_plot.figure.data = tuple()
proportion_dict = dict()
for country in countries:
glassdoor_country = glassdoor[glassdoor.Country == f"{country}"].groupby(
["JobTitle"], as_index=False).Count.sum().Count.tolist()
glassdoor_country = (np.array(glassdoor_country) /
sum(glassdoor_country) * 100).tolist()
proportion_dict[f"{country}"] = glassdoor_country
for country, proportion in proportion_dict.items():
job_proportion_polar_plot.add_data(proportion, country)
return job_proportion_polar_plot
def get_job_propotion_pie_chart(country):
glassdoor_country = glassdoor[glassdoor.Country == f"{country}"].groupby(
["JobTitle"], as_index=False).Count.sum().Count.tolist()
glassdoor_country = (np.array(glassdoor_country) /
sum(glassdoor_country) * 100).tolist()
fig = go.Figure(data=go.Pie(labels=JOB_TITLES, values=glassdoor_country))
fig.update_traces(
hoverinfo="label+percent",
textposition='inside',
textinfo='label',
)
fig.update_layout(
plot_bgcolor="rgba(0, 0, 0, 0)",
paper_bgcolor="rgba(0, 0, 0, 0)",
showlegend=False,
margin=dict(t=10),
title=dict(
text=country,
x=0.5,
y=0.95,
xanchor="center",
yanchor="top"
),
font_color="white"
# height=400
)
return fig
def get_prog_language_line_plot(selected_time_writing_code=None):
traces = selected_time_writing_code if selected_time_writing_code is not None else list(
set(kaggle.TimeWritingCode.tolist()))
x_names = ['{} languages'.format(x) for x in range(7)]
plot_lines(
prog_language_line_plot,
data=kaggle,
traces=traces,
x_names=x_names,
agg_column='Salary',
group_column='QtyProgLang',
trace_column='TimeWritingCode',
hover_template='U$%{y:,.2r}'
)
    # Adding Average
# plot_data = kaggle.groupby(
# ['QtyProgLang'], as_index=False).agg({'Salary': 'mean'})
# plot_data = plot_data.Salary.tolist()
# prog_language_line_plot.add_data(
# x_names, plot_data, 'Average', hover_template='U$%{y:,.2r}')
xaxis_title = 'Quantity of programming languages used on a regular basis'
yaxis_title = 'Average Salary (USD per Year)'
prog_language_line_plot.update_axis_title(xaxis_title, yaxis_title)
return prog_language_line_plot
def get_countries():
countries = list(set(glassdoor["Country"]))
countries.sort()
return countries
def get_job_titles():
return JOB_TITLES
def get_programming_language():
return PROGRAMMING_LANGUAGE
def get_time_writing_code():
return TIME_WRITING_CODE
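
if __name__ == "__main__":
    # Minimal sketch, assuming the kaggle/glassdoor files behind get_datasets()
    # are available locally (they are loaded at import time above).
    line_plot = get_salary_line_plot(job_titles=['Data Scientist', 'Data Analyst'])
    line_plot.get_figure().show()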
|
the-stack_106_26252 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import xlwt
def csv_to_xlsx():
with open('1.csv', 'r', encoding='utf-8') as f:
read = csv.reader(f)
workbook = xlwt.Workbook()
        sheet = workbook.add_sheet('data')  # create a sheet named 'data'
l = 0
for line in read:
print(line)
r = 0
for i in line:
print(i)
                sheet.write(l, r, i)  # write each cell value one by one
r = r + 1
l = l + 1
        workbook.save('1.xlsx')  # save the workbook (note: xlwt writes legacy .xls/BIFF data despite the .xlsx name)
if __name__ == '__main__':
csv_to_xlsx()
|
the-stack_106_26256 | from django import forms
from django.urls import reverse
from django.utils.translation import gettext_lazy as _, gettext_noop # NoQA
from pretix.base.forms import SettingsForm
from pretix.base.models import Event
from pretix.control.views.event import (
EventSettingsFormView, EventSettingsViewMixin,
)
class VaccSettingsForm(SettingsForm):
vaccination_interval_check = forms.BooleanField(
label=_('Enable validation'),
required=False,
)
vaccination_future_max = forms.IntegerField(
label=_('Maximum time frame for first shot'),
required=True,
min_value=1,
widget=forms.NumberInput(
attrs={'addon_after': _('days')}
),
)
vaccination_interval_min = forms.IntegerField(
label=_('Minimum interval between first and second shot'),
required=True,
min_value=1,
widget=forms.NumberInput(
attrs={'addon_after': _('days')}
),
)
vaccination_interval_max = forms.IntegerField(
label=_('Maximum interval between first and second shot'),
required=True,
min_value=1,
widget=forms.NumberInput(
attrs={'addon_after': _('days')}
),
)
class VaccSettings(EventSettingsViewMixin, EventSettingsFormView):
model = Event
form_class = VaccSettingsForm
template_name = 'pretix_vaccination_interval/settings.html'
permission = 'can_change_event_settings'
def get_success_url(self) -> str:
return reverse('plugins:pretix_vaccination_interval:settings', kwargs={
'organizer': self.request.event.organizer.slug,
'event': self.request.event.slug
})
|
the-stack_106_26257 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.selector import Selector
from scrapy.loader.processors import TakeFirst, MapCompose, Join
from w3lib.html import remove_tags
def clean_html_text(value):
cleaned_text = ''
try:
cleaned_text = remove_tags(value)
except:
cleaned_text = "No Reviews"
return cleaned_text
def get_platforms(value):
platforms = []
platform = value.split(" ")[-1]
if platform == 'win':
platforms.append('Windows')
if platform == 'mac':
platforms.append('Mac OS')
if platform == 'linux':
platforms.append('Linux')
if platform == 'vr_required':
platforms.append('VR Only')
if platform == 'vr_supported':
platforms.append('VR Supported')
return platforms
def get_original_price(html_string):
original_price = ''
selector_obj = Selector(text=html_string)
opWithDisc = selector_obj.xpath(".//div[contains(@class, 'discounted')]/span/strike/text()").get()
if opWithDisc != None:
original_price = opWithDisc
else:
original_price = selector_obj.xpath("normalize-space(.//div[@class='col search_price responsive_secondrow']/text())").get()
# Sometimes normalize-space don't work, so we can use str.strip inside input_processor
return original_price
def clean_discount_rate(value):
if value != None:
result = value.lstrip('-')
else:
result = "0%"
return result
class SteamstoreItem(scrapy.Item):
game_url = scrapy.Field(
output_processor = TakeFirst()
)
img_url = scrapy.Field(
output_processor = TakeFirst()
)
game_name = scrapy.Field(
output_processor = TakeFirst()
)
release_date = scrapy.Field(
output_processor = TakeFirst()
)
platforms = scrapy.Field(
input_processor = MapCompose(get_platforms)
)
rating = scrapy.Field(
input_processor = MapCompose(clean_html_text),
output_processor = TakeFirst()
)
original_price = scrapy.Field(
input_processor = MapCompose(get_original_price, str.strip),
output_processor = Join('')
)
discounted_price = scrapy.Field(
output_processor = TakeFirst()
)
discounted_rate = scrapy.Field(
input_processor = MapCompose(clean_discount_rate),
output_processor = TakeFirst()
)
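
if __name__ == '__main__':
    # Quick sketch exercising the input processors outside a Scrapy crawl; the
    # argument strings are made up for illustration.
    print(get_platforms('platform_img win'))               # ['Windows']
    print(clean_discount_rate('-33%'))                     # '33%'
    print(clean_html_text('<span>Very Positive</span>'))   # 'Very Positive'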
|
the-stack_106_26259 | from urllib.parse import urlencode
from requests import Session
from db_redis import RedisQueue
from request import WeixinRequest
from config import *
import requests
from requests.exceptions import ConnectionError, ReadTimeout
from pyquery import PyQuery as pq
class Spider():
base_url = 'https://weixin.sogou.com/weixin'
keyword = 'NBA'
headers = {
'Cookie': 'SUV = 00B317881B17ED1B5B6908F93A1EF865;CXID = CFB60C2B607DA865459A24D17B3BA704;SUID = 344EB76F3865860A5B724D730004C0D3;ABTEST = 0 | 1547686493 | v1;weixinIndexVisited = 1;ppinf = 5 | 1548046655 | 1549256255 | dHJ1c3Q6MToxfGNsaWVudGlkOjQ6MjAxN3x1bmlxbmFtZTo4OmF1dGhldGljfGNydDoxMDoxNTQ4MDQ2NjU1fHJlZm5pY2s6ODphdXRoZXRpY3x1c2VyaWQ6NDQ6bzl0Mmx1TUNxYTBlU3lGcnExbE92aHQ3WG1Gd0B3ZWl4aW4uc29odS5jb218;pprdig = nfjiTQ62SluMVzhnMfzfgAECp - 10K12U8UYHC2eqYnJVPFubSIi041j - db4Tgv9yqfBiud8xlrNycXt7MXCYlucNOBpWcu1gu3Dn0Q8Lh7PPEBSYMvNTKTqTGx3x - l45Ndurz5 - 5Aq16RW8U - jenH5XD8vbJVnyHQP4ank0IJiA;sgid = 11 - 38791065 - AVxFUTibcPxuhorUqhoFwgzQ;SNUID = 925AC9C5DEDB5D85D39AAC46DE277CC1;IPLOC = CN4211;ppmdig = 15484905700000007acac90ac9b6a5f99ecab3769c06d166;JSESSIONID = aaaRpyLsVNeWx8b3_W5Hw;sct = 4',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Host': 'weixin.sogou.com'
}
session = Session()
queue = RedisQueue()
def start(self):
self.session.headers.update(self.headers)
start_url = self.base_url + '?' + urlencode({'query':self.keyword, 'type':2})
first_request = WeixinRequest(url=start_url, callback=self.parse_index, need_proxy=True)
self.queue.add(first_request)
def get_proxy(self):
try:
response = requests.get(PROXY_POOL_URL)
if response.status_code == 200:
print('Get proxy', response.text)
return response.text
return None
except ConnectionError as e:
return None
def parse_index(self, response):
doc = pq(response.text)
items = doc('.news-box .news-list .txt-box h3 a').items()
for item in items:
url = item.attr('href')
weixin_request = WeixinRequest(url=url, callback=self.parse_detail, need_proxy=True)
yield weixin_request
next = doc('.sogou_next')
if next:
url = self.base_url + next.attr('href')
yield WeixinRequest(url=url, callback=self.parse_index, need_proxy=True)
def parse_detail(self, response):
doc = pq(response.text)
data = {
'title': doc('.rich_media_title').text(),
'content': doc('.rich_media_content').text(),
'nickname': doc('.profile_nickname').text(),
'wechat': doc('.profile_meta_value').text(),
'date': doc('#publish_time').text()
}
yield data
def request(self, weixin_request):
try:
if weixin_request.need_proxy:
proxy = self.get_proxy()
proxies = {
'http': 'http://' + proxy,
                    'https': 'https://' + proxy
                }
                return self.session.send(weixin_request.prepare(), proxies=proxies,
                                          timeout=weixin_request.timeout, allow_redirects=False)
            return self.session.send(weixin_request.prepare(),
                                      timeout=weixin_request.timeout, allow_redirects=False)
except (ConnectionError, ReadTimeout) as e:
print(e.args)
return None
def error(self, weixin_request):
weixin_request.failed_time += 1
print('Request failed', weixin_request.failed_time, 'Times', weixin_request.url)
if weixin_request.failed_time < 5:
self.queue.add(weixin_request)
def schedule(self):
while not self.queue.empty():
weixin_request = self.queue.pop()
callback = weixin_request.callback
print('Schedule', weixin_request.url)
response = self.request(weixin_request)
if response and response.status_code == 200:
results = list(callback(response))
if results:
for result in results:
print('New result', type(result))
if isinstance(result, WeixinRequest):
self.queue.add(result)
if isinstance(result, dict):
# insert into mysql
print('Get an article')
print(result)
else:
self.error(weixin_request)
else:
self.error(weixin_request)
def run(self):
self.start()
self.schedule()
if __name__ == '__main__':
spider = Spider()
spider.start()
spider.schedule() |
the-stack_106_26260 | #!/usr/bin/python
import wiringpi2 as gpio
import time
#init the GPIO
#prepare PWM pins
gpio.wiringPiSetupGpio()
gpio.pinMode(12, gpio.GPIO.PWM_OUTPUT)
gpio.pinMode(13, gpio.GPIO.PWM_OUTPUT)
#prepare PWM channels
gpio.pwmSetMode(gpio.GPIO.PWM_MODE_MS)
gpio.pwmSetRange(480)
gpio.pwmSetClock(2)
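#with PWM_MODE_MS, a range of 480 and a clock divisor of 2, pwmWrite() values
#of 0-480 map to 0-100% duty cycle (so the 300 used below is roughly 62%);
#the PWM frequency works out to about 19.2 MHz / (2 * 480) = 20 kHz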
#prepare direction pins
gpio.pinMode(5, gpio.GPIO.OUTPUT)
gpio.pinMode(6, gpio.GPIO.OUTPUT)
#movements
def straight_fw(speed):
gpio.digitalWrite(5, 1)
gpio.digitalWrite(6, 1)
gpio.pwmWrite(12, speed)
gpio.pwmWrite(13, speed)
def straight_bw(speed):
gpio.digitalWrite(5, 0)
gpio.digitalWrite(6, 0)
gpio.pwmWrite(12, speed)
gpio.pwmWrite(13, speed)
def stop():
gpio.digitalWrite(5, 1)
gpio.digitalWrite(6, 1)
gpio.pwmWrite(12, 0)
gpio.pwmWrite(13, 0)
def turn_180():
gpio.digitalWrite(5, 0)
gpio.digitalWrite(6, 1)
gpio.pwmWrite(12, 300)
gpio.pwmWrite(13, 300)
time.sleep(2)
stop()
#playground
#straight_fw(300)
#time.sleep(2)
#stop()
#straight_bw(300)
#time.sleep(2)
#stop()
#turn_180()
|
the-stack_106_26261 | """
adbts.tcp.asynchronous
~~~~~~~~~~~~~~~~~~~~~~
Contains functionality for asynchronous Transmission Control Protocol (TCP) transport using `asyncio`.
"""
import asyncio
from .. import exceptions, hints, transport
from . import timeouts
__all__ = ['Transport']
# Disable incorrect warning on asyncio.wait_for, https://github.com/PyCQA/pylint/issues/996.
# pylint: disable=not-an-iterable
class Transport(transport.Transport):
"""
Defines asynchronous (non-blocking) TCP transport using `asyncio`.
"""
def __init__(self,
host: hints.Str,
port: hints.Int,
reader: hints.StreamReader,
writer: hints.StreamWriter,
loop: hints.OptionalEventLoop = None) -> None:
self._host = host
self._port = port
self._reader = reader
self._writer = writer
self._loop = loop
self._closed = False
def __repr__(self) -> hints.Str:
address = str(self)
state = 'closed' if self.closed else 'open'
return '<{}(address={!r}, state={!r})>'.format(self.__class__.__name__, address, state)
def __str__(self) -> hints.Str:
return '{}:{}'.format(self._host, self._port)
@property
def closed(self) -> hints.Bool:
"""
Checks to see if the transport is closed.
:return: Closed state of the transport
:rtype: :class:`~bool`
"""
return self._closed is True
@asyncio.coroutine
@transport.ensure_opened
@transport.ensure_num_bytes
@exceptions.reraise(OSError)
@exceptions.reraise_timeout_errors(asyncio.TimeoutError)
def read(self,
num_bytes: hints.Int,
timeout: hints.Timeout = timeouts.UNDEFINED) -> transport.TransportReadResult:
"""
Read bytes from the transport.
:param num_bytes: Number of bytes to read.
:type num_bytes: :class:`~int`
:param timeout: Maximum number of milliseconds to read before raising an exception.
:type timeout: :class:`~int`, :class:`~NoneType`, or :class:`~object`
:return: Collection of bytes read
:rtype: :class:`~bytes` or :class:`~bytearray`
:raises :class:`~adbts.exceptions.TransportError`: When underlying transport encounters an error
:raises :class:`~adbts.exceptions.TimeoutError`: When timeout is exceeded
"""
data = yield from asyncio.wait_for(self._reader.read(num_bytes),
timeout=timeouts.timeout(timeout),
loop=self._loop)
return data
@asyncio.coroutine
@transport.ensure_opened
@transport.ensure_data
@exceptions.reraise(OSError)
@exceptions.reraise_timeout_errors(asyncio.TimeoutError)
def write(self,
data: hints.Buffer,
timeout: hints.Timeout = timeouts.UNDEFINED) -> transport.TransportWriteResult:
"""
Write bytes to the transport.
:param data: Collection of bytes to write.
:type data: :class:`~bytes` or :class:`~bytearray`
:param timeout: Maximum number of milliseconds to write before raising an exception.
:type timeout: :class:`~int`, :class:`~NoneType`, or :class:`~object`
        :return: Nothing
        :rtype: :class:`~NoneType`
:raises :class:`~adbts.exceptions.TransportError`: When underlying transport encounters an error.
:raises :class:`~adbts.exceptions.TimeoutError`: When timeout is exceeded
"""
self._writer.write(data)
yield from asyncio.wait_for(self._writer.drain(), timeout=timeouts.timeout(timeout), loop=self._loop)
@transport.ensure_opened
@exceptions.reraise(OSError)
def close(self) -> None:
"""
Close the transport.
:return: Nothing
:rtype: `None`
:raises :class:`~adbts.exceptions.TransportError`: When underlying transport encounters an error
"""
self._writer.close()
self._closed = True
@asyncio.coroutine
@exceptions.reraise(OSError)
def open(host: hints.Str, # pylint: disable=redefined-builtin
port: hints.Int,
timeout: hints.Timeout = timeouts.UNDEFINED,
loop: hints.OptionalEventLoop = None) -> transport.TransportOpenResult:
"""
    Open a new :class:`~adbts.tcp.asynchronous.Transport` transport to the given host/port.
:param host: Remote host
:type host: :class:`~str`
:param port: Remote port
:type port: :class:`~int`
:param timeout: Maximum number of milliseconds to write before raising an exception.
:type timeout: :class:`~int`, :class:`~NoneType`, or :class:`~object`
:param loop: Asyncio Event Loop
:type loop: :class:`~asyncio.events.AbstractEventLoop`
:return: Asynchronous TCP transport
:rtype: :class:`~adbts.tcp.async.Transport`
:raises :class:`~adbts.exceptions.TransportError`: When underlying transport encounters an error
"""
reader, writer = yield from asyncio.wait_for(asyncio.open_connection(host, port, loop=loop),
timeout=timeouts.timeout(timeout), loop=loop)
return Transport(host, port, reader, writer, loop)
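
if __name__ == '__main__':
    # Minimal sketch (run via ``python -m`` so the relative imports resolve).
    # 127.0.0.1:5037 is the conventional local ADB server address and is an
    # assumption here, not something this module defines.
    @asyncio.coroutine
    def _demo():
        tcp = yield from open('127.0.0.1', 5037)
        print(repr(tcp))
        tcp.close()
        print(repr(tcp))

    asyncio.get_event_loop().run_until_complete(_demo())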
|
the-stack_106_26262 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2018, JK & AGB
# Full license can be found in License.md
# -----------------------------------------------------------------------------
""" Tools for loading solar indices.
Classes
-------------------------------------------------------------------------------
OMNIvals
Moduleauthor
-------------------------------------------------------------------------------
Jeff Klenzing (JK), 3 Mar 2018, Goddard Space Flight Center (GSFC)
References
-------------------------------------------------------------------------------
"""
import datetime as dt
import numpy as np
class OMNIvals:
""" Object containing OMNI solar indices
Keyword Arguments
------------------
file_dir : (str)
Directory with data files (default=solar_index._data_dir)
file_name : (str)
Data filename (default='omni2_daily_12664.txt')
Attributes
----------
self.year : (np.array)
Integer year
self.day : (np.array)
Integer day
self.dt : (np.array)
datetime
self.Rz : (np.array)
Rz index
self.F107 : (np.array)
10.7 cm flux index in solar flux units
self.Lalpha : (np.array)
Lyman alpha
Methods
--------
load_omni_vals : Load the values from an ASCII file
"""
def __init__(self, **kwargs):
try:
self.load_omni_vals(**kwargs)
except ImportError:
raise ImportError("unable to initiate OMNIvals class - ")
def load_omni_vals(self, **kwargs):
""" Load an ascii file into the OMNIvals class
Keyword Arguments
--------------------
file_dir : (str)
Directory with data files (default='data')
file_name : (str)
Data filename (default='omni2_daily_12664.txt')
Returns
-------
Void
"""
from os import path
from solar_index import utils, _data_dir
# Define the default data file and update using kwargs
file_dir = _data_dir
file_name = "omni2_daily_12664.txt"
for kk in kwargs.keys():
if kk.lower() == "file_dir":
file_dir = kwargs[kk]
elif kk.lower() == "file_name":
file_name = kwargs[kk]
# Construct filename and load the data
if not path.isdir(file_dir):
raise OSError("unknown file directory {:s}".format(file_dir))
self.filename = path.join(file_dir, file_name)
if not path.isfile(self.filename):
raise OSError("unknown file {:s}".format(self.filename))
try:
data = np.loadtxt(self.filename)
except ImportError:
estr = "unable to load ascii file {:s}".format(self.filename)
raise ImportError(estr)
self.year = data[:, 0]
self.day = data[:, 1]
self.dt = np.array([dt.datetime(int(self.year[i]), 1, 1) +
dt.timedelta(days=int(self.day[i])-1)
for i in range(len(self.day))])
self.Rz = data[:, 3]
self.F107 = utils.replace_fill_array(data[:, 4], fill_value=999.9)
self.Lalpha = data[:, 5]
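
if __name__ == "__main__":
    # Minimal sketch, assuming the packaged omni2_daily_12664.txt is present in
    # solar_index's data directory (the default used above).
    omni = OMNIvals()
    print("records loaded:", len(omni.dt))
    print("mean F10.7 (ignoring fills):", np.nanmean(omni.F107))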
|
the-stack_106_26263 | from pypy.rpython.memory.gctransform.transform import GCTransformer
from pypy.rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, ll_call_destructor, type_contains_pyobjs, var_ispyobj
from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython import rmodel
from pypy.rpython.memory import gctypelayout
from pypy.rpython.memory.gc import marksweep
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import ovfcheck
from pypy.rlib.debug import ll_assert
from pypy.translator.backendopt import graphanalyze
from pypy.translator.backendopt.support import var_needsgc
from pypy.annotation import model as annmodel
from pypy.rpython import annlowlevel
from pypy.rpython.rbuiltin import gen_cast
from pypy.rpython.memory.gctypelayout import ll_weakref_deref, WEAKREF
from pypy.rpython.memory.gctypelayout import convert_weakref_to, WEAKREFPTR
from pypy.rpython.memory.gctransform.log import log
from pypy.tool.sourcetools import func_with_new_name
from pypy.rpython.lltypesystem.lloperation import llop
import sys
class CollectAnalyzer(graphanalyze.GraphAnalyzer):
def operation_is_true(self, op):
if op.opname == 'gc__collect':
return True
if op.opname in ('malloc', 'malloc_varsize'):
flags = op.args[1].value
return flags['flavor'] == 'gc' and not flags.get('nocollect', False)
if op.opname in ('coalloc', 'coalloc_varsize'):
return True
def find_initializing_stores(collect_analyzer, graph):
from pypy.objspace.flow.model import mkentrymap
entrymap = mkentrymap(graph)
    # a bit of a hackish analysis: if a block contains a malloc and a check that
# the result is not zero, then the block following the True link will
# usually initialize the newly allocated object
result = {}
def find_in_block(block, mallocvars):
for i, op in enumerate(block.operations):
if op.opname in ("cast_pointer", "same_as"):
if op.args[0] in mallocvars:
mallocvars[op.result] = True
elif op.opname in ("setfield", "setarrayitem", "setinteriorfield"):
TYPE = op.args[-1].concretetype
if (op.args[0] in mallocvars and
isinstance(TYPE, lltype.Ptr) and
TYPE.TO._gckind == "gc"):
result[op] = True
else:
if collect_analyzer.analyze(op):
return
for exit in block.exits:
if len(entrymap[exit.target]) != 1:
continue
newmallocvars = {}
for i, var in enumerate(exit.args):
if var in mallocvars:
newmallocvars[exit.target.inputargs[i]] = True
if newmallocvars:
find_in_block(exit.target, newmallocvars)
mallocnum = 0
blockset = set(graph.iterblocks())
while blockset:
block = blockset.pop()
if len(block.operations) < 2:
continue
mallocop = block.operations[-2]
checkop = block.operations[-1]
if not (mallocop.opname == "malloc" and
checkop.opname == "ptr_nonzero" and
mallocop.result is checkop.args[0] and
block.exitswitch is checkop.result):
continue
exits = [exit for exit in block.exits if exit.llexitcase]
if len(exits) != 1:
continue
exit = exits[0]
if len(entrymap[exit.target]) != 1:
continue
try:
index = exit.args.index(mallocop.result)
except ValueError:
continue
target = exit.target
mallocvars = {target.inputargs[index]: True}
mallocnum += 1
find_in_block(target, mallocvars)
#if result:
# print "found %s initializing stores in %s" % (len(result), graph.name)
return result
class FrameworkGCTransformer(GCTransformer):
use_stackless = False
root_stack_depth = 163840
def __init__(self, translator):
from pypy.rpython.memory.gc.base import choose_gc_from_config
super(FrameworkGCTransformer, self).__init__(translator, inline=True)
if hasattr(self, 'GC_PARAMS'):
# for tests: the GC choice can be specified as class attributes
from pypy.rpython.memory.gc.marksweep import MarkSweepGC
GCClass = getattr(self, 'GCClass', MarkSweepGC)
GC_PARAMS = self.GC_PARAMS
else:
# for regular translation: pick the GC from the config
GCClass, GC_PARAMS = choose_gc_from_config(translator.config)
self.layoutbuilder = TransformerLayoutBuilder(self)
self.get_type_id = self.layoutbuilder.get_type_id
        # set up a dummy table, to be overwritten with the real one in finish()
type_info_table = lltype._ptr(
lltype.Ptr(gctypelayout.GCData.TYPE_INFO_TABLE),
"delayed!type_info_table", solid=True)
gcdata = gctypelayout.GCData(type_info_table)
# initialize the following two fields with a random non-NULL address,
# to make the annotator happy. The fields are patched in finish()
# to point to a real array.
foo = lltype.malloc(lltype.FixedSizeArray(llmemory.Address, 1),
immortal=True, zero=True)
a_random_address = llmemory.cast_ptr_to_adr(foo)
gcdata.static_root_start = a_random_address # patched in finish()
gcdata.static_root_nongcend = a_random_address # patched in finish()
gcdata.static_root_end = a_random_address # patched in finish()
self.gcdata = gcdata
self.malloc_fnptr_cache = {}
gcdata.gc = GCClass(**GC_PARAMS)
root_walker = self.build_root_walker()
gcdata.set_query_functions(gcdata.gc)
gcdata.gc.set_root_walker(root_walker)
self.num_pushs = 0
self.write_barrier_calls = 0
def frameworkgc_setup():
# run-time initialization code
root_walker.setup_root_walker()
gcdata.gc.setup()
bk = self.translator.annotator.bookkeeper
# the point of this little dance is to not annotate
# self.gcdata.static_root_xyz as constants. XXX is it still needed??
data_classdef = bk.getuniqueclassdef(gctypelayout.GCData)
data_classdef.generalize_attr(
'static_root_start',
annmodel.SomeAddress())
data_classdef.generalize_attr(
'static_root_nongcend',
annmodel.SomeAddress())
data_classdef.generalize_attr(
'static_root_end',
annmodel.SomeAddress())
annhelper = annlowlevel.MixLevelHelperAnnotator(self.translator.rtyper)
def getfn(ll_function, args_s, s_result, inline=False,
minimal_transform=True):
graph = annhelper.getgraph(ll_function, args_s, s_result)
if minimal_transform:
self.need_minimal_transform(graph)
if inline:
self.graphs_to_inline[graph] = True
return annhelper.graph2const(graph)
self.frameworkgc_setup_ptr = getfn(frameworkgc_setup, [],
annmodel.s_None)
if root_walker.need_root_stack:
self.incr_stack_ptr = getfn(root_walker.incr_stack,
[annmodel.SomeInteger()],
annmodel.SomeAddress(),
inline = True)
self.decr_stack_ptr = getfn(root_walker.decr_stack,
[annmodel.SomeInteger()],
annmodel.SomeAddress(),
inline = True)
else:
self.incr_stack_ptr = None
self.decr_stack_ptr = None
self.weakref_deref_ptr = self.inittime_helper(
ll_weakref_deref, [llmemory.WeakRefPtr], llmemory.Address)
classdef = bk.getuniqueclassdef(GCClass)
s_gc = annmodel.SomeInstance(classdef)
s_gcref = annmodel.SomePtr(llmemory.GCREF)
malloc_fixedsize_clear_meth = GCClass.malloc_fixedsize_clear.im_func
self.malloc_fixedsize_clear_ptr = getfn(
malloc_fixedsize_clear_meth,
[s_gc, annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(), annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
if hasattr(GCClass, 'malloc_fixedsize'):
malloc_fixedsize_meth = GCClass.malloc_fixedsize.im_func
self.malloc_fixedsize_ptr = getfn(
malloc_fixedsize_meth,
[s_gc, annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeBool(), annmodel.SomeBool(),
annmodel.SomeBool()], s_gcref,
inline = False)
else:
malloc_fixedsize_meth = None
self.malloc_fixedsize_ptr = self.malloc_fixedsize_clear_ptr
## self.malloc_varsize_ptr = getfn(
## GCClass.malloc_varsize.im_func,
## [s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
## + [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
self.malloc_varsize_clear_ptr = getfn(
GCClass.malloc_varsize_clear.im_func,
[s_gc] + [annmodel.SomeInteger(nonneg=True) for i in range(5)]
+ [annmodel.SomeBool(), annmodel.SomeBool()], s_gcref)
self.collect_ptr = getfn(GCClass.collect.im_func,
[s_gc], annmodel.s_None)
# in some GCs we can inline the common case of
# malloc_fixedsize(typeid, size, True, False, False)
if getattr(GCClass, 'inline_simple_malloc', False):
# make a copy of this function so that it gets annotated
# independently and the constants are folded inside
if malloc_fixedsize_meth is None:
malloc_fast_meth = malloc_fixedsize_clear_meth
self.malloc_fast_is_clearing = True
else:
malloc_fast_meth = malloc_fixedsize_meth
self.malloc_fast_is_clearing = False
malloc_fast = func_with_new_name(
malloc_fast_meth,
"malloc_fast")
s_False = annmodel.SomeBool(); s_False.const = False
s_True = annmodel.SomeBool(); s_True .const = True
self.malloc_fast_ptr = getfn(
malloc_fast,
[s_gc, annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
s_True, s_False,
s_False], s_gcref,
inline = True)
else:
self.malloc_fast_ptr = None
# in some GCs we can also inline the common case of
# malloc_varsize(typeid, length, (3 constant sizes), True, False)
if getattr(GCClass, 'inline_simple_malloc_varsize', False):
# make a copy of this function so that it gets annotated
# independently and the constants are folded inside
malloc_varsize_clear_fast = func_with_new_name(
GCClass.malloc_varsize_clear.im_func,
"malloc_varsize_clear_fast")
s_False = annmodel.SomeBool(); s_False.const = False
s_True = annmodel.SomeBool(); s_True .const = True
self.malloc_varsize_clear_fast_ptr = getfn(
malloc_varsize_clear_fast,
[s_gc, annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True),
s_True, s_False], s_gcref,
inline = True)
else:
self.malloc_varsize_clear_fast_ptr = None
if GCClass.moving_gc:
self.id_ptr = getfn(GCClass.id.im_func,
[s_gc, s_gcref], annmodel.SomeInteger(),
inline = False,
minimal_transform = False)
else:
self.id_ptr = None
self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func,
[s_gc,
annmodel.SomeInteger(nonneg=True)],
annmodel.s_None)
if GCClass.needs_write_barrier:
self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,
[s_gc, annmodel.SomeAddress(),
annmodel.SomeAddress(),
annmodel.SomeAddress()],
annmodel.s_None,
inline=True)
else:
self.write_barrier_ptr = None
if hasattr(GCClass, "coalloc_fixedsize_clear"):
self.coalloc_clear_ptr = getfn(
GCClass.coalloc_fixedsize_clear.im_func,
[s_gc, annmodel.SomeAddress(),
annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True)],
s_gcref, inline=True)
self.coalloc_varsize_clear_ptr = getfn(
GCClass.coalloc_varsize_clear.im_func,
[s_gc, annmodel.SomeAddress()] +
[annmodel.SomeInteger(nonneg=True) for i in range(5)],
s_gcref, inline=True)
else:
self.coalloc_clear_ptr = self.coalloc_varsize_clear_ptr = None
self.statistics_ptr = getfn(GCClass.statistics.im_func,
[s_gc, annmodel.SomeInteger()],
annmodel.SomeInteger())
# experimental gc_x_* operations
s_x_pool = annmodel.SomePtr(marksweep.X_POOL_PTR)
s_x_clone = annmodel.SomePtr(marksweep.X_CLONE_PTR)
# the x_*() methods use some regular mallocs that must be
# transformed in the normal way
self.x_swap_pool_ptr = getfn(GCClass.x_swap_pool.im_func,
[s_gc, s_x_pool],
s_x_pool,
minimal_transform = False)
self.x_clone_ptr = getfn(GCClass.x_clone.im_func,
[s_gc, s_x_clone],
annmodel.s_None,
minimal_transform = False)
annhelper.finish() # at this point, annotate all mix-level helpers
annhelper.backend_optimize()
self.collect_analyzer = CollectAnalyzer(self.translator)
self.collect_analyzer.analyze_all()
s_gc = self.translator.annotator.bookkeeper.valueoftype(GCClass)
r_gc = self.translator.rtyper.getrepr(s_gc)
self.c_const_gc = rmodel.inputconst(r_gc, self.gcdata.gc)
self.malloc_zero_filled = GCClass.malloc_zero_filled
HDR = self._gc_HDR = self.gcdata.gc.gcheaderbuilder.HDR
self._gc_fields = fields = []
for fldname in HDR._names:
FLDTYPE = getattr(HDR, fldname)
fields.append(('_' + fldname, FLDTYPE))
def build_root_walker(self):
return ShadowStackRootWalker(self)
def consider_constant(self, TYPE, value):
self.layoutbuilder.consider_constant(TYPE, value, self.gcdata.gc)
#def get_type_id(self, TYPE):
# this method is attached to the instance and redirects to
# layoutbuilder.get_type_id().
def finalizer_funcptr_for_type(self, TYPE):
return self.layoutbuilder.finalizer_funcptr_for_type(TYPE)
def gc_fields(self):
return self._gc_fields
def gc_field_values_for(self, obj):
hdr = self.gcdata.gc.gcheaderbuilder.header_of_object(obj)
HDR = self._gc_HDR
return [getattr(hdr, fldname) for fldname in HDR._names]
def finish_tables(self):
table = self.layoutbuilder.flatten_table()
log.info("assigned %s typeids" % (len(table), ))
log.info("added %s push/pop stack root instructions" % (
self.num_pushs, ))
if self.write_barrier_ptr:
log.info("inserted %s write barrier calls" % (
self.write_barrier_calls, ))
# replace the type_info_table pointer in gcdata -- at this point,
# the database is in principle complete, so it has already seen
# the delayed pointer. We need to force it to consider the new
# array now.
self.gcdata.type_info_table._become(table)
# XXX because we call inputconst already in replace_malloc, we can't
# modify the instance, we have to modify the 'rtyped instance'
# instead. horrors. is there a better way?
s_gcdata = self.translator.annotator.bookkeeper.immutablevalue(
self.gcdata)
r_gcdata = self.translator.rtyper.getrepr(s_gcdata)
ll_instance = rmodel.inputconst(r_gcdata, self.gcdata).value
addresses_of_static_ptrs = (
self.layoutbuilder.addresses_of_static_ptrs_in_nongc +
self.layoutbuilder.addresses_of_static_ptrs)
log.info("found %s static roots" % (len(addresses_of_static_ptrs), ))
additional_ptrs = self.layoutbuilder.additional_roots_sources
log.info("additional %d potential static roots" % additional_ptrs)
ll_static_roots_inside = lltype.malloc(lltype.Array(llmemory.Address),
len(addresses_of_static_ptrs) +
additional_ptrs,
immortal=True)
for i in range(len(addresses_of_static_ptrs)):
ll_static_roots_inside[i] = addresses_of_static_ptrs[i]
ll_instance.inst_static_root_start = llmemory.cast_ptr_to_adr(ll_static_roots_inside) + llmemory.ArrayItemsOffset(lltype.Array(llmemory.Address))
ll_instance.inst_static_root_nongcend = ll_instance.inst_static_root_start + llmemory.sizeof(llmemory.Address) * len(self.layoutbuilder.addresses_of_static_ptrs_in_nongc)
ll_instance.inst_static_root_end = ll_instance.inst_static_root_start + llmemory.sizeof(llmemory.Address) * len(addresses_of_static_ptrs)
newgcdependencies = []
newgcdependencies.append(ll_static_roots_inside)
self.write_typeid_list()
return newgcdependencies
def write_typeid_list(self):
"""write out the list of type ids together with some info"""
from pypy.tool.udir import udir
# XXX not ideal since it is not per compilation, but per run
f = udir.join("typeids.txt").open("w")
all = [(typeid, TYPE)
for TYPE, typeid in self.layoutbuilder.id_of_type.iteritems()]
all.sort()
for typeid, TYPE in all:
f.write("%s %s\n" % (typeid, TYPE))
f.close()
def transform_graph(self, graph):
if self.write_barrier_ptr:
self.initializing_stores = find_initializing_stores(
self.collect_analyzer, graph)
super(FrameworkGCTransformer, self).transform_graph(graph)
if self.write_barrier_ptr:
self.initializing_stores = None
def gct_direct_call(self, hop):
if self.collect_analyzer.analyze(hop.spaceop):
livevars = self.push_roots(hop)
self.default(hop)
self.pop_roots(hop, livevars)
else:
self.default(hop)
gct_indirect_call = gct_direct_call
def gct_fv_gc_malloc(self, hop, flags, TYPE, *args):
op = hop.spaceop
flavor = flags['flavor']
c_can_collect = rmodel.inputconst(lltype.Bool, not flags.get('nocollect', False))
PTRTYPE = op.result.concretetype
assert PTRTYPE.TO == TYPE
type_id = self.get_type_id(TYPE)
c_type_id = rmodel.inputconst(lltype.Signed, type_id)
info = self.layoutbuilder.type_info_list[type_id]
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
has_finalizer = bool(self.finalizer_funcptr_for_type(TYPE))
c_has_finalizer = rmodel.inputconst(lltype.Bool, has_finalizer)
if not op.opname.endswith('_varsize'):
#malloc_ptr = self.malloc_fixedsize_ptr
zero = flags.get('zero', False)
if (self.malloc_fast_ptr is not None and
c_can_collect.value and not c_has_finalizer.value and
(self.malloc_fast_is_clearing or not zero)):
malloc_ptr = self.malloc_fast_ptr
elif zero:
malloc_ptr = self.malloc_fixedsize_clear_ptr
else:
malloc_ptr = self.malloc_fixedsize_ptr
args = [self.c_const_gc, c_type_id, c_size, c_can_collect,
c_has_finalizer, rmodel.inputconst(lltype.Bool, False)]
else:
v_length = op.args[-1]
c_ofstolength = rmodel.inputconst(lltype.Signed, info.ofstolength)
c_varitemsize = rmodel.inputconst(lltype.Signed, info.varitemsize)
if (self.malloc_varsize_clear_fast_ptr is not None and
c_can_collect.value and not c_has_finalizer.value):
malloc_ptr = self.malloc_varsize_clear_fast_ptr
else:
malloc_ptr = self.malloc_varsize_clear_ptr
args = [self.c_const_gc, c_type_id, v_length, c_size,
c_varitemsize, c_ofstolength, c_can_collect,
c_has_finalizer]
livevars = self.push_roots(hop)
v_result = hop.genop("direct_call", [malloc_ptr] + args,
resulttype=llmemory.GCREF)
self.pop_roots(hop, livevars)
return v_result
gct_fv_gc_malloc_varsize = gct_fv_gc_malloc
def gct_fv_gc_coalloc(self, hop, coallocator, flags, TYPE, *args):
if self.coalloc_clear_ptr is None:
return self.gct_fv_gc_malloc(
hop, flags, TYPE, *args)
op = hop.spaceop
flavor = flags['flavor']
assert not flags.get("nocollect", False)
PTRTYPE = op.result.concretetype
assert PTRTYPE.TO == TYPE
type_id = self.get_type_id(TYPE)
c_type_id = rmodel.inputconst(lltype.Signed, type_id)
info = self.layoutbuilder.type_info_list[type_id]
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
has_finalizer = bool(self.finalizer_funcptr_for_type(TYPE))
assert not has_finalizer
v_coallocator = gen_cast(hop.llops, llmemory.Address, coallocator)
if not op.opname.endswith('_varsize'):
malloc_ptr = self.coalloc_clear_ptr
args = [self.c_const_gc, v_coallocator, c_type_id, c_size]
else:
v_length = op.args[-1]
c_ofstolength = rmodel.inputconst(lltype.Signed, info.ofstolength)
c_varitemsize = rmodel.inputconst(lltype.Signed, info.varitemsize)
malloc_ptr = self.coalloc_varsize_clear_ptr
args = [self.c_const_gc, v_coallocator, c_type_id, v_length, c_size,
c_varitemsize, c_ofstolength]
livevars = self.push_roots(hop)
v_result = hop.genop("direct_call", [malloc_ptr] + args,
resulttype=llmemory.GCREF)
self.pop_roots(hop, livevars)
return v_result
gct_fv_gc_coalloc_varsize = gct_fv_gc_coalloc
def gct_gc__collect(self, hop):
op = hop.spaceop
livevars = self.push_roots(hop)
hop.genop("direct_call", [self.collect_ptr, self.c_const_gc],
resultvar=op.result)
self.pop_roots(hop, livevars)
def gct_gc_x_swap_pool(self, hop):
op = hop.spaceop
[v_malloced] = op.args
hop.genop("direct_call",
[self.x_swap_pool_ptr, self.c_const_gc, v_malloced],
resultvar=op.result)
def gct_gc_x_clone(self, hop):
op = hop.spaceop
[v_clonedata] = op.args
hop.genop("direct_call",
[self.x_clone_ptr, self.c_const_gc, v_clonedata],
resultvar=op.result)
def gct_gc_x_size_header(self, hop):
op = hop.spaceop
c_result = rmodel.inputconst(lltype.Signed,
self.gcdata.gc.size_gc_header())
hop.genop("same_as",
[c_result],
resultvar=op.result)
def gct_zero_gc_pointers_inside(self, hop):
if not self.malloc_zero_filled:
v_ob = hop.spaceop.args[0]
TYPE = v_ob.concretetype.TO
gen_zero_gc_pointers(TYPE, v_ob, hop.llops)
def gct_weakref_create(self, hop):
op = hop.spaceop
type_id = self.get_type_id(WEAKREF)
c_type_id = rmodel.inputconst(lltype.Signed, type_id)
info = self.layoutbuilder.type_info_list[type_id]
c_size = rmodel.inputconst(lltype.Signed, info.fixedsize)
malloc_ptr = self.malloc_fixedsize_ptr
c_has_finalizer = rmodel.inputconst(lltype.Bool, False)
c_has_weakptr = c_can_collect = rmodel.inputconst(lltype.Bool, True)
args = [self.c_const_gc, c_type_id, c_size, c_can_collect,
c_has_finalizer, c_has_weakptr]
# push and pop the current live variables *including* the argument
# to the weakref_create operation, which must be kept alive and
# moved if the GC needs to collect
livevars = self.push_roots(hop, keep_current_args=True)
v_result = hop.genop("direct_call", [malloc_ptr] + args,
resulttype=llmemory.GCREF)
v_result = hop.genop("cast_opaque_ptr", [v_result],
resulttype=WEAKREFPTR)
self.pop_roots(hop, livevars)
# cast_ptr_to_adr must be done after malloc, as the GC pointer
# might have moved just now.
v_instance, = op.args
v_addr = hop.genop("cast_ptr_to_adr", [v_instance],
resulttype=llmemory.Address)
hop.genop("bare_setfield",
[v_result, rmodel.inputconst(lltype.Void, "weakptr"), v_addr])
v_weakref = hop.genop("cast_ptr_to_weakrefptr", [v_result],
resulttype=llmemory.WeakRefPtr)
hop.cast_result(v_weakref)
def gct_weakref_deref(self, hop):
v_wref, = hop.spaceop.args
v_addr = hop.genop("direct_call",
[self.weakref_deref_ptr, v_wref],
resulttype=llmemory.Address)
hop.cast_result(v_addr)
def gct_gc_id(self, hop):
if self.id_ptr is not None:
livevars = self.push_roots(hop)
[v_ptr] = hop.spaceop.args
v_ptr = hop.genop("cast_opaque_ptr", [v_ptr],
resulttype=llmemory.GCREF)
hop.genop("direct_call", [self.id_ptr, self.c_const_gc, v_ptr],
resultvar=hop.spaceop.result)
self.pop_roots(hop, livevars)
else:
hop.rename('cast_ptr_to_int') # works nicely for non-moving GCs
def gct_gc_set_max_heap_size(self, hop):
[v_size] = hop.spaceop.args
hop.genop("direct_call", [self.set_max_heap_size_ptr,
self.c_const_gc,
v_size])
def transform_generic_set(self, hop):
from pypy.objspace.flow.model import Constant
opname = hop.spaceop.opname
v_struct = hop.spaceop.args[0]
v_newvalue = hop.spaceop.args[-1]
assert opname in ('setfield', 'setarrayitem', 'setinteriorfield')
assert isinstance(v_newvalue.concretetype, lltype.Ptr)
# XXX for some GCs the skipping if the newvalue is a constant won't be
# ok
if (self.write_barrier_ptr is not None
and not isinstance(v_newvalue, Constant)
and v_struct.concretetype.TO._gckind == "gc"
and hop.spaceop not in self.initializing_stores):
self.write_barrier_calls += 1
v_oldvalue = hop.genop('g' + opname[1:],
hop.inputargs()[:-1],
resulttype=v_newvalue.concretetype)
v_oldvalue = hop.genop("cast_ptr_to_adr", [v_oldvalue],
resulttype = llmemory.Address)
v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
resulttype = llmemory.Address)
v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct],
resulttype = llmemory.Address)
hop.genop("direct_call", [self.write_barrier_ptr,
self.c_const_gc,
v_oldvalue,
v_newvalue,
v_structaddr])
hop.rename('bare_' + opname)
def var_needs_set_transform(self, var):
return var_needsgc(var)
def push_alive_nopyobj(self, var, llops):
pass
def pop_alive_nopyobj(self, var, llops):
pass
def get_livevars_for_roots(self, hop, keep_current_args=False):
if self.gcdata.gc.moving_gc and not keep_current_args:
# moving GCs don't borrow, so the caller does not need to keep
# the arguments alive
livevars = [var for var in hop.livevars_after_op()
if not var_ispyobj(var)]
else:
livevars = hop.livevars_after_op() + hop.current_op_keeps_alive()
livevars = [var for var in livevars if not var_ispyobj(var)]
return livevars
def push_roots(self, hop, keep_current_args=False):
if self.incr_stack_ptr is None:
return
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
if not livevars:
return []
c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
base_addr = hop.genop("direct_call", [self.incr_stack_ptr, c_len ],
resulttype=llmemory.Address)
c_type = rmodel.inputconst(lltype.Void, llmemory.Address)
for k,var in enumerate(livevars):
c_k = rmodel.inputconst(lltype.Signed, k)
v_adr = gen_cast(hop.llops, llmemory.Address, var)
hop.genop("raw_store", [base_addr, c_type, c_k, v_adr])
return livevars
def pop_roots(self, hop, livevars):
if self.decr_stack_ptr is None:
return
if not livevars:
return
c_len = rmodel.inputconst(lltype.Signed, len(livevars) )
base_addr = hop.genop("direct_call", [self.decr_stack_ptr, c_len ],
resulttype=llmemory.Address)
if self.gcdata.gc.moving_gc:
# for moving collectors, reload the roots into the local variables
c_type = rmodel.inputconst(lltype.Void, llmemory.Address)
for k,var in enumerate(livevars):
c_k = rmodel.inputconst(lltype.Signed, k)
v_newaddr = hop.genop("raw_load", [base_addr, c_type, c_k],
resulttype=llmemory.Address)
hop.genop("gc_reload_possibly_moved", [v_newaddr, var])
def compute_borrowed_vars(self, graph):
# XXX temporary workaround, should be done more correctly
if self.gcdata.gc.moving_gc:
return lambda v: False
return super(FrameworkGCTransformer, self).compute_borrowed_vars(graph)
class TransformerLayoutBuilder(gctypelayout.TypeLayoutBuilder):
def __init__(self, transformer):
super(TransformerLayoutBuilder, self).__init__()
self.transformer = transformer
self.offsettable_cache = {}
def make_finalizer_funcptr_for_type(self, TYPE):
rtti = get_rtti(TYPE)
if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
destrptr = rtti._obj.destructor_funcptr
DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
else:
destrptr = None
DESTR_ARG = None
assert not type_contains_pyobjs(TYPE), "not implemented"
if destrptr:
def ll_finalizer(addr):
v = llmemory.cast_adr_to_ptr(addr, DESTR_ARG)
ll_call_destructor(destrptr, v)
fptr = self.transformer.annotate_finalizer(ll_finalizer,
[llmemory.Address],
lltype.Void)
else:
fptr = lltype.nullptr(gctypelayout.GCData.FINALIZERTYPE.TO)
return fptr
def gen_zero_gc_pointers(TYPE, v, llops, previous_steps=None):
if previous_steps is None:
previous_steps = []
assert isinstance(TYPE, lltype.Struct)
for name in TYPE._names:
c_name = rmodel.inputconst(lltype.Void, name)
FIELD = getattr(TYPE, name)
if isinstance(FIELD, lltype.Ptr) and FIELD._needsgc():
c_null = rmodel.inputconst(FIELD, lltype.nullptr(FIELD.TO))
if not previous_steps:
llops.genop('bare_setfield', [v, c_name, c_null])
else:
llops.genop('bare_setinteriorfield',
[v] + previous_steps + [c_name, c_null])
elif isinstance(FIELD, lltype.Struct):
gen_zero_gc_pointers(FIELD, v, llops, previous_steps + [c_name])
# ____________________________________________________________
sizeofaddr = llmemory.sizeof(llmemory.Address)
class BaseRootWalker:
need_root_stack = False
def __init__(self, gctransformer):
self.gcdata = gctransformer.gcdata
self.gc = self.gcdata.gc
def _freeze_(self):
return True
def setup_root_walker(self):
pass
def append_static_root(self, adr):
self.gcdata.static_root_end.address[0] = adr
self.gcdata.static_root_end += sizeofaddr
def walk_roots(self, collect_stack_root,
collect_static_in_prebuilt_nongc,
collect_static_in_prebuilt_gc):
gcdata = self.gcdata
gc = self.gc
if collect_static_in_prebuilt_nongc:
addr = gcdata.static_root_start
end = gcdata.static_root_nongcend
while addr != end:
result = addr.address[0]
if result.address[0] != llmemory.NULL:
collect_static_in_prebuilt_nongc(gc, result)
addr += sizeofaddr
if collect_static_in_prebuilt_gc:
addr = gcdata.static_root_nongcend
end = gcdata.static_root_end
while addr != end:
result = addr.address[0]
if result.address[0] != llmemory.NULL:
collect_static_in_prebuilt_gc(gc, result)
addr += sizeofaddr
if collect_stack_root:
self.walk_stack_roots(collect_stack_root) # abstract
class ShadowStackRootWalker(BaseRootWalker):
need_root_stack = True
def __init__(self, gctransformer):
BaseRootWalker.__init__(self, gctransformer)
self.rootstacksize = sizeofaddr * gctransformer.root_stack_depth
# NB. 'self' is frozen, but we can use self.gcdata to store state
gcdata = self.gcdata
def incr_stack(n):
top = gcdata.root_stack_top
gcdata.root_stack_top = top + n*sizeofaddr
return top
self.incr_stack = incr_stack
def decr_stack(n):
top = gcdata.root_stack_top - n*sizeofaddr
gcdata.root_stack_top = top
return top
self.decr_stack = decr_stack
def setup_root_walker(self):
stackbase = llmemory.raw_malloc(self.rootstacksize)
ll_assert(bool(stackbase), "could not allocate root stack")
llmemory.raw_memclear(stackbase, self.rootstacksize)
self.gcdata.root_stack_top = stackbase
self.gcdata.root_stack_base = stackbase
def walk_stack_roots(self, collect_stack_root):
gcdata = self.gcdata
gc = self.gc
addr = gcdata.root_stack_base
end = gcdata.root_stack_top
while addr != end:
if addr.address[0] != llmemory.NULL:
collect_stack_root(gc, addr)
addr += sizeofaddr
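# Illustration (not part of the transformer): a plain-Python sketch of the
# shadow-stack discipline that push_roots/pop_roots implement above. The names
# below are invented for the sketch; the real code stores raw addresses with
# raw_store and reloads them with raw_load/gc_reload_possibly_moved, because a
# moving collector may have relocated the objects during the call.
#
# shadow_stack = []
# def push_roots(live_ptrs):            # cf. incr_stack(len(live_ptrs))
#     base = len(shadow_stack)
#     shadow_stack.extend(live_ptrs)
#     return base
# def pop_roots(base):                  # cf. decr_stack(n), then reload roots
#     moved = shadow_stack[base:]
#     del shadow_stack[base:]
#     return moved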
|
the-stack_106_26265 | """
Platform support for Programs.
This package is a thin wrapper around interactions with the Programs service,
supporting learner- and author-facing features involving that service
if and only if the service is deployed in the Open edX installation.
To ensure maximum separation of concerns, and a minimum of interdependencies,
this package should be kept small, thin, and stateless.
"""
default_app_config = 'openedx.core.djangoapps.programs.apps.ProgramsConfig'
from edx_toggles.toggles import LegacyWaffleSwitch, LegacyWaffleSwitchNamespace # lint-amnesty, pylint: disable=wrong-import-position
PROGRAMS_WAFFLE_SWITCH_NAMESPACE = LegacyWaffleSwitchNamespace(name='programs')
# This is meant to stay enabled until https://openedx.atlassian.net/browse/LEARNER-5573 is resolved
ALWAYS_CALCULATE_PROGRAM_PRICE_AS_ANONYMOUS_USER = LegacyWaffleSwitch( # lint-amnesty, pylint: disable=toggle-missing-annotation
PROGRAMS_WAFFLE_SWITCH_NAMESPACE,
'always_calculate_program_price_as_anonymous_user',
__name__
)
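# Usage sketch (hedged): LegacyWaffleSwitch exposes an is_enabled() check, so a
# caller would typically gate the anonymous-user price calculation roughly like
# this (the helper name below is hypothetical, not part of this module):
#
# if ALWAYS_CALCULATE_PROGRAM_PRICE_AS_ANONYMOUS_USER.is_enabled():
#     price = get_program_price_for_anonymous_user(program_uuid)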
|
the-stack_106_26266 | import torch
import torch.nn as nn
from . import odeint
from . import odeint_err
from .misc import _flatten, _flatten_convert_none_to_zeros
class OdeintAdjointMethod(torch.autograd.Function):
total_err=[]
@staticmethod
def forward(ctx, *args):
        assert len(args) >= 12, 'Internal error: all arguments required.'
(y0, func, t, flat_params, rtol, atol, method, options, adjoint_rtol, adjoint_atol, adjoint_method,
adjoint_options) = (args[:-11], args[-11], args[-10], args[-9], args[-8], args[-7], args[-6], args[-5],
args[-4], args[-3], args[-2], args[-1])
(ctx.func, ctx.adjoint_rtol, ctx.adjoint_atol, ctx.adjoint_method,
ctx.adjoint_options) = func, adjoint_rtol, adjoint_atol, adjoint_method, adjoint_options
with torch.no_grad():
ans, err = odeint_err(func, y0, t, rtol=rtol, atol=atol, method=method, options=options)
OdeintAdjointMethod.total_err += err
ctx.save_for_backward(t, flat_params, *ans)
return ans
@staticmethod
def backward(ctx, *grad_output):
t, flat_params, *ans = ctx.saved_tensors
ans = tuple(ans)
(func, adjoint_rtol, adjoint_atol, adjoint_method,
adjoint_options) = ctx.func, ctx.adjoint_rtol, ctx.adjoint_atol, ctx.adjoint_method, ctx.adjoint_options
n_tensors = len(ans)
f_params = tuple(func.parameters())
# TODO: use a nn.Module and call odeint_adjoint to implement higher order derivatives.
def augmented_dynamics(t, y_aug):
# Dynamics of the original system augmented with
# the adjoint wrt y, and an integrator wrt t and args.
y, adj_y = y_aug[:n_tensors], y_aug[n_tensors:2 * n_tensors] # Ignore adj_time and adj_params.
with torch.set_grad_enabled(True):
t = t.to(y[0].device).detach().requires_grad_(True)
y = tuple(y_.detach().requires_grad_(True) for y_ in y)
func_eval = func(t, y)
vjp_t, *vjp_y_and_params = torch.autograd.grad(
func_eval, (t,) + y + f_params,
tuple(-adj_y_ for adj_y_ in adj_y), allow_unused=True, retain_graph=True
)
vjp_y = vjp_y_and_params[:n_tensors]
vjp_params = vjp_y_and_params[n_tensors:]
# autograd.grad returns None if no gradient, set to zero.
vjp_t = torch.zeros_like(t) if vjp_t is None else vjp_t
vjp_y = tuple(torch.zeros_like(y_) if vjp_y_ is None else vjp_y_ for vjp_y_, y_ in zip(vjp_y, y))
vjp_params = _flatten_convert_none_to_zeros(vjp_params, f_params)
if len(f_params) == 0:
vjp_params = torch.tensor(0.).to(vjp_y[0])
return (*func_eval, *vjp_y, vjp_t, vjp_params)
T = ans[0].shape[0]
with torch.no_grad():
adj_y = tuple(grad_output_[-1] for grad_output_ in grad_output)
adj_params = torch.zeros_like(flat_params)
adj_time = torch.tensor(0.).to(t)
time_vjps = []
for i in range(T - 1, 0, -1):
ans_i = tuple(ans_[i] for ans_ in ans)
grad_output_i = tuple(grad_output_[i] for grad_output_ in grad_output)
func_i = func(t[i], ans_i)
# Compute the effect of moving the current time measurement point.
dLd_cur_t = sum(
torch.dot(func_i_.reshape(-1), grad_output_i_.reshape(-1)).reshape(1)
for func_i_, grad_output_i_ in zip(func_i, grad_output_i)
)
adj_time = adj_time - dLd_cur_t
time_vjps.append(dLd_cur_t)
# Run the augmented system backwards in time.
if adj_params.numel() == 0:
adj_params = torch.tensor(0.).to(adj_y[0])
aug_y0 = (*ans_i, *adj_y, adj_time, adj_params)
aug_ans = odeint(
augmented_dynamics, aug_y0,
torch.tensor([t[i], t[i - 1]]),
rtol=adjoint_rtol, atol=adjoint_atol, method=adjoint_method, options=adjoint_options
)
# Unpack aug_ans.
adj_y = aug_ans[n_tensors:2 * n_tensors]
adj_time = aug_ans[2 * n_tensors]
adj_params = aug_ans[2 * n_tensors + 1]
adj_y = tuple(adj_y_[1] if len(adj_y_) > 0 else adj_y_ for adj_y_ in adj_y)
if len(adj_time) > 0: adj_time = adj_time[1]
if len(adj_params) > 0: adj_params = adj_params[1]
adj_y = tuple(adj_y_ + grad_output_[i - 1] for adj_y_, grad_output_ in zip(adj_y, grad_output))
del aug_y0, aug_ans
time_vjps.append(adj_time)
time_vjps = torch.cat(time_vjps[::-1])
return (*adj_y, None, time_vjps, adj_params, None, None, None, None, None, None, None, None)
def odeint_adjoint(func, y0, t, rtol=1e-6, atol=1e-12, method=None, options=None, adjoint_rtol=None, adjoint_atol=None,
adjoint_method=None, adjoint_options=None):
# We need this in order to access the variables inside this module,
# since we have no other way of getting variables along the execution path.
if not isinstance(func, nn.Module):
raise ValueError('func is required to be an instance of nn.Module.')
if adjoint_rtol is None:
adjoint_rtol = rtol
if adjoint_atol is None:
adjoint_atol = atol
if adjoint_method is None:
adjoint_method = method
if adjoint_options is None:
adjoint_options = options
tensor_input = False
if torch.is_tensor(y0):
class TupleFunc(nn.Module):
def __init__(self, base_func):
super(TupleFunc, self).__init__()
self.base_func = base_func
def forward(self, t, y):
return (self.base_func(t, y[0]),)
tensor_input = True
y0 = (y0,)
func = TupleFunc(func)
flat_params = _flatten(func.parameters())
ys = OdeintAdjointMethod.apply(*y0, func, t, flat_params, rtol, atol, method, options, adjoint_rtol, adjoint_atol,
adjoint_method, adjoint_options)
err = OdeintAdjointMethod.total_err
OdeintAdjointMethod.total_err=[]
if tensor_input:
ys = ys[0]
    return ys, err
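# Usage sketch (the ODE below is invented; only the call pattern matters):
# odeint_adjoint integrates an nn.Module right-hand side and returns the
# trajectory together with the error estimates accumulated in the forward pass.
#
# class Decay(nn.Module):
#     def forward(self, t, y):
#         return -0.5 * y
#
# y0 = torch.tensor([2.0], requires_grad=True)
# t = torch.linspace(0., 1., 10)
# ys, err = odeint_adjoint(Decay(), y0, t, rtol=1e-6, atol=1e-8)
# ys.sum().backward()   # gradients flow through the adjoint backward pass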
|
the-stack_106_26267 | from common.numpy_fast import clip
from selfdrive.car.ford.values import MAX_ANGLE
def create_steer_command(packer, angle_cmd, enabled, angle_steers, action, angleReq, sappConfig, sappChime):
"""Creates a CAN message for the Ford Steer Command."""
#if enabled and lkas available:
#if enabled: # and (frame % 500) >= 3:
# action = lkas_action
# angle_cmd = angle_steers/MAX_ANGLE
#else:
# action = 0xf
# angle_cmd = angle_steers/MAX_ANGLE
#angle_cmd = clip(angle_cmd * MAX_ANGLE, - MAX_ANGLE, MAX_ANGLE)
values = {
"ApaSys_D_Stat": action,
"EPASExtAngleStatReq": angleReq,
"ExtSteeringAngleReq2": angle_cmd,
"SAPPStatusCoding": sappConfig,
"ApaChime_D_Rq": sappChime,
}
return packer.make_can_msg("ParkAid_Data", 2, values)
def create_ds_118(packer, filler1, filler2, filler3, brakectr, awdlckmax, awdlckmn, drvstate, drvtq, emergbrk, stoplmp, angle):
"""Creates a CAN message for the Ford 118 message."""
values = {
"BrkCtrFnd_B_Stat": brakectr,
"AwdLck_Tq_RqMx": awdlckmax,
"AwdLck_Tq_RqMn": awdlckmn,
"DrvSte_D_Stat": drvstate,
"DrvSte_Tq_Rq": drvtq,
"EmgcyBrkLamp_D_Rq": emergbrk,
"StopLamp_B_RqBrk": stoplmp,
"SteWhlRelInit_An_Sns": angle,
"DS_Filler_1": filler1,
"DS_Filler_2": filler2,
"DS_Filler_3": filler3,
}
return packer.make_can_msg("BrakeSnData_5", 2, values)
def create_speed_command(packer, speed, trlraid, actlnocs, actlnocnt, actlqf, gear):
"""Creates a CAN message for the Ford Speed Command."""
values = {
"VehVTrlrAid_B_Avail": trlraid,
"VehVActlEng_No_Cs": actlnocs,
"VehVActlEng_No_Cnt": actlnocnt,
"VehVActlEng_D_Qf": actlqf,
"GearRvrse_D_Actl": gear,
"Veh_V_ActlEng": speed,
}
return packer.make_can_msg("EngVehicleSpThrottle2", 2, values)
def create_speed_command2(packer, speed3, lsmcdecel, actlbrknocs, actlbrknocnt, actlbrkqf):
"""Creates a CAN message for the Ford Speed Command."""
values = {
"Veh_V_ActlBrk": speed3,
"LsmcBrkDecel_D_Stat": lsmcdecel,
"VehVActlBrk_No_Cs": actlbrknocs,
"VehVActlBrk_No_Cnt": actlbrknocnt,
"VehVActlBrk_D_Qf": actlbrkqf,
}
return packer.make_can_msg("BrakeSysFeatures", 2, values)
def create_lkas_ui(packer, main_on, enabled, steer_alert, defog, ahbc, ahbcramping, config, noipma, stats, persipma, dasdsply, x30):
"""Creates a CAN message for the Ford Steer Ui."""
if enabled:
lines = 0x6
else:
lines = 0xc
values = {
"PersIndexIpma_D_Actl": persipma,
"DasStats_D_Dsply": dasdsply,
"Set_Me_X30": x30,
"Lines_Hud": lines,
"Hands_Warning_W_Chime": steer_alert,
"CamraDefog_B_Req": defog,
"AhbHiBeam_D_Rq": ahbc,
"AhbcRampingV_D_Rq": ahbcramping,
"FeatConfigIpmaActl": config,
"FeatNoIpmaActl": noipma,
"CamraStats_D_Dsply": stats,
}
return packer.make_can_msg("Lane_Keep_Assist_Ui", 0, values)
def spam_cancel_button(packer):
values = {
"Cancel": 1
}
return packer.make_can_msg("Steering_Buttons", 0, values)
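# Usage sketch (hedged): these helpers are driven by a CAN packer built from the
# Ford DBC. The import path follows openpilot-era opendbc; the DBC name below is
# a placeholder, not a value confirmed by this file.
#
# from opendbc.can.packer import CANPacker
# packer = CANPacker("ford_dbc_name_here")
# steer_msg = create_steer_command(packer, angle_cmd=0.0, enabled=False,
#                                  angle_steers=0.0, action=0xf, angleReq=0,
#                                  sappConfig=0, sappChime=0)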
|
the-stack_106_26269 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import cv2
import os
import sys
cv2v = cv2.__version__
if(cv2v[0] >= '3'):
flagCapturePosFrame = cv2.CAP_PROP_POS_FRAMES
elif(cv2v[0] == '2'):
flagCapturePosFrame = cv2.cv.CV_CAP_PROP_POS_FRAMES
if len(sys.argv) == 2:
videoName = sys.argv[1]
else:
print ("Example of usage: python VideoAnnotation.py video.mp4")
videoName = 'RM4.mp4'
if os.path.exists(videoName):
    print ("Using video file: " + videoName)
else:
    sys.exit("Error: video file not found: " + videoName)
foldName = videoName.split('.')[0]
foldLabel = foldName + '/labels'
foldJpeg = foldName + '/JPEGImages'
foldGT = foldName + '/Ground'
foldAugment = foldJpeg
if not os.path.exists(foldName):
os.mkdir(foldName)
if not os.path.exists(foldLabel):
os.mkdir(foldLabel)
if not os.path.exists(foldJpeg):
os.mkdir(foldJpeg)
if not os.path.exists(foldGT):
os.mkdir(foldGT)
if not os.path.exists(foldAugment):
os.mkdir(foldAugment)
drawRect = False
startRect = []
endRect = []
iClass = []
key = -1
color = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(255,0,255),(0,255,255),(0, 51, 102), (102, 51, 0),(0,0,0),(255,255,255)]
secColor = [(int(i * 0.7), int(j * 0.7), int(k * 0.7)) for (i,j,k) in color]
color.extend(secColor)
shiftColor = 10
actual = 0
lenBbox = 0
lastFrameSkip = 0
def draw(event,x,y,flags,param):
global drawRect
global startRect
global frame
global endRect
global oriFrame
global key
global color
global actual
global lenBbox
if event == cv2.EVENT_LBUTTONDOWN:
drawRect = True
startRect.append((x, y))
endRect.append((x, y))
iClass.append(cv2.getTrackbarPos('ID','VideoTag'))
lenBbox = len(startRect)
actual = lenBbox
elif event == cv2.EVENT_MOUSEWHEEL:
if actual > 0:
iClass[actual-1] = (iClass[actual-1] + 1 ) % NUM_OF_CLASS
elif event == cv2.EVENT_LBUTTONUP:
drawRect = False
elif event == cv2.EVENT_MOUSEMOVE:
if drawRect:
endRect[actual-1] = (x, y)
frame = oriFrame.copy()
elif event == cv2.EVENT_RBUTTONDOWN:
frame = oriFrame.copy()
if len(startRect) > 0:
startRect.pop()
if len(endRect) > 0:
endRect.pop()
if len(iClass) > 0:
iClass.pop()
if actual > 0:
actual -= 1
def imview(src, bbox):
height, width, _ = src.shape
    # ID, xcenter/width, ycenter/height, objwidth/width, objheight/height
iClass, xYolo, yYolo, widthYolo, heightYolo = bbox
xCenter = int( xYolo * width )
yCenter = int( yYolo * height )
objWidth = int( widthYolo * width )
objHeight = int ( heightYolo * height )
ul = (xCenter - objWidth/2, yCenter - objHeight/2)
br = (xCenter + objWidth/2, yCenter + objHeight/2)
cv2.rectangle(src,ul, br, (0,255,0),3)
cv2.imshow("src", src)
cv2.waitKey(10000)
cv2.destroyAllWindows()
# bbox = (2, 0.79765625, 0.705208333333, 0.1015625 ,0.202083333333)
# src = cv2.imread('/home/kaka/Desktop/SimpleVideoAnnotation/atHome004/JPEGImages/000000.jpg')
def VOCtoRect(vocLabel, imgW=1024, imgH=640):
"""
vocLabel:
    Class;
    absoluteX/imgWidth; absoluteY/imgHeight;
    absoluteWidth/imgWidth; absoluteHeight/imgHeight
"""
xMean = vocLabel[1] * imgW
yMean = vocLabel[2] * imgH
deltaX = vocLabel[3] * imgW
deltaY = vocLabel[4] * imgH
dX = deltaX/2
dY = deltaY/2
xMin = int(xMean - dX)
xMax = int(xMean + dX)
yMin = int(yMean - dY)
yMax = int(yMean + dY)
return ([(xMin, yMin), (xMax, yMax)])
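# Worked example (illustrative numbers): the YOLO-style label
# (2, 0.5, 0.5, 0.25, 0.25) on a 1024x640 frame has centre (512, 320) and a
# 256x160 box, so VOCtoRect returns [(384, 240), (640, 400)].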
def nothing(x):
pass
NUM_OF_CLASS = (10-1)
cv2.namedWindow("VideoTag", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("VideoTag", draw)
cv2.createTrackbar("ID", "VideoTag",0, NUM_OF_CLASS, nothing)
cv2.createTrackbar("Jump", "VideoTag",1, 10, nothing)
cv2.createTrackbar("SkipFrames", "VideoTag",1, 300, nothing)
cap = cv2.VideoCapture(videoName)
if not cap.isOpened():
print ("Video not found or Opencv without ffmpeg")
framePos = 0
ret, oriFrame = cap.read()
if not ret:
print("Error reading video")
exit()
frame = oriFrame.copy()
height, width, _ = frame.shape
oldId = 0
actualId = 0
while( cap.isOpened() and ret ):
frame = oriFrame.copy()
jump = cv2.getTrackbarPos('Jump','VideoTag')
for values in range(0, len(startRect)):
if values == actual-1:
col = color[iClass[values]]
thick = 2
else:
col = color[iClass[values] + shiftColor]
thick = 2
cv2.rectangle(frame, startRect[values], endRect[values], col, thick)
cv2.imshow("VideoTag", frame)
key = (cv2.waitKey(1) & 0xFF)
if key == 255:
continue
if key == ord('q'):
break
if actual <= len(startRect) and actual > 0 :
if key == ord('w'):
startRect[actual-1] = (startRect[actual-1][0], startRect[actual-1][1]-jump)
endRect[actual-1] = (endRect[actual-1][0], endRect[actual-1][1]-jump)
if key == ord('s'):
startRect[actual-1] = (startRect[actual-1][0], startRect[actual-1][1]+jump)
endRect[actual-1] = (endRect[actual-1][0], endRect[actual-1][1]+jump)
if key == ord('a'):
startRect[actual-1] = (startRect[actual-1][0]-jump, startRect[actual-1][1])
endRect[actual-1] = (endRect[actual-1][0]-jump, endRect[actual-1][1])
if key == ord('d'):
startRect[actual-1] = (startRect[actual-1][0]+jump, startRect[actual-1][1])
endRect[actual-1] = (endRect[actual-1][0]+jump, endRect[actual-1][1])
if key == ord('6'):
startRect[actual-1] = (startRect[actual-1][0]-jump, startRect[actual-1][1])
endRect[actual-1] = (endRect[actual-1][0]+jump, endRect[actual-1][1])
if key == ord('4'):
startRect[actual-1] = (startRect[actual-1][0]+jump, startRect[actual-1][1])
endRect[actual-1] = (endRect[actual-1][0]-jump, endRect[actual-1][1])
if key == ord('8'):
startRect[actual-1] = (startRect[actual-1][0], startRect[actual-1][1]-jump)
endRect[actual-1] = (endRect[actual-1][0], endRect[actual-1][1]+jump)
if key == ord('5'):
startRect[actual-1] = (startRect[actual-1][0], startRect[actual-1][1]+jump)
endRect[actual-1] = (endRect[actual-1][0], endRect[actual-1][1]-jump)
if key == ord('*'):
iClass[actual-1] = (iClass[actual-1] + 1 ) % NUM_OF_CLASS
if key == ord('/'):
iClass[actual-1] = (iClass[actual-1] - 1 ) % NUM_OF_CLASS
if key == ord('c'):
startRect.append(startRect[-1])
endRect.append(endRect[-1])
iClass.append(iClass[-1])
lenBbox = len(startRect)
actual = lenBbox
if key == ord('-'):
del startRect[actual-1]
del endRect[actual-1]
del iClass[actual-1]
if(actual < 0): actual = 0
lenBbox = len(startRect)
if key == ord('9') and lenBbox > 0:
actual = ((actual)%(lenBbox)) + 1
if key == ord('7') and lenBbox > 0:
actual -= 1
if actual == 0:
actual = lenBbox
if key == ord('z'):
skip = cv2.getTrackbarPos('SkipFrames','VideoTag')
actualPosFrame = cap.get(flagCapturePosFrame)
newFramePos = actualPosFrame-skip
if newFramePos < 0:
newFramePos = 0
cap.set(flagCapturePosFrame, newFramePos)
ret, oriFrame = cap.read()
framePos -= 1
if framePos < 0:
framePos = 0
if key == ord('r'):
path_r = foldLabel+"/{:06d}.txt".format(framePos)
if (not os.path.exists(path_r)):
path_r = foldLabel + '/000000.txt'
if (os.path.exists(path_r)):
startRect, endRect, iClass = [], [], []
actual = 0
with open(path_r, 'r') as f:
lines = f.readlines()
for line in sorted(lines, key=lambda t: t[2:]):
voc = [float(i) for i in line.split()]
rect = VOCtoRect(voc, width, height)
iClass.append(int(voc[0]))
startRect.append(rect[0])
endRect.append(rect[1])
lenBbox = len(startRect)
actual = 1
if key == (32):
vocLabel = []
flipLabel = []
rotLabel = []
save = False
for values in range(0,len(startRect)):
frameWidth, frameHeight = abs(startRect[values][0]-endRect[values][0]), abs(startRect[values][1]-endRect[values][1])
xCenter, yCenter = abs((startRect[values][0]+endRect[values][0])/2.0), abs((startRect[values][1]+endRect[values][1])/2.0)
xVoc, yVoc = xCenter/width, yCenter/height
if frameWidth < 4 or frameHeight < 4:
continue
save = True
widthVoc = float(frameWidth)/width
heightVoc = float(frameHeight)/height
vocClass = (iClass[values], xVoc,yVoc, widthVoc, heightVoc, '\n')
vocLabel.append(' '.join(str(e) + '' for e in vocClass))
flipClass = (iClass[values], 1-xVoc,yVoc, widthVoc, heightVoc, '\n')
flipLabel.append(' '.join(str(e) + '' for e in flipClass))
rotX = (height - yCenter)/height
rotY = xVoc
rotClass = (iClass[values], rotX, rotY, heightVoc, widthVoc, '\n')
rotLabel.append(' '.join(str(e) + '' for e in rotClass))
if save == False:
actualPosFrame = cap.get(flagCapturePosFrame)
skip = cv2.getTrackbarPos('SkipFrames','VideoTag')
if (actualPosFrame+skip > lastFrameSkip):
cap.set(flagCapturePosFrame,actualPosFrame+skip)
ret, oriFrame = cap.read()
lastFrameSkip = actualPosFrame+skip
else:
ret = False;
continue
fileName = "/{:06d}.jpg".format(framePos)
cv2.imwrite(foldGT+fileName, frame)
cv2.imwrite(foldJpeg+fileName, oriFrame)
###
fileName = "/{:06d}_F.jpg".format(framePos)
flip = cv2.flip(oriFrame, 1)
cv2.imwrite(foldAugment+fileName, flip)
###
fileName = "/{:06d}_R.jpg".format(framePos)
rot = cv2.rotate(oriFrame, 0)
cv2.imwrite(foldAugment+fileName, rot)
with open(foldLabel+"/{:06d}.txt".format(framePos), 'w') as f:
for labels in vocLabel:
f.write(labels)
with open(foldLabel+"/{:06d}_F.txt".format(framePos), 'w') as f:
for labels in flipLabel:
f.write(labels)
with open(foldLabel+"/{:06d}_R.txt".format(framePos), 'w') as f:
for labels in rotLabel:
f.write(labels)
actualPosFrame = cap.get(flagCapturePosFrame)
skip = cv2.getTrackbarPos('SkipFrames','VideoTag')
if (actualPosFrame+skip > lastFrameSkip):
cap.set(flagCapturePosFrame,actualPosFrame+skip)
ret, oriFrame = cap.read()
lastFrameSkip = actualPosFrame+skip
else:
ret = False;
framePos += 1
oldId = actualId
command = 'ls -d '+ os.getcwd() +'/{}/JPEGImages/* > '.format(foldName) + os.getcwd() + '/{}/imgList.txt'.format(foldName)
os.system(command)
cap.release()
cv2.destroyAllWindows()
|
the-stack_106_26270 | # Imports
import logging
import socket
import logging
from functools import wraps
from flask_login import current_user, LoginManager
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
from functools import wraps
# Config
app = Flask(__name__)
app.config['SECRET_KEY'] = 'LongAndRandomSecretKey'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///calculator.db'
app.config['RECAPTCHA_PUBLIC_KEY'] = "6LfFdRMcAAAAAEeOwLocqoG8LhRNZhE0TYF8MdMG"
app.config['RECAPTCHA_PRIVATE_KEY'] = "6LfFdRMcAAAAAILSgmbrJcTLnkDV5fG-xwPzyoR4"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Functions
def requires_roles(*roles):
def wrapper(f):
@wraps(f)
def wrapped(*args, **kwargs):
if current_user.role not in roles:
# Send unauthorised access attempt notification to security log file.
logging.warning('SECURITY - Unauthorised access attempt [%s, %s, %s, %s]',
current_user.id,
current_user.email,
current_user.role,
request.remote_addr)
# redirect the user to an unauthorised notice!
return render_template('403.html')
return f(*args, **kwargs)
return wrapped
return wrapper
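# Usage sketch (route, view name and template are illustrative; assumes
# flask_login's login_required is imported where the view is defined):
#
# @app.route('/admin')
# @login_required
# @requires_roles('admin')
# def admin_dashboard():
#     return render_template('admin/admin.html')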
# Home Page View
@app.route('/')
def index():
if current_user.is_authenticated and current_user.role == 'admin':
return render_template('profile.html')
else:
return render_template('index.html')
# Error Page Views
@app.errorhandler(400)
def bad_request(error):
return render_template('400.html'), 400
@app.errorhandler(403)
def page_forbidden(error):
return render_template('403.html'), 403
@app.errorhandler(404)
def page_not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
@app.errorhandler(503)
def service_unavailable(error):
return render_template('503.html'), 503
if __name__ == '__main__':
my_host = "127.0.0.1"
free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
free_socket.bind((my_host, 0))
free_socket.listen(5)
# free_port = free_socket.getsockname()[1]
free_port = 5000
free_socket.close()
# Login Manager
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.init_app(app)
from models import User
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
# BLUEPRINTS
# import blueprints
from users.views import users_blueprint
from calculator.views import calculator_blueprint
from admin.views import admin_blueprint
from test.views import quiz_blueprint
# register blueprints with app
app.register_blueprint(users_blueprint)
app.register_blueprint(calculator_blueprint)
app.register_blueprint(admin_blueprint)
app.register_blueprint(quiz_blueprint)
app.run(host=my_host, port=free_port, debug=True)
|
the-stack_106_26271 | #!/usr/bin/env python3
# https://adventofcode.com/2021/day/1
INPUT_FILE = '../input/1.txt'
def file_to_ints(input_file):
"""
Input: A file containing one number per line
Output: An int iterable
Blank lines and lines starting with '#' are ignored
"""
with open(input_file) as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
yield int(line)
def get_increases(array):
"""
Input: An array of numbers
Output: The number of elements larger than the one preceding it
"""
prev = None
inc = 0
for n in array:
        if prev is not None and n > prev:
inc += 1
prev = n
return inc
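# Worked example: get_increases([199, 200, 208, 210, 200]) == 3
# (200 > 199, 208 > 200 and 210 > 208 count; the final drop to 200 does not).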
def main():
depths = file_to_ints(INPUT_FILE)
increases = get_increases(depths)
print(increases)
if __name__ == '__main__':
main()
|
the-stack_106_26272 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from codecs import open
from setuptools import setup, find_packages
# HISTORY.rst entry.
VERSION = '0.1.0'
try:
from azext_datamigration.manual.version import VERSION
except ImportError:
pass
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = []
try:
from azext_datamigration.manual.dependency import DEPENDENCIES
except ImportError:
pass
with open('README.md', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='datamigration',
version=VERSION,
description='Microsoft Azure Command-Line Tools DataMigrationManagementClient Extension',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-cli-extensions/tree/master/src/datamigration',
long_description=README + '\n\n' + HISTORY,
license='MIT',
classifiers=CLASSIFIERS,
packages=find_packages(),
install_requires=DEPENDENCIES,
package_data={'azext_datamigration': ['azext_metadata.json']},
)
|
the-stack_106_26276 | #!/usr/bin/env python3
import math
import torch
from .. import settings
from .variational_strategy import VariationalStrategy
from ..utils.memoize import cached
from ..lazy import RootLazyTensor, MatmulLazyTensor, CholLazyTensor, \
CachedCGLazyTensor, DiagLazyTensor, BatchRepeatLazyTensor, PsdSumLazyTensor
from ..distributions import MultivariateNormal
class WhitenedVariationalStrategy(VariationalStrategy):
@cached(name="logdet_memo")
def prior_covar_logdet(self):
return -self.prior_distribution.lazy_covariance_matrix.logdet()
@cached(name="covar_trace_memo")
def covar_trace(self):
variational_covar = self.variational_distribution.variational_distribution.covariance_matrix
prior_covar = self.prior_distribution.covariance_matrix
batch_shape = prior_covar.shape[:-2]
return (variational_covar * prior_covar).view(*batch_shape, -1).sum(-1)
@cached(name="mean_diff_inv_quad_memo")
def mean_diff_inv_quad(self):
prior_mean = self.prior_distribution.mean
prior_covar = self.prior_distribution.lazy_covariance_matrix
variational_mean = self.variational_distribution.variational_distribution.mean
return prior_covar.inv_quad(variational_mean - prior_mean)
def kl_divergence(self):
variational_dist_u = self.variational_distribution.variational_distribution
prior_dist = self.prior_distribution
kl_divergence = 0.5 * sum(
[
                # log|K| - log|S|
# = log|K| - log|K var_dist_covar K|
# = -log|K| - log|var_dist_covar|
self.prior_covar_logdet(),
-variational_dist_u.lazy_covariance_matrix.logdet(),
                # tr(K^-1 S) = tr(K^-1 K var_dist_covar K) = tr(K var_dist_covar)
self.covar_trace(),
# (m - \mu u)^T K^-1 (m - \mu u)
                # = (K^-1 (m - \mu u))^T K (K^-1 (m - \mu u))
# = (var_dist_mean)^T K (var_dist_mean)
self.mean_diff_inv_quad(),
# d
-prior_dist.event_shape.numel(),
]
)
return kl_divergence
def initialize_variational_dist(self):
prior_dist = self.prior_distribution
inv_prior_dist = torch.distributions.MultivariateNormal(
prior_dist.mean,
prior_dist.lazy_covariance_matrix.add_jitter()
.evaluate()
.double()
.inverse()
.type_as(prior_dist.covariance_matrix),
)
self.variational_distribution.initialize_variational_distribution(inv_prior_dist)
def forward(self, x):
"""
The :func:`~gpytorch.variational.VariationalStrategy.forward` method determines how to marginalize out the
inducing point function values. Specifically, forward defines how to transform a variational distribution
        over the inducing point values, :math:`q(u)`, into a variational distribution over the function values at
specified locations x, :math:`q(f|x)`, by integrating :math:`\int p(f|x, u)q(u)du`
Args:
x (torch.tensor): Locations x to get the variational posterior of the function values at.
Returns:
:obj:`gpytorch.distributions.MultivariateNormal`: The distribution q(f|x)
"""
variational_dist = self.variational_distribution.variational_distribution
inducing_points = self.inducing_points
if inducing_points.dim() < x.dim():
inducing_points = inducing_points.expand(*x.shape[:-2], *inducing_points.shape[-2:])
if len(variational_dist.batch_shape) < x.dim() - 2:
variational_dist = variational_dist.expand(x.shape[:-2])
# If our points equal the inducing points, we're done
if torch.equal(x, inducing_points):
# De-whiten the prior covar
prior_covar = self.prior_distribution.lazy_covariance_matrix
if isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor):
predictive_covar = RootLazyTensor(prior_covar @ variational_dist.lazy_covariance_matrix.root.evaluate())
else:
predictive_covar = MatmulLazyTensor(prior_covar @ variational_dist.covariance_matrix, prior_covar)
# Cache some values for the KL divergence
if self.training:
self._mean_diff_inv_quad_memo, self._logdet_memo = prior_covar.inv_quad_logdet(
(variational_dist.mean - self.prior_distribution.mean), logdet=True
)
return MultivariateNormal(variational_dist.mean, predictive_covar)
# Otherwise, we have to marginalize
else:
num_induc = inducing_points.size(-2)
full_inputs = torch.cat([inducing_points, x], dim=-2)
full_output = self.model.forward(full_inputs)
full_mean, full_covar = full_output.mean, full_output.lazy_covariance_matrix
# Mean terms
test_mean = full_mean[..., num_induc:]
induc_mean = full_mean[..., :num_induc]
mean_diff = (variational_dist.mean - induc_mean).unsqueeze(-1)
# Covariance terms
induc_induc_covar = full_covar[..., :num_induc, :num_induc].add_jitter()
induc_data_covar = full_covar[..., :num_induc, num_induc:].evaluate()
data_data_covar = full_covar[..., num_induc:, num_induc:]
# If we're less than a certain size, we'll compute the Cholesky decomposition of induc_induc_covar
cholesky = False
if settings.fast_computations.log_prob.off() or (num_induc <= settings.max_cholesky_size.value()):
induc_induc_covar = CholLazyTensor(induc_induc_covar.cholesky())
cholesky = True
# Cache the CG results
# Do not use preconditioning for whitened VI, as it does not seem to improve performance.
with settings.max_preconditioner_size(0):
with torch.no_grad():
eager_rhs = torch.cat([induc_data_covar, mean_diff], -1)
solve, probe_vecs, probe_vec_norms, probe_vec_solves, tmats = CachedCGLazyTensor.precompute_terms(
induc_induc_covar,
eager_rhs.detach(),
logdet_terms=(not cholesky),
include_tmats=(not settings.skip_logdet_forward.on() and not cholesky),
)
eager_rhss = [eager_rhs.detach()]
solves = [solve.detach()]
if settings.skip_logdet_forward.on() and self.training:
eager_rhss.append(torch.cat([probe_vecs, eager_rhs], -1))
solves.append(torch.cat([probe_vec_solves, solve[..., : eager_rhs.size(-1)]], -1))
elif not self.training:
eager_rhss.append(eager_rhs[..., :-1])
solves.append(solve[..., :-1])
induc_induc_covar = CachedCGLazyTensor(
induc_induc_covar,
eager_rhss=eager_rhss,
solves=solves,
probe_vectors=probe_vecs,
probe_vector_norms=probe_vec_norms,
probe_vector_solves=probe_vec_solves,
probe_vector_tmats=tmats,
)
            # Compute some terms that will be necessary for the predictive covariance and KL divergence
if self.training:
interp_data_data_var_plus_mean_diff_inv_quad, logdet = induc_induc_covar.inv_quad_logdet(
torch.cat([induc_data_covar, mean_diff], -1), logdet=True, reduce_inv_quad=False
)
interp_data_data_var = interp_data_data_var_plus_mean_diff_inv_quad[..., :-1]
mean_diff_inv_quad = interp_data_data_var_plus_mean_diff_inv_quad[..., -1]
# Compute predictive mean
predictive_mean = torch.add(
test_mean,
induc_induc_covar.inv_matmul(mean_diff, left_tensor=induc_data_covar.transpose(-1, -2)).squeeze(-1),
)
# Compute the predictive covariance
is_root_lt = isinstance(variational_dist.lazy_covariance_matrix, RootLazyTensor)
is_repeated_root_lt = isinstance(
variational_dist.lazy_covariance_matrix, BatchRepeatLazyTensor
) and isinstance(variational_dist.lazy_covariance_matrix.base_lazy_tensor, RootLazyTensor)
if is_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2) @ variational_dist.lazy_covariance_matrix.root.evaluate()
)
elif is_repeated_root_lt:
predictive_covar = RootLazyTensor(
induc_data_covar.transpose(-1, -2)
@ variational_dist.lazy_covariance_matrix.root_decomposition().root.evaluate()
)
else:
                # use the whitened variational covariance here; `predictive_covar`
                # itself is not yet defined on this branch
                predictive_covar = MatmulLazyTensor(
                    induc_data_covar.transpose(-1, -2),
                    variational_dist.lazy_covariance_matrix @ induc_data_covar
                )
if self.training:
data_covariance = DiagLazyTensor((data_data_covar.diag() - interp_data_data_var).clamp(0, math.inf))
else:
neg_induc_data_data_covar = induc_induc_covar.inv_matmul(
induc_data_covar, left_tensor=induc_data_covar.transpose(-1, -2).mul(-1)
)
data_covariance = data_data_covar + neg_induc_data_data_covar
predictive_covar = PsdSumLazyTensor(predictive_covar, data_covariance)
# Save the logdet, mean_diff_inv_quad, prior distribution for the ELBO
if self.training:
self._memoize_cache["prior_distribution_memo"] = MultivariateNormal(induc_mean, induc_induc_covar)
self._memoize_cache["logdet_memo"] = -logdet
self._memoize_cache["mean_diff_inv_quad_memo"] = mean_diff_inv_quad
return MultivariateNormal(predictive_mean, predictive_covar)
|
the-stack_106_26277 | # qubit number=3
# total number=6
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
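# Worked example: bitwise_dot("011", "110") == "1"
# (0*1 + 1*1 + 1*0 = 1 and 1 % 2 = 1). Note that bitwise_xor above additionally
# reverses its result string via res[::-1].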
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.x(input_qubit[2]) # number=2
prog.z(input_qubit[2]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC30.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_106_26278 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithm Protocol
===================
For a class to be passed as a trading algorithm to the
:py:class:`zipline.lines.SimulatedTrading` zipline it must follow an
implementation protocol. Examples of this algorithm protocol are provided
below.
The algorithm must expose methods:
- initialize: method that takes no args, no returns. Simply called to
enable the algorithm to set any internal state needed.
- get_sid_filter: method that takes no args, and returns a list of valid
sids. List must have a length between 1 and 10. If None is returned the
filter will block all events.
- handle_data: method that accepts a :py:class:`zipline.protocol.BarData`
of the current state of the simulation universe. An example data object:
.. This outputs the table as an HTML table but for some reason there
       is no bounding box. Make the previous paragraph ending colon a
double-colon to turn this back into blockquoted table in ASCII art.
+-----------------+--------------+----------------+-------------------+
| | sid(133) | sid(134) | sid(135) |
+=================+==============+================+===================+
| price | $10.10 | $22.50 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| volume | 10,000 | 5,000 | 50,000 |
+-----------------+--------------+----------------+-------------------+
| mvg_avg_30 | $9.97 | $22.61 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| dt | 6/30/2012 | 6/30/2011 | 6/29/2012 |
+-----------------+--------------+----------------+-------------------+
- set_order: method that accepts a callable. Will be set as the value of the
order method of trading_client. An algorithm can then place orders with a
valid sid and a number of shares::
self.order(sid(133), share_count)
- set_performance: property which can be set equal to the
cumulative_trading_performance property of the trading_client. An
algorithm can then check position information with the
Portfolio object::
self.Portfolio[sid(133)]['cost_basis']
- set_transact_setter: method that accepts a callable. Will
be set as the value of the set_transact_setter method of
the trading_client. This allows an algorithm to change the
slippage model used to predict transactions based on orders
and trade events.
"""
from copy import deepcopy
import numpy as np
from nose.tools import assert_raises
from six.moves import range
from six import itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
FixedSlippage,
order,
set_slippage,
record,
sid,
)
from zipline.errors import UnsupportedOrderParameters
from zipline.assets import Future, Equity
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.controls import AssetDateBounds
from zipline.transforms import BatchTransform, batch_transform
class TestAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self,
sid,
amount,
order_count,
sid_filter=None,
slippage=None,
commission=None):
self.count = order_count
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
if sid_filter:
self.sid_filter = sid_filter
else:
self.sid_filter = [self.asset.sid]
if slippage is not None:
self.set_slippage(slippage)
if commission is not None:
self.set_commission(commission)
def handle_data(self, data):
# place an order for amount shares of sid
if self.incr < self.count:
self.order(self.asset, self.amount)
self.incr += 1
class HeavyBuyAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self, sid, amount):
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
def handle_data(self, data):
# place an order for 100 shares of sid
self.order(self.asset, self.amount)
self.incr += 1
class NoopAlgorithm(TradingAlgorithm):
"""
Dolce fa niente.
"""
def get_sid_filter(self):
return []
def initialize(self):
pass
def set_transact_setter(self, txn_sim_callable):
pass
def handle_data(self, data):
pass
class ExceptionAlgorithm(TradingAlgorithm):
"""
Throw an exception from the method name specified in the
constructor.
"""
def initialize(self, throw_from, sid):
self.throw_from = throw_from
self.asset = self.sid(sid)
if self.throw_from == "initialize":
raise Exception("Algo exception in initialize")
else:
pass
def set_portfolio(self, portfolio):
if self.throw_from == "set_portfolio":
raise Exception("Algo exception in set_portfolio")
else:
pass
def handle_data(self, data):
if self.throw_from == "handle_data":
raise Exception("Algo exception in handle_data")
else:
pass
def get_sid_filter(self):
if self.throw_from == "get_sid_filter":
raise Exception("Algo exception in get_sid_filter")
else:
return [self.asset]
def set_transact_setter(self, txn_sim_callable):
pass
class DivByZeroAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
self.incr += 1
if self.incr > 4:
5 / 0
pass
class TooMuchProcessingAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
def handle_data(self, data):
# Unless we're running on some sort of
# supercomputer this will hit timeout.
for i in range(1000000000):
self.foo = i
class TimeoutAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
if self.incr > 4:
import time
time.sleep(100)
pass
class RecordAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
record(name, self.incr, 'name2', 2, name3=self.incr)
class TestOrderAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 1
self.order(self.sid(0), 1)
class TestOrderInstantAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
self.last_price, "Orders was not filled at last price."
self.incr += 2
self.order_value(self.sid(0), data[0].price * 2.)
self.last_price = data[0].price
class TestOrderStyleForwardingAlgorithm(TradingAlgorithm):
"""
Test Algorithm for verifying that ExecutionStyles are properly forwarded by
order API helper methods. Pass the name of the method to be tested as a
string parameter to this algorithm's constructor.
"""
def __init__(self, *args, **kwargs):
self.method_name = kwargs.pop('method_name')
super(TestOrderStyleForwardingAlgorithm, self)\
.__init__(*args, **kwargs)
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert len(self.portfolio.positions.keys()) == 0
method_to_check = getattr(self, self.method_name)
method_to_check(self.sid(0),
data[0].price,
style=StopLimitOrder(10, 10))
assert len(self.blotter.open_orders[0]) == 1
result = self.blotter.open_orders[0][0]
assert result.limit == 10
assert result.stop == 10
self.incr += 1
class TestOrderValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.sale_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 2
multiplier = 2.
if isinstance(self.sid(0), Future):
multiplier *= self.sid(0).multiplier
self.order_value(self.sid(0), data[0].price * multiplier)
class TestTargetAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.target_shares = np.random.randint(1, 30)
self.order_target(self.sid(0), self.target_shares)
class TestOrderPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_percent(self.sid(0), .001)
if isinstance(self.sid(0), Equity):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) / data[0].price
)
if isinstance(self.sid(0), Future):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) /
(data[0].price * self.sid(0).multiplier)
)
class TestTargetPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.target_shares = 1
else:
assert np.round(self.portfolio.portfolio_value * 0.002) == \
self.portfolio.positions[0]['amount'] * self.sale_price, \
"Orders not filled correctly."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.sale_price = data[0].price
self.order_target_percent(self.sid(0), .002)
class TestTargetValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
print(self.portfolio)
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_target_value(self.sid(0), 20)
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Equity):
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Future):
self.target_shares = np.round(
20 / (data[0].price * self.sid(0).multiplier))
class FutureFlipAlgo(TestAlgorithm):
def handle_data(self, data):
if len(self.portfolio.positions) > 0:
if self.portfolio.positions[self.asset.sid]["amount"] > 0:
self.order_target(self.asset, -self.amount)
else:
self.order_target(self.asset, 0)
else:
self.order_target(self.asset, self.amount)
############################
# AccountControl Test Algos#
############################
class SetMaxLeverageAlgorithm(TradingAlgorithm):
def initialize(self, max_leverage=None):
self.set_max_leverage(max_leverage=max_leverage)
############################
# TradingControl Test Algos#
############################
class SetMaxPositionSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_position_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetMaxOrderSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_order_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetDoNotOrderListAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, restricted_list=None):
self.order_count = 0
self.set_do_not_order_list(restricted_list)
class SetMaxOrderCountAlgorithm(TradingAlgorithm):
def initialize(self, count):
self.order_count = 0
self.set_max_order_count(count)
class SetLongOnlyAlgorithm(TradingAlgorithm):
def initialize(self):
self.order_count = 0
self.set_long_only()
class SetAssetDateBoundsAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to order 1 share of sid 0 on every bar and has an
AssetDateBounds() trading control in place.
"""
def initialize(self):
self.register_trading_control(AssetDateBounds())
def handle_data(algo, data):
algo.order(algo.sid(0), 1)
class TestRegisterTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.set_slippage(FixedSlippage())
def handle_data(self, data):
pass
class AmbitiousStopLimitAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to buy with extremely low stops/limits and tries to
sell with extremely high versions of same. Should not end up with any
positions for reasonable data.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sid'))
def handle_data(self, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
self.order(self.asset, 100, limit_price=1)
        # Buy with high stop, shouldn't trigger.
self.order(self.asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
self.order(self.asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
self.order(self.asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
self.order(self.asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
self.order(self.asset, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
self.order(self.asset, 100, limit_price=.00000001)
self.order(self.asset, -100, stop_price=.00000001)
##########################################
# Algorithm using simple batch transforms
class ReturnPriceBatchTransform(BatchTransform):
def get_value(self, data):
assert data.shape[1] == self.window_length, \
"data shape={0} does not equal window_length={1} for data={2}".\
format(data.shape[1], self.window_length, data)
return data.price
@batch_transform
def return_price_batch_decorator(data):
return data.price
@batch_transform
def return_args_batch_decorator(data, *args, **kwargs):
return args, kwargs
@batch_transform
def return_data(data, *args, **kwargs):
return data
@batch_transform
def uses_ufunc(data, *args, **kwargs):
# ufuncs like np.log should not crash
return np.log(data)
@batch_transform
def price_multiple(data, multiplier, extra_arg=1):
return data.price * multiplier * extra_arg
class BatchTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history_return_price_class = []
self.history_return_price_decorator = []
self.history_return_args = []
self.history_return_arbitrary_fields = []
self.history_return_nan = []
self.history_return_sid_filter = []
self.history_return_field_filter = []
self.history_return_field_no_filter = []
self.history_return_ticks = []
self.history_return_not_full = []
self.return_price_class = ReturnPriceBatchTransform(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_price_decorator = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_args_batch = return_args_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_arbitrary_fields = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_nan = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_sid_filter = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
sids=[0]
)
self.return_field_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
fields=['price']
)
self.return_field_no_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_not_full = return_data(
refresh_period=1,
window_length=self.window_length,
compute_only_full=False
)
self.uses_ufunc = uses_ufunc(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.price_multiple = price_multiple(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.iter = 0
self.set_slippage(FixedSlippage())
def handle_data(self, data):
self.history_return_price_class.append(
self.return_price_class.handle_data(data))
self.history_return_price_decorator.append(
self.return_price_decorator.handle_data(data))
self.history_return_args.append(
self.return_args_batch.handle_data(
data, *self.args, **self.kwargs))
self.history_return_not_full.append(
self.return_not_full.handle_data(data))
self.uses_ufunc.handle_data(data)
# check that calling transforms with the same arguments
# is idempotent
self.price_multiple.handle_data(data, 1, extra_arg=1)
if self.price_multiple.full:
pre = self.price_multiple.rolling_panel.get_current().shape[0]
result1 = self.price_multiple.handle_data(data, 1, extra_arg=1)
post = self.price_multiple.rolling_panel.get_current().shape[0]
assert pre == post, "batch transform is appending redundant events"
result2 = self.price_multiple.handle_data(data, 1, extra_arg=1)
assert result1 is result2, "batch transform is not idempotent"
# check that calling transform with the same data, but
# different supplemental arguments results in new
# results.
result3 = self.price_multiple.handle_data(data, 2, extra_arg=1)
assert result1 is not result3, \
"batch transform is not updating for new args"
result4 = self.price_multiple.handle_data(data, 1, extra_arg=2)
assert result1 is not result4,\
"batch transform is not updating for new kwargs"
new_data = deepcopy(data)
for sidint in new_data:
new_data[sidint]['arbitrary'] = 123
self.history_return_arbitrary_fields.append(
self.return_arbitrary_fields.handle_data(new_data))
# nan every second event price
if self.iter % 2 == 0:
self.history_return_nan.append(
self.return_nan.handle_data(data))
else:
nan_data = deepcopy(data)
nan_data.price = np.nan
self.history_return_nan.append(
self.return_nan.handle_data(nan_data))
self.iter += 1
# Add a new sid to check that it does not get included
extra_sid_data = deepcopy(data)
extra_sid_data[1] = extra_sid_data[0]
self.history_return_sid_filter.append(
self.return_sid_filter.handle_data(extra_sid_data)
)
# Add a field to check that it does not get included
extra_field_data = deepcopy(data)
extra_field_data[0]['ignore'] = extra_sid_data[0]['price']
self.history_return_field_filter.append(
self.return_field_filter.handle_data(extra_field_data)
)
self.history_return_field_no_filter.append(
self.return_field_no_filter.handle_data(extra_field_data)
)
class BatchTransformAlgorithmMinute(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history = []
self.batch_transform = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False,
bars='minute'
)
def handle_data(self, data):
self.history.append(self.batch_transform.handle_data(data))
class SetPortfolioAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to set the portfolio directly.
The portfolio should be treated as a read-only object
within the algorithm.
"""
def initialize(self, *args, **kwargs):
pass
def handle_data(self, data):
self.portfolio = 3
class TALIBAlgorithm(TradingAlgorithm):
"""
An algorithm that applies a TA-Lib transform. The transform object can be
passed at initialization with the 'talib' keyword argument. The results are
stored in the talib_results array.
"""
def initialize(self, *args, **kwargs):
if 'talib' not in kwargs:
raise KeyError('No TA-LIB transform specified '
'(use keyword \'talib\').')
elif not isinstance(kwargs['talib'], (list, tuple)):
self.talib_transforms = (kwargs['talib'],)
else:
self.talib_transforms = kwargs['talib']
self.talib_results = dict((t, []) for t in self.talib_transforms)
def handle_data(self, data):
for t in self.talib_transforms:
result = t.handle_data(data)
if result is None:
if len(t.talib_fn.output_names) == 1:
result = np.nan
else:
result = (np.nan,) * len(t.talib_fn.output_names)
self.talib_results[t].append(result)
class EmptyPositionsAlgorithm(TradingAlgorithm):
"""
    An algorithm that ensures that 'phantom' positions do not appear in
    portfolio.positions in the case that a position has been entered
and fully exited.
"""
def initialize(self, *args, **kwargs):
self.ordered = False
self.exited = False
def handle_data(self, data):
if not self.ordered:
for s in data:
self.order(self.sid(s), 100)
self.ordered = True
if not self.exited:
amounts = [pos.amount for pos
in itervalues(self.portfolio.positions)]
if (
all([(amount == 100) for amount in amounts]) and
(len(amounts) == len(data.keys()))
):
for stock in self.portfolio.positions:
self.order(self.sid(stock), -100)
self.exited = True
# Should be 0 when all positions are exited.
self.record(num_positions=len(self.portfolio.positions))
class InvalidOrderAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to make various invalid order calls, verifying that
appropriate exceptions are raised.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sids')[0])
def handle_data(self, data):
from zipline.api import (
order_percent,
order_target,
order_target_percent,
order_target_value,
order_value,
)
for style in [MarketOrder(), LimitOrder(10),
StopOrder(10), StopLimitOrder(10, 10)]:
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
stop_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
stop_price=10,
style=style)
##############################
# Quantopian style algorithms
# Noop algo
def initialize_noop(context):
pass
def handle_data_noop(context, data):
pass
# API functions
def initialize_api(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data_api(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
###########################
# AlgoScripts as strings
noop_algo = """
# Noop algo
def initialize(context):
pass
def handle_data(context, data):
pass
"""
api_algo = """
from zipline.api import (order,
set_slippage,
FixedSlippage,
record,
sid)
def initialize(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
"""
api_get_environment_algo = """
from zipline.api import get_environment, order, symbol
def initialize(context):
context.environment = get_environment()
handle_data = lambda context, data: order(symbol('TEST'), 1)
"""
api_symbol_algo = """
from zipline.api import (order,
symbol)
def initialize(context):
pass
def handle_data(context, data):
order(symbol('TEST'), 1)
"""
call_order_in_init = """
from zipline.api import (order)
def initialize(context):
order(0, 10)
pass
def handle_data(context, data):
pass
"""
access_portfolio_in_init = """
def initialize(context):
var = context.portfolio.cash
pass
def handle_data(context, data):
pass
"""
access_account_in_init = """
def initialize(context):
var = context.account.settled_cash
pass
def handle_data(context, data):
pass
"""
call_all_order_methods = """
from zipline.api import (order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent,
sid)
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
order_value(sid(0), 300)
order_percent(sid(0), .1)
order_target(sid(0), 100)
order_target_value(sid(0), 100)
order_target_percent(sid(0), .2)
"""
record_variables = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(incr=context.incr)
"""
record_float_magic = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(data=float('%s'))
"""
|
the-stack_106_26279 | def search(value, my_list):
"""Returns the index of a requested value"""
index = 0
# print(my_list)
for i in my_list:
if i == value:
return index
else:
index += 1
return None
def count(value, my_list):
"""Returns the number of times a requested value is in the list"""
my_list.sort()
counter = 0
# print(my_list)
for i in my_list:
if i == value:
counter += 1
return counter
def binary_find(value, my_list):
"""Sorts the list and returns the index of the requested value"""
my_list.sort()
print(my_list)
found = False
first = 0
last = len(my_list) - 1
while first <= last and not found:
mid = (first + last) // 2
if value < my_list[mid]:
last = mid - 1
elif value > my_list[mid]:
first = mid + 1
else:
found = True
if not found:
mid = None
return mid
def main1():
print('MAIN 1: Sequential')
a = [5, 332, 6, 8, 91, 123, 123, 3]
valid = False
while not valid:
selection = input('Please type an integer to search in the list: ')
try:
selection = int(selection)
valid = True
except ValueError:
print('Incorrect input type. Please try again.\n')
location = search(value=selection, my_list=a)
if location == None:
print('Value of {} not found.\n'.format(str(selection)))
else:
print('Index: ' + str(location))
print('Value at index {}: {}\n'.format(str(location), str(a[location])))
def main2():
print('MAIN 2: Smart sequential')
a = [5, 332, 6, 8, 91, 123, 91, 91, 123, 3]
valid = False
while not valid:
selection = input('Please type an integer to search in the list: ')
try:
selection = int(selection)
valid = True
except ValueError:
print('Incorrect input type. Please try again.\n')
num = count(value=selection, my_list=a)
print('Value {} has been found {} times.\n'.format(str(selection), str(num)))
def main3():
print('MAIN 3: Binary search')
a = [5, 332, 6, 8, 91, 123, 91, 91, 123, 3]
valid = False
while not valid:
selection = input('Please type an integer to search in the list: ')
try:
selection = int(selection)
valid = True
except ValueError:
print('Incorrect input type. Please try again.\n')
look_for = binary_find(value=selection, my_list=a)
if look_for is None:
print('Value {} has not been found.\n'.format(str(selection)))
else:
print('Value {} has been found at index {}.\n'.format(str(selection), str(look_for)))
main1()
main2()
main3()
|
the-stack_106_26281 | import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.metrics import euclidean_distances
from sklearn.utils.multiclass import unique_labels
import math
class Node:
def __init__(self,
depth,
label = None,
description = None,
best_feature_id = -1,
best_feature_threshold = -1 * float('inf'),
impurity = 0):
self.depth = depth
self.label = label
self.description = description
self.best_feature_id = best_feature_id
self.best_feature_threshold = best_feature_threshold
self.impurity = impurity
self.counts = {}
self.probabilities = {}
self.children = {}
class C45Classifier(BaseEstimator, ClassifierMixin):
""" Classifier which implements c45 algorithm
For more information regarding how to build your own classifier, read more
in the :ref:`User Guide <user_guide>`.
Parameters
----------
demo_param : str, default='demo'
A parameter used for demonstation of how to pass and store paramters.
algo : str, default='c45'
A parameter used for selecting proper tree algorithm. Now only two supported - 'id3' and 'c45'
The only difference between them is the eway how amount of additional info is measured - via information gain or gain ratio
max_depth: int, default=10
Max depth of the tree
min_samples_split: int, default=2
Min size of a node for further split
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(self,
algo='c45',
max_depth=10,
min_samples_split=2):
self.algo = algo
self.max_depth = max_depth
self.min_samples_split = min_samples_split
def fit(self, X, y,
types_auto=True,
force_nominal_indices=[],
force_continuous_indices=[],
feature_names = [],
class_names = []):
"""A reference implementation of a fitting function for a classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values. An array of int.
        types_auto: boolean, default=True
            If True, try to determine whether each variable is categorical or numeric;
            if False, all features are treated as categorical.
        force_nominal_indices: array-like
            Indices of variables to treat explicitly as categorical.
            If force_nominal_indices and force_continuous_indices overlap,
            force_nominal_indices takes priority.
        force_continuous_indices: array-like
            Indices of variables to treat explicitly as numeric/continuous.
            If force_nominal_indices and force_continuous_indices overlap,
            force_nominal_indices takes priority.
        feature_names: array_like, shape (n_features,)
            Names of the features; their positional indices are used if no names are provided.
Returns
-------
self : object
Returns self.
"""
self.nominal_features_ = []
self.feature_names_ = []
self.attrs_for_indices = {}
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Store the classes seen during fit
self.classes_ = np.array(unique_labels(y))
self.X_ = np.array(X)
self.y_ = np.array(y).reshape(-1, 1)
self.data_ = np.append(self.X_, self.y_, axis=1)
#print('shapes', self.X_.shape, self.y_.shape, self.data_.shape)
#self.data_ = np.hstack( (self.X_, self.y_) )
#print('Joined data with classes', self.data_)
self.total_attributes_no = self.X_.shape[1]
if len(feature_names) > 0:
if len(feature_names) != self.total_attributes_no:
raise ValueError("Attribute labels shape doesnot fit data shape")
else:
feature_names = [i for i in range(self.total_attributes_no)]
self.feature_names_ = feature_names
self.attr_indices_ = [i for i in range(self.total_attributes_no)]
#print('total attrs', self.total_attributes_no, self.feature_names_)
# Infer types of columns
self.nominal_features_ = np.full(self.total_attributes_no, True)
if types_auto == True:
for i in range(self.total_attributes_no):
self.nominal_features_[i] = self._is_attr_discrete(i)
for cont_i in force_continuous_indices:
self.nominal_features_[cont_i] = False
for nom_i in force_nominal_indices:
self.nominal_features_[nom_i] = True
for i in range(self.total_attributes_no):
self.attrs_for_indices[i] = np.unique(X[:,i])
#
#build tree
self._generate_tree()
return self
def predict(self, X):
""" An implementation of a prediction for a classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
            The predicted class label for each sample.
"""
# Check is fit had been called
check_is_fitted(self, ['X_', 'y_'])
# Input validation
X = check_array(X)
dummy = np.zeros( X.shape[0] )
default_class_threshold = 0.5
index = 0
for row in X:
probas = self._predict_proba_one(row)
if probas is None:
continue
for c, p in probas.items():
if p >= default_class_threshold:
dummy[index] = c
break
#print(row, probas, index, dummy[index])
index += 1
return dummy
def predict_proba(self, data):
""" An implementation of a probabilitis prediction for a classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples, n_classes)
Probability for each class for each incoming sample.
"""
output = []
for row in data:
proba = self._predict_proba_one(row)
o = []
for _, v in proba.items():
o.append(v)
output.append(o)
return np.array(output)
def _predict_proba_one(self, row):
#traverse tree
node = self.tree
if node is None:
return None
while True:
feature_id = node.best_feature_id
observed_feature_value = row[feature_id]
next_node = None
if self.nominal_features_[feature_id] == True:
for link, child in node.children.items():
if observed_feature_value == link:
next_node = child
break
else:
threshold = node.best_feature_threshold
if len(node.children) == 2:
if observed_feature_value <= threshold:
next_node = node.children['left']
else:
next_node = node.children['right']
if next_node == None:
break
node = next_node
probas = node.probabilities
return probas
def feature_importances(self):
node = self.tree
importances = {}
node_0_size = 0
for _, v in node.counts.items():
node_0_size += v
node_layers, total_nodes = self._nodes_to_array()
for layer in node_layers:
for node in layer:
feature_id = node.best_feature_id
if feature_id == -1:
continue
impurity = node.impurity
total_entries = 0
for _, v in node.counts.items():
total_entries += v
children_importances = 0
#check children
for _, child in node.children.items():
child_impurity = child.impurity
if child_impurity == -1*float('inf'):
continue
total_child_entries = 0
for _, v in child.counts.items():
total_child_entries += v
children_importances += child_impurity * total_child_entries
#print(impurity)
node_importance = impurity * total_entries - children_importances
node_importance /= node_0_size
#print('Importance for', self.feature_names_[feature_id], 'is', node_importance, '(total', total_entries, ', node impurity', impurity, 'children cumulative importances', children_importances, ')')
#print(feature_id)
if feature_id in importances:
importances[feature_id] = max(node_importance, importances[feature_id])
else:
importances[feature_id] = node_importance
#print(importances)
return importances
def _nodes_to_array(self):
layers = []
layer_id = 0
node = self.tree
layer = []
layer.append(node)
layers.append(layer)
total_nodes = len(layer)
while True:
all_nodes_in_layer = layers[layer_id]
print('Layer size', len(all_nodes_in_layer))
new_layer = []
for n in all_nodes_in_layer:
for _, child in n.children.items():
new_layer.append(child)
if len(new_layer) == 0:
break
layer_id += 1
layers.append(new_layer)
total_nodes += len(new_layer)
return layers, total_nodes
def _log(self, x):
if x == 0:
return 0
else:
return math.log(x,2)
def _entropy(self, data):
s = len(data)
if s == 0:
return 0
num_classes = np.array([0 for u in self.classes_])
for row in data:
class_index = np.where(self.classes_ == row[-1])
num_classes[class_index] += 1
num_classes = [x/s for x in num_classes]
ent = 0
for num in num_classes:
ent += num * self._log(num)
return (-1) * ent
def _information_gain(self, data, subsets):
#input : data and disjoint subsets of it
#output : information gain
s = len(data)
#calculate impurity before split
impurity_before_split = self._entropy(data)
#calculate impurity after split
weights = [len(subset)/s for subset in subsets]
impurity_after_split = 0
intrinsic_value = 0
for i in range(len(subsets)):
impurity_after_split += weights[i] * self._entropy(subsets[i])
intrinsic_value += weights[i] * self._log(weights[i])
#calculate total gain
total_gain = impurity_before_split - impurity_after_split
if self.algo == 'c45':
gain_ratio = total_gain / ( -1 * intrinsic_value)
return gain_ratio
elif self.algo == 'id3':
return total_gain
else:
raise ValueError('Unsupported algo:' + self.algo)
def _is_attr_discrete(self, attr_id):
dtype = self.X_[:,attr_id].dtype
        if (dtype == float) or (dtype == np.float64):
return False
return True
def _ig_for_nominal_feature(self, data, feature_id):
X_column = data[:, feature_id]
unique_values_for_feature = np.unique(X_column)
X_column_splits = [ [] for i in range(len(unique_values_for_feature)) ]
X_samples_count = data.shape[0]
for sample_id in range(X_samples_count):
sample = data[sample_id]
for unique_value_id in range(len(unique_values_for_feature)):
unique_value = unique_values_for_feature[unique_value_id]
if unique_value == X_column[sample_id]:
X_column_splits[unique_value_id].append(sample)
ig = self._information_gain(data, X_column_splits)
threshold = None
return (ig, threshold, X_column_splits, unique_values_for_feature) #to return same type as for cont variable
def _ig_for_cont_feature(self, data, feature_id):
X_column_sorted = np.sort(data[:, feature_id])
#X_column_sorted = data[ data[:,feature_id].argsort() ]
feature_threshold = -1* float('inf')
best_ig = -1*float('inf')
X_column_splits = None
unique_feature_values = []
for j in range(len(X_column_sorted) - 1):
if X_column_sorted[j] != X_column_sorted[j + 1]:
threshold = (X_column_sorted[j] + X_column_sorted[j + 1]) / 2
less = []
greater = []
for row in data:
if row[feature_id] > threshold:
greater.append(row)
else:
less.append(row)
ig = self._information_gain(data, [less, greater])
if ig > best_ig:
best_ig = ig
feature_threshold = threshold
X_column_splits = [less, greater]
unique_feature_values = ['<=' + str(threshold), '>' + str(threshold)]
return (best_ig, feature_threshold, X_column_splits, unique_feature_values)
def _calculate_classes(self, y):
counts = {}
for x in self.classes_:
counts[x] = 0
for value in y:
counts[value] += 1
return counts
def _generate_tree(self):
self.tree = self._recursive_generate_tree(data = self.data_,
feature_ids=self.attr_indices_,
level = 0, label = 'root', verbose=1)
def _recursive_generate_tree(self, data, feature_ids, level, label, verbose):
#strategy
#1. Check level. If exceeds max_depth - exit
#2. Check amount of data. If less than min_split or min_leaf - exit
#3. Cycle through all features.
# 3.1 If feature is continuous or ordinal - find best binary split
# 3.2 If feature is nominal - split by categories. Order by categories frequencies and select max_split_categories;
# other categories join to 'other'
#4. Select feature which produces best split. Create subsets of data according to the best split.
#5. For each subset:
# - increase depth
#6. Recursive call
node = Node(level, label)
offset = '\t'*level
if verbose > 0:
print(offset + 'Level', level, '; label', label)
print(offset + 'Incoming data shape:', data.shape)
print(offset + 'Incoming feature_ids:', feature_ids)
if len(data) == 0:
if verbose > 0:
print(offset + 'Not enough data at all, terminate')
node.description = 'Not enough data at all, terminate'
return None
node.counts = self._calculate_classes(data[:, -1])
if verbose > 0:
print(offset + 'Node counts',node.counts)
for c, f in node.counts.items():
node.probabilities[c] = f / len(data[:, -1])
if len(data) <= self.min_samples_split:
if verbose > 0:
print(offset + 'Not enough data to go deeper, terminate')
node.description = 'Not enough data to go deeper, terminate'
return None
if level > self.max_depth:
if verbose > 0:
print(offset + 'Max depth exceeded, terminate')
node.description = 'Max depth exceeded, terminate'
return None
if self._check_one_class_remains(data):
if verbose > 0:
print(offset + 'One class remains, terminate')
node.description = 'One class remains, terminate'
return node
new_level = level + 1
best_feature_id = -1
max_ig = -1 * float('inf')
best_threshold = None
data_splits = []
description = None
unique_feature_values = []
for feature_id in feature_ids:
ig = max_ig
threshold = best_threshold
splits = None
unique_values = []
if self.nominal_features_[feature_id] == True:
#nominal feature
(ig, threshold, splits, unique_values) = self._ig_for_nominal_feature(data, feature_id)
description = 'nominal'
else:
#contunuous or ordinal feature
(ig, threshold, splits, unique_values) = self._ig_for_cont_feature(data, feature_id)
description = 'non-nominal'
if ig > max_ig:
best_feature_id = feature_id
best_threshold = threshold
max_ig = ig
data_splits = splits
unique_feature_values = unique_values
if verbose > 0:
print(offset + 'Feature', self.feature_names_[feature_id], 'has better split')
else:
if verbose > 0:
print(offset + 'Feature', self.feature_names_[feature_id], 'has less IG, ignoring')
node.best_feature_id = best_feature_id
node.best_feature_threshold = best_threshold
node.description = description
node.impurity = max_ig
if best_feature_id < 0:
if verbose > 0:
print(offset + 'No better split. Leaf node')
return node
if data_splits is None:
if verbose > 0:
print(offset + 'No splits found. Leaf node')
return node
if len(data_splits) != len(unique_feature_values):
if verbose > 0:
print(offset + 'sizes (splits, values):', len(data_splits), len(unique_feature_values))
print(offset + 'Shapes between splits and unique features don\'t match. Leaf node?')
return node
new_feature_ids = feature_ids.copy()
if best_feature_id in new_feature_ids:
new_feature_ids.remove(best_feature_id)
if self.nominal_features_[node.best_feature_id] == True:
i = 0
for split in data_splits:
if verbose > 0:
print(offset + 'Data shape for child:', np.array(split).shape)
op = ''
if self.nominal_features_[best_feature_id] == True:
op = ' is '
unique_value = unique_feature_values[i]
new_label = str(self.feature_names_[best_feature_id]) + op + str(unique_value)
child = self._recursive_generate_tree(np.array(split), new_feature_ids, new_level, new_label, verbose)
if child != None:
#Check if our child split improves our knowledge; if probs remain the same, child is useless
if node.counts != child.counts:
node.children[unique_value] = child
i += 1
else:
#Expected data splits should be 2
for i in range(2):
unique_value = unique_feature_values[i]
new_label = str(self.feature_names_[best_feature_id]) + str(unique_value)
child = self._recursive_generate_tree(np.array(data_splits[i]),
new_feature_ids, new_level, new_label, verbose)
if child != None:
if i == 0:
node.children['left'] = child
else:
node.children['right'] = child
return node
def _check_one_class_remains(self, data):
uniques = np.unique(data[:,-1])
if len(uniques) <= 1:
return True
return False
def _get_major_class_index(self, data):
#print('classes', self.classes_)
freq = [0]*len(self.classes_)
#print('classes', self.classes_)
freq = np.array(freq)
for row in data:
#print('row-1, row:', row[-1], row)
index = np.where(self.classes_ == row[-1])
freq[index] += 1
max_ind = np.where(freq == max(freq))
#print('freqs', freq)
return max_ind
def _get_class_frequencies(self, data):
#print('classes', self.classes_)
freqs = [0]*len(self.classes_)
#print('classes', self.classes_)
freqs = np.array(freqs)
for row in data:
#print('row-1, row:', row[-1], row)
index = np.where(self.classes_ == row[-1])
freqs[index] += 1
#print('freqs', freq)
return freqs
def printTree(self):
print('--- Tree ---')
self.printNode(self.tree)
def printNode(self, node, indent=""):
print(indent + 'Label:', node.label)
print(indent + 'Description:', node.description)
total_entries = 0
classes_count_str = 'Classes count: ['
for c, p in node.counts.items():
classes_count_str += str(c) + ':' + str(p) + '; '
total_entries += p
classes_count_str += ']'
classes_prob_str = 'Classes probs: ['
for c, p in node.probabilities.items():
classes_prob_str += str(c) + ':' + str(p) + '; '
# classes_prob_str += str(self.feature_names_[c]) + ':' + str(p / total_entries) + ';'
classes_prob_str += ']'
print(indent + classes_count_str)
print(indent + classes_prob_str)
print(indent + 'Best feature to split:', self.feature_names_[node.best_feature_id])
print(indent + 'Best feature threshold:', node.best_feature_threshold)
for link, child in node.children.items():
print(indent+'Link to child', link)
self.printNode(child, indent + '\t')
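# A minimal usage sketch on a tiny made-up dataset (the feature names and
# values below are illustrative placeholders, not real data): fit the tree,
# predict a couple of samples, then inspect the feature importances.
if __name__ == "__main__":
    X_demo = np.array([[1.0, 0.0], [2.0, 0.0], [3.0, 0.0],
                       [7.0, 1.0], [8.0, 1.0], [9.0, 1.0]])
    y_demo = np.array([0, 0, 0, 1, 1, 1])
    clf = C45Classifier(algo='c45', max_depth=3)
    clf.fit(X_demo, y_demo, feature_names=['height', 'flag'])
    print(clf.predict(np.array([[1.5, 0.0], [8.5, 1.0]])))  # roughly [0. 1.]
    print(clf.feature_importances())  # expect feature 0 ('height') to dominate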
|
the-stack_106_26282 | import numpy as np
import PILasOPENCV
# getmask in PILasOPENCV does not work with certain characters / fonts / sizes
# this is a quick fix
# XXX: fix properly and make PR to PILasOPENCV
def getmaskFix(text, ttf_font):
slot = ttf_font.glyph
width, height, baseline = PILasOPENCV.getsize(text, ttf_font)
Z = np.zeros((height, width), dtype=np.ubyte)
x, y = 0, 0
previous = 0
for c in text:
ttf_font.load_char(c)
bitmap = slot.bitmap
top = slot.bitmap_top
left = slot.bitmap_left
w,h = bitmap.width, bitmap.rows
#My modification
if previous == 0 and (w != width or h != height):
Z = np.zeros((h, w), dtype=np.ubyte)
y = 0
else:
y = height-baseline-top
if y<=0: y=0
kerning = ttf_font.get_kerning(previous, c)
x += (kerning.x >> 6)
character = np.array(bitmap.buffer, dtype='uint8').reshape(h,w)
# try:
Z[y:y+h,x:x+w] += character
# except ValueError:
# while x+w>Z.shape[1]:
# x = x - 1
# print("new", x, y, w, h, character.shape, type(bitmap))
# if x>0:
# Z[:character.shape[0],x:x+w] += character
x += (slot.advance.x >> 6)
previous = c
return Z
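# A hypothetical usage sketch: getmask (and this fix) operates on a freetype-py
# Face, which is what PILasOPENCV wraps internally. The font path below is an
# assumption; point it at any TrueType font available locally.
if __name__ == "__main__":
    import freetype
    face = freetype.Face("DejaVuSans.ttf")
    face.set_char_size(32 * 64)  # 32 pt, expressed in 1/64th-of-a-point units
    mask = getmaskFix("Hg", face)
    print(mask.shape, mask.dtype, mask.max())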
|
the-stack_106_26284 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 13:44:58 2022
@author: Jens Eriksson
"""
from window_functions import hann_window, corner_hann_window, top_hann_window, bartley_hann_window, triangular_window
from window_functions import build_weighted_mask_array
from matplotlib import pyplot as plt
import numpy as np
indir = r"C:\Users\Jens\Documents\Code\bactnet\Bactnet\Training data\stacks\predict\piccolo"
h = 288
w = 288
plt.subplot(331)
plt.imshow(corner_hann_window(h, w))
plt.subplot(332)
plt.imshow(top_hann_window(h, w))
plt.subplot(333)
plt.imshow(np.rot90(corner_hann_window(h, w), 3))
plt.subplot(334)
plt.imshow(np.rot90(top_hann_window(h, w), 1))
plt.subplot(335)
plt.imshow(hann_window(h, w))
plt.subplot(336)
plt.imshow(np.rot90(top_hann_window(h, w), 3))
plt.subplot(337)
plt.imshow(np.rot90(corner_hann_window(h, w), 1))
plt.subplot(338)
plt.imshow(np.rot90(top_hann_window(h, w), 2))
plt.subplot(339)
plt.imshow(np.rot90(corner_hann_window(h, w), 2))
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
print(build_weighted_mask_array("han", 3))
|
the-stack_106_26286 | import datetime
import oauthlib.oauth2
import oauthlib.oauth2.rfc6749.tokens
import oauth2_provider.models
import oauth2_provider.oauth2_validators
from oauth2_provider.scopes import BaseScopes
from oauth2_provider.settings import oauth2_settings
from . import generators
def signed_token_generator(request):
token_duration = datetime.timedelta(
seconds=oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS
)
user = getattr(request, "user", None)
token, payload = generators.generate_jwt_with_payload(
request.client, token_duration=token_duration, user=user
)
# set claims on the request
request.claims = payload
return token
class Server(oauthlib.oauth2.Server):
"""Just swap the default token generator to signed_tokens."""
def __init__(self, *args, **kwargs):
if not kwargs.get("token_generator"):
kwargs["token_generator"] = signed_token_generator
kwargs[
"refresh_token_generator"
] = oauthlib.oauth2.rfc6749.tokens.random_token_generator
super().__init__(*args, **kwargs)
class OAuth2Validator(oauth2_provider.oauth2_validators.OAuth2Validator):
def _create_access_token(self, expires, request, token, source_refresh_token=None):
"""Saves the token jti in the database."""
access_token = oauth2_provider.models.get_access_token_model()(
user=request.user,
scope=token["scope"],
expires=expires,
token=token["access_token"],
application=request.client,
source_refresh_token=source_refresh_token,
jti=request.claims["jti"],
)
access_token.save()
return access_token
class AppScopes(BaseScopes):
def get_all_scopes(self):
Application = oauth2_provider.models.get_application_model()
return {
k: k
for k in set(
s
for scopes in Application.objects.values_list("scopes", flat=True)
for s in scopes
)
}
def get_available_scopes(self, application=None, request=None, *args, **kwargs):
return application.scopes
def get_default_scopes(self, application=None, request=None, *args, **kwargs):
return self.get_available_scopes(application, request, *args, **kwargs)
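# A sketch of how these classes would plausibly be wired into django-oauth-toolkit
# settings. The dotted paths assume this module lives at "myproject.oauth" (adjust
# to the real import path), and the validator above also assumes the access-token
# model defines a `jti` field.
#
# OAUTH2_PROVIDER = {
#     "OAUTH2_SERVER_CLASS": "myproject.oauth.Server",
#     "OAUTH2_VALIDATOR_CLASS": "myproject.oauth.OAuth2Validator",
#     "SCOPES_BACKEND_CLASS": "myproject.oauth.AppScopes",
#     "ACCESS_TOKEN_EXPIRE_SECONDS": 3600,
# }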
|
the-stack_106_26287 | # coding=utf-8
import random
from pyecharts.option import get_all_options
from pyecharts.base import Base
import pyecharts.constants as constants
class Chart(Base):
"""
    `Chart` is the base class for every non-custom chart class; it inherits from `Base`.
"""
def __init__(self, title, subtitle,
width=800,
height=400,
title_pos="auto",
title_top="auto",
title_color="#000",
subtitle_color="#aaa",
title_text_size=18,
subtitle_text_size=12,
background_color="#fff",
page_title=constants.PAGE_TITLE,
jshost=None):
"""
        :param title:
            Main title text; supports \n line breaks. Defaults to "".
        :param subtitle:
            Subtitle text; supports \n line breaks. Defaults to "".
        :param width:
            Canvas width. Defaults to 800 (px).
        :param height:
            Canvas height. Defaults to 400 (px).
        :param title_pos:
            Distance of the title from the left edge. Defaults to 'auto';
            'auto', 'left', 'right' and 'center' are accepted, as well as a
            percentage or an integer.
        :param title_top:
            Distance of the title from the top. Defaults to 'auto';
            'top', 'middle' and 'bottom' are accepted, as well as a
            percentage or an integer.
        :param title_color:
            Main title text color. Defaults to '#000'.
        :param subtitle_color:
            Subtitle text color. Defaults to '#aaa'.
        :param title_text_size:
            Main title font size. Defaults to 18.
        :param subtitle_text_size:
            Subtitle font size. Defaults to 12.
        :param background_color:
            Canvas background color. Defaults to '#fff'.
        :param page_title:
            Value of the <title> tag in the generated HTML file. Defaults to 'Echarts'.
        :param jshost:
            Custom JavaScript host for this chart instance.
"""
super(Chart, self).__init__(
width=width, height=height,
page_title=page_title,
jshost=jshost
)
self._colorlst = [
'#c23531', '#2f4554', '#61a0a8', '#d48265', '#749f83',
'#ca8622', '#bda29a', '#6e7074', '#546570', '#c4ccd3',
'#f05b72', '#ef5b9c', '#f47920', '#905a3d', '#fab27b',
'#2a5caa', '#444693', '#726930', '#b2d235', '#6d8346',
'#ac6767', '#1d953f', '#6950a1', '#918597', '#f6f5ec']
self._option.update(
title=[{
"text": title,
"subtext": subtitle,
"left": title_pos,
"top": title_top,
"textStyle": {
"color": title_color,
"fontSize": title_text_size
},
"subtextStyle": {
"color": subtitle_color,
"fontSize": subtitle_text_size
}
}],
toolbox={
"show": True,
"orient": "vertical",
"left": "95%",
"top": "center",
"feature": {
"saveAsImage": {
"show": True,
"title": "下载图片"
},
"restore": {"show": True},
"dataView": {"show": True},
}
},
series_id=random.randint(1, 9000000),
tooltip={},
series=[],
legend=[{"data": []}],
backgroundColor=background_color
)
def add(self, angle_data=None,
angle_range=None,
area_color=None,
area_opacity=None,
axis_range=None,
bar_category_gap=None,
border_color=None,
boundary_gap=None,
center=None,
calendar_date_range=None,
calendar_cell_size=None,
datazoom_type=None,
datazoom_range=None,
datazoom_orient=None,
datazoom_xaxis_index=None,
datazoom_yaxis_index=None,
effect_brushtype=None,
effect_period=None,
effect_scale=None,
extra_data=None,
geo_emphasis_color=None,
geo_normal_color=None,
geo_cities_coords=None,
geo_effect_period=None,
geo_effect_traillength=None,
geo_effect_color=None,
geo_effect_symbol=None,
geo_effect_symbolsize=None,
graph_layout=None,
graph_gravity=None,
graph_edge_length=None,
graph_repulsion=None,
graph_edge_symbol=None,
graph_edge_symbolsize=None,
grid_width=None,
grid_height=None,
grid_top=None,
grid_bottom=None,
grid_left=None,
grid_right=None,
grid3d_width=None,
grid3d_height=None,
grid3d_depth=None,
grid3d_opacity=None,
grid3d_shading=None,
grid3d_rotate_speed=None,
grid3d_rotate_sensitivity=None,
is_angleaxis_show=None,
is_area_show=None,
is_axisline_show=None,
is_calculable=None,
is_calendar_heatmap=None,
is_clockwise=None,
is_convert=None,
is_datazoom_show=None,
is_fill=None,
is_focusnode=None,
is_geo_effect_show=None,
is_grid3d_rotate=None,
is_label_show=None,
is_label_emphasis=None,
is_legend_show=None,
is_liquid_animation=None,
is_liquid_outline_show=None,
is_more_utils=None,
is_piecewise=None,
is_radiusaxis_show=None,
is_random=None,
is_roam=None,
is_rotatelabel=None,
is_smooth=None,
is_splitline_show=None,
is_stack=None,
is_step=None,
is_symbol_show=None,
is_map_symbol_show=None,
is_visualmap=None,
is_xaxislabel_align=None,
is_yaxislabel_align=None,
is_xaxis_inverse=None,
is_yaxis_inverse=None,
is_xaxis_boundarygap=None,
is_yaxis_boundarygap=None,
is_xaxis_show=None,
is_yaxis_show=None,
item_color=None,
label_color=None,
label_pos=None,
label_text_color=None,
label_text_size=None,
label_formatter=None,
label_emphasis_textcolor=None,
label_emphasis_textsize=None,
label_emphasis_pos=None,
legend_orient=None,
legend_pos=None,
legend_top=None,
legend_selectedmode=None,
legend_text_size=None,
legend_text_color=None,
line_curve=None,
line_opacity=None,
line_type=None,
line_width=None,
line_color=None,
liquid_color=None,
maptype=None,
mark_line=None,
mark_line_symbolsize=None,
mark_line_valuedim=None,
mark_point=None,
mark_point_symbol=None,
mark_point_symbolsize=None,
mark_point_textcolor=None,
radius_data=None,
radius=None,
rosetype=None,
rotate_step=None,
scale_range=None,
shape=None,
start_angle=None,
symbol_size=None,
symbol=None,
sankey_node_width=None,
sankey_node_gap=None,
type=None,
tooltip_tragger=None,
tooltip_tragger_on=None,
tooltip_axispointer_type=None,
tooltip_formatter=None,
tooltip_text_color=None,
tooltip_font_size=None,
treemap_left_depth=None,
treemap_drilldown_icon=None,
treemap_visible_min=None,
visual_orient=None,
visual_range_color=None,
visual_range_size=None,
visual_range_text=None,
visual_range=None,
visual_text_color=None,
visual_pos=None,
visual_top=None,
visual_type=None,
visual_split_number=None,
visual_dimension=None,
word_gap=None,
word_size_range=None,
x_axis=None,
xaxis_margin=None,
xaxis_interval=None,
xaxis_force_interval=None,
xaxis_pos=None,
xaxis_name_gap=None,
xaxis_name_size=None,
xaxis_name_pos=None,
xaxis_name=None,
xaxis_rotate=None,
xaxis_min=None,
xaxis_max=None,
xaxis_type=None,
xaxis3d_name=None,
xaxis3d_name_size=None,
xaxis3d_name_gap=None,
xaxis3d_min=None,
xaxis3d_max=None,
xaxis3d_interval=None,
xaxis3d_margin=None,
yaxis_margin=None,
yaxis_interval=None,
yaxis_force_interval=None,
yaxis_pos=None,
yaxis_formatter=None,
yaxis_rotate=None,
yaxis_min=None,
yaxis_max=None,
yaxis_name_gap=None,
yaxis_name_size=None,
yaxis_name_pos=None,
yaxis_type=None,
yaxis_name=None,
yaxis3d_name=None,
yaxis3d_name_size=None,
yaxis3d_name_gap=None,
yaxis3d_min=None,
yaxis3d_max=None,
yaxis3d_interval=None,
yaxis3d_margin=None,
zaxis3d_name=None,
zaxis3d_name_size=None,
zaxis3d_name_gap=None,
zaxis3d_min=None,
zaxis3d_max=None,
zaxis3d_margin=None, **kwargs):
""" `add()` 方法只是用于提供自动参数补全 """
pass
def _config_components(self, is_visualmap=False,
is_more_utils=False,
**kwargs):
""" 图形组件配置项
:param is_visualmap:
指定是否使用 visualMap 组件
:param is_datazoom_show:
指定是否使用 dataZoom 组件
:param is_more_utils:
指定是否提供更多的实用小工具
:param kwargs:
"""
kwargs.update(colorlst=self._colorlst)
chart = get_all_options(**kwargs)
self._option.update(color=chart['color'])
# legend
self._option.get('legend')[0].update(chart['legend'])
# tooltip
self._option.update(tooltip=chart['tooltip'])
        # dataZoom; do not modify
if kwargs.get('is_datazoom_show', None) is True:
self._option.update(dataZoom=chart['datazoom'])
# visualMap
if is_visualmap:
self._option.update(visualMap=chart['visual_map'])
# toolbox
if is_more_utils:
self._option.get('toolbox').get('feature').update(
magicType={
"show": True,
"type": ['line', 'bar', 'stack', 'tiled'],
"title": {
"line": "折线图",
"bar": "柱状图",
"stack": "堆叠",
"tiled": "平铺"
}},
dataZoom={
"show": True,
"title": {
"zoom": "区域缩放",
"back": "缩放还原"
}}
)
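# A rough, illustrative-only sketch (not a real pyecharts chart type) of how a
# concrete chart builds on Chart: append a series to self._option, register the
# series name with the legend, and let _config_components wire up the shared
# legend/tooltip/visualMap options. Axis configuration is deliberately omitted.
class _ScatterSketch(Chart):
    def __init__(self, title="", subtitle="", **kwargs):
        super(_ScatterSketch, self).__init__(title, subtitle, **kwargs)
    def add(self, name, x_values, y_values, **kwargs):
        self._option.get('legend')[0].get('data').append(name)
        self._option.get('series').append({
            "type": "scatter",
            "name": name,
            "data": [list(z) for z in zip(x_values, y_values)],
        })
        self._config_components(**kwargs)
        return self
# Example (commented out): _ScatterSketch("demo").add("points", [1, 2, 3], [4, 5, 6]).render()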
|
the-stack_106_26290 | import json
from pyramid import testing
from pyramid.response import Response
from unittest import TestCase, mock
from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from marshmallow import Schema, fields
from pyramid_restful import viewsets
class MyViewSet(viewsets.APIViewSet):
def list(self, request, *args, **kwargs):
return Response({'method': 'GET', 'action': 'list'})
engine = create_engine('sqlite://')
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
class UserSchema(Schema):
id = fields.Integer()
name = fields.String()
class UserViewSet(viewsets.ModelCRUPDViewSet):
model = User
schema_class = UserSchema
def get_dbsession():
Session = sessionmaker()
Session.configure(bind=engine)
return Session()
class ViewSetTests(TestCase):
def setUp(self):
self.viewset = MyViewSet.as_view(action_map={'get': 'list'})
def test_action_map(self):
request = testing.DummyRequest()
response = self.viewset(request)
expected = {'method': 'GET', 'action': 'list'}
assert response.status_code == 200
assert response.body == expected
def test_missing_action_map(self):
self.assertRaises(TypeError, MyViewSet.as_view)
class ModelViewSetTests(TestCase):
@classmethod
def setUpClass(cls):
Base.metadata.create_all(engine)
dbsession = get_dbsession()
user = User(id=1, name='testing')
user2 = User(id=2, name='testing 2')
dbsession.add(user)
dbsession.add(user2)
dbsession.commit()
def setUp(self):
self.dbsession = get_dbsession()
self.list_viewset = UserViewSet.as_view({'get': 'list', 'post': 'create'})
self.detail_viewset = UserViewSet.as_view(
{'get': 'retrieve', 'put': 'update', 'patch': 'partial_update', 'delete': 'destroy'})
self.request = testing.DummyRequest()
self.request.dbsession = self.dbsession
def tearDown(self):
self.dbsession.close()
def test_list(self):
response = self.list_viewset(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == [{"id": 1, "name": "testing"}, {"id": 2, "name": "testing 2"}]
def test_create(self):
expected = {'id': 3, 'name': 'testing 3'}
self.request.json_body = expected
self.request.method = 'POST'
response = self.list_viewset(self.request)
assert response.status_code == 201
assert json.loads(response.body.decode('utf-8')) == expected
def test_retrieve(self):
expected = {'id': 1, 'name': 'testing'}
self.request.matchdict['id'] = 1
response = self.detail_viewset(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == expected
def test_object_does_not_exist(self):
self.request.matchdict['id'] = 99
response = self.detail_viewset(self.request)
assert response.status_code == 404
def test_update(self):
expected = {'id': 1, 'name': 'testing 1'}
self.request.matchdict['id'] = 1
self.request.method = 'PUT'
self.request.json_body = expected
response = self.detail_viewset(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == expected
def test_partial_update(self):
expected = {'id': 1, 'name': '1'}
self.request.matchdict['id'] = 1
self.request.method = 'PATCH'
self.request.json_body = {'name': '1'}
response = self.detail_viewset(self.request)
assert response.status_code == 200
assert json.loads(response.body.decode('utf-8')) == expected
def test_destroy(self):
self.request.matchdict['id'] = 1
self.request.method = 'DELETE'
response = self.detail_viewset(self.request)
assert response.status_code == 204
self.request.method = 'GET'
response = self.detail_viewset(self.request)
assert response.status_code == 404
|
the-stack_106_26291 | from multiprocessing import Lock
from contextlib import contextmanager
from typing import NewType
from dbt.adapters.postgres import PostgresConnectionManager
from dbt.adapters.postgres import PostgresCredentials
from dbt.logger import GLOBAL_LOGGER as logger # noqa
import dbt.exceptions
import dbt.flags
import boto3
from hologram import FieldEncoder, JsonSchemaMixin
from hologram.helpers import StrEnum
from dataclasses import dataclass, field
from typing import Optional, List
drop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()
IAMDuration = NewType('IAMDuration', int)
class IAMDurationEncoder(FieldEncoder):
@property
def json_schema(self):
return {'type': 'integer', 'minimum': 0, 'maximum': 65535}
JsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})
class RedshiftConnectionMethod(StrEnum):
DATABASE = 'database'
IAM = 'iam'
@dataclass
class RedshiftCredentials(PostgresCredentials):
method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE
password: Optional[str] = None
cluster_id: Optional[str] = field(
default=None,
metadata={'description': 'If using IAM auth, the name of the cluster'},
)
iam_profile: Optional[str] = None
iam_duration_seconds: int = 900
search_path: Optional[str] = None
keepalives_idle: int = 240
autocreate: bool = False
db_groups: List[str] = field(default_factory=list)
@property
def type(self):
return 'redshift'
def _connection_keys(self):
keys = super()._connection_keys()
return keys + (
'method',
'cluster_id',
'iam_profile',
'iam_duration_seconds'
)
class RedshiftConnectionManager(PostgresConnectionManager):
TYPE = 'redshift'
@contextmanager
def fresh_transaction(self, name=None):
"""On entrance to this context manager, hold an exclusive lock and
create a fresh transaction for redshift, then commit and begin a new
one before releasing the lock on exit.
See drop_relation in RedshiftAdapter for more information.
:param Optional[str] name: The name of the connection to use, or None
to use the default.
"""
with drop_lock:
connection = self.get_thread_connection()
if connection.transaction_open:
self.commit()
self.begin()
yield
self.commit()
self.begin()
@classmethod
def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,
iam_profile, duration_s, autocreate,
db_groups):
"""Fetches temporary login credentials from AWS. The specified user
must already exist in the database, or else an error will occur"""
if iam_profile is None:
boto_client = boto3.client('redshift')
else:
logger.debug("Connecting to Redshift using 'IAM'" +
f"with profile {iam_profile}")
boto_session = boto3.Session(
profile_name=iam_profile
)
boto_client = boto_session.client('redshift')
try:
return boto_client.get_cluster_credentials(
DbUser=db_user,
DbName=db_name,
ClusterIdentifier=cluster_id,
DurationSeconds=duration_s,
AutoCreate=autocreate,
DbGroups=db_groups,)
except boto_client.exceptions.ClientError as e:
raise dbt.exceptions.FailedToConnectException(
"Unable to get temporary Redshift cluster credentials: {}"
.format(e))
@classmethod
def get_tmp_iam_cluster_credentials(cls, credentials):
cluster_id = credentials.cluster_id
# default via:
# boto3.readthedocs.io/en/latest/reference/services/redshift.html
iam_duration_s = credentials.iam_duration_seconds
if not cluster_id:
raise dbt.exceptions.FailedToConnectException(
"'cluster_id' must be provided in profile if IAM "
"authentication method selected")
cluster_creds = cls.fetch_cluster_credentials(
credentials.user,
credentials.database,
credentials.cluster_id,
credentials.iam_profile,
iam_duration_s,
credentials.autocreate,
credentials.db_groups,
)
# replace username and password with temporary redshift credentials
return credentials.replace(user=cluster_creds.get('DbUser'),
password=cluster_creds.get('DbPassword'))
@classmethod
def get_credentials(cls, credentials):
method = credentials.method
# Support missing 'method' for backwards compatibility
if method == 'database' or method is None:
logger.debug("Connecting to Redshift using 'database' credentials")
# this requirement is really annoying to encode into json schema,
# so validate it here
if credentials.password is None:
raise dbt.exceptions.FailedToConnectException(
"'password' field is required for 'database' credentials"
)
return credentials
elif method == 'iam':
logger.debug("Connecting to Redshift using 'IAM' credentials")
return cls.get_tmp_iam_cluster_credentials(credentials)
else:
raise dbt.exceptions.FailedToConnectException(
"Invalid 'method' in profile: '{}'".format(method))
|
the-stack_106_26292 | #!/usr/bin/env python3
"""Creating a dataframe from a csv or json."""
import pandas as pd
# df.csv
# a,b,c,d
# 1,2,3,4
# 5,6,7,8
# 9,8,7,6
# df.json
# {"a":{"0":1,"1":5,"2":9},"b":{"0":2,"1":6,"2":8},
# "c":{"0":3,"1":7,"2":7},"d":{"0":4,"1":8,"2":6}}
df = pd.read_csv('df.csv', header=0, sep=',', index_col=None,
comment='#', na_values='-1')
df = pd.read_json('df.json')
print(df)
# a b c d
# 0 1 2 3 4
# 1 5 6 7 8
# 2 9 8 7 6
for chunk in pd.read_csv('df.csv', chunksize=1):
print(chunk)
# a b c d
# 0 1 2 3 4
# a b c d
# 1 5 6 7 8
# a b c d
# 2 9 8 7 6
df.to_csv('df.csv', index=False)
df.to_json('df.json')
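# A small illustrative extension (assumes the same df.csv as above): read only
# a subset of the columns, still honouring the '#' comment marker.
subset = pd.read_csv('df.csv', usecols=['a', 'd'], comment='#')
print(subset)
#    a  d
# 0  1  4
# 1  5  8
# 2  9  6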
|
the-stack_106_26293 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Analysis default options for DM pipeline analysis
"""
from __future__ import absolute_import, division, print_function
generic = {
'outfile': (None, 'Path to output file.', str),
'infile': (None, 'Path to input file.', str),
'summaryfile': (None, 'Path to file with results summaries.', str),
}
common = {
'ttype': (None, 'Type of target being analyzed.', str),
'rosters': ([], 'Name of a stacking target roster.', list),
'rosterlist': (None, 'Path to the roster list.', str),
'target': (None, 'Name of analysis target.', str),
'targetlist': (None, 'Path to the target list.', str),
'config': (None, 'Path to fermipy config file.', str),
'roi_baseline': ('fit_baseline', 'Key for roi baseline file.', str),
'profile_file': (None, 'Path to yaml file with target profile', str),
'spatial_models': ([], 'Types of spatial models to use', list),
'alias_dict': (None, 'File to rename target version keys.', str),
'sed_file': (None, 'Path to SED file.', str),
'profiles': ([], 'List of profiles to analyze', list),
'nsims': (-1, 'Number of simulations to run.', int),
'dry_run': (False, 'Print commands but do not run them.', bool),
'make_plots': (False, 'Make plots', bool),
'non_null_src': (False, 'Zero out test source', bool),
'do_find_src': (False, 'Add source finding step to simulated realizations', bool),
}
sims = {
'sim': (None, 'Name of the simulation scenario.', str),
'sims': ([], 'Names of the simulation scenario.', list),
'sim_profile': ('default', 'Name of the profile to use for simulation.', str),
'nsims': (20, 'Number of simulations to run.', int),
'nsims_job': (0, 'Number of simulations to run per job.', int),
'seed': (0, 'Seed number for first simulation.', int),
    'rand_config': (None, 'Path to config file for generating random sky dirs', str),
'skydirs': (None, 'Yaml file with blank sky directions.', str),
'extracopy': ([], 'Extra files to copy', list),
'band_sim': ('null', 'Name of the simulation scenario to use for plot bands.', str),
'band_type': ('e2dnde_ul', 'Name of the quantity to plot in bands plots.', str),
}
collect = {
'write_full': (False, 'Write file with full collected results', bool),
'write_summary': (False, 'Write file with summary of collected results', bool),
}
jobs = {
'action': ('run', 'Action to perform', str),
'dry_run': (False, 'Print commands, but do not execute them', bool),
'job_check_sleep': (300, 'Sleep time between checking on job status (s)', int),
'print_update': (False, 'Print summary of job status', bool),
'check_status_once': (False, 'Check status only once before proceeding', bool),
}
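# Hypothetical helper (not part of the original defaults module): shows how
# option dictionaries of the form {name: (default, help, type)} could be
# mapped onto an argparse parser.
import argparse
def add_options(parser, option_dict):
    for name, (default, helpstr, otype) in option_dict.items():
        if otype is list:
            parser.add_argument('--%s' % name, nargs='*', default=default, help=helpstr)
        elif otype is bool:
            parser.add_argument('--%s' % name, action='store_true', default=default, help=helpstr)
        else:
            parser.add_argument('--%s' % name, type=otype, default=default, help=helpstr)
# Example: parser = argparse.ArgumentParser(); add_options(parser, common)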
|
the-stack_106_26295 | # ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import json
import numpy as np
import pytest
from _pyngraph import VariantInt, VariantString
import ngraph as ng
from ngraph.exceptions import UserInputError
from ngraph.impl import Function, PartialShape, Shape, Type
from ngraph.impl.op import Parameter
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests import (xfail_issue_35929,
xfail_issue_36476,
xfail_issue_36479,
xfail_issue_36480)
def test_ngraph_function_api():
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=np.float32, name="A")
parameter_b = ng.parameter(shape, dtype=np.float32, name="B")
parameter_c = ng.parameter(shape, dtype=np.float32, name="C")
model = (parameter_a + parameter_b) * parameter_c
function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction")
function.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5]))
ordered_ops = function.get_ordered_ops()
op_types = [op.get_type_name() for op in ordered_ops]
assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"]
assert len(function.get_ops()) == 6
assert function.get_output_size() == 1
assert function.get_output_op(0).get_type_name() == "Result"
assert function.get_output_element_type(0) == parameter_a.get_element_type()
assert list(function.get_output_shape(0)) == [2, 2]
assert (function.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5])
assert len(function.get_parameters()) == 3
assert len(function.get_results()) == 1
assert function.get_friendly_name() == "TestFunction"
@pytest.mark.parametrize(
"dtype",
[
np.float32,
pytest.param(np.float64, marks=xfail_issue_35929),
pytest.param(np.int8, marks=xfail_issue_36479),
np.int16,
np.int32,
np.int64,
pytest.param(np.uint8, marks=xfail_issue_36479),
np.uint16,
pytest.param(np.uint32, marks=xfail_issue_36476),
np.uint64,
],
)
def test_simple_computation_on_ndarrays(dtype):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
value_b = np.array([[5, 6], [7, 8]], dtype=dtype)
value_c = np.array([[9, 10], [11, 12]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[54, 80], [110, 144]], dtype=dtype))
value_a = np.array([[13, 14], [15, 16]], dtype=dtype)
value_b = np.array([[17, 18], [19, 20]], dtype=dtype)
value_c = np.array([[21, 22], [23, 24]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[630, 704], [782, 864]], dtype=dtype))
def test_serialization():
dtype = np.float32
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
runtime = get_runtime()
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
try:
serialized = computation.serialize(2)
serial_json = json.loads(serialized)
assert serial_json[0]["name"] != ""
assert 10 == len(serial_json[0]["ops"])
except Exception:
pass
def test_broadcast_1():
input_data = np.array([1, 2, 3], dtype=np.int32)
new_shape = [3, 3]
expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_2():
input_data = np.arange(4, dtype=np.int32)
new_shape = [3, 4, 2, 4]
expected = np.broadcast_to(input_data, new_shape)
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_3():
input_data = np.array([1, 2, 3], dtype=np.int32)
new_shape = [3, 3]
axis_mapping = [0]
expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape, axis_mapping, "EXPLICIT")
assert np.allclose(result, expected)
@pytest.mark.xfail(reason="AssertionError: assert dtype('float32') == <class 'bool'")
@pytest.mark.parametrize(
"destination_type, input_data",
[(bool, np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))],
)
def test_convert_to_bool(destination_type, input_data):
expected = np.array(input_data, dtype=bool)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == bool
@pytest.mark.parametrize(
"destination_type, rand_range, in_dtype, expected_type",
[
pytest.param(np.float32, (-8, 8), np.int32, np.float32),
pytest.param(np.float64, (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
pytest.param("f32", (-8, 8), np.int32, np.float32),
pytest.param("f64", (-16383, 16383), np.int64, np.float64, marks=xfail_issue_35929),
],
)
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
np.random.seed(133391)
input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.int8, np.int8),
(np.int16, np.int16),
(np.int32, np.int32),
(np.int64, np.int64),
("i8", np.int8),
("i16", np.int16),
("i32", np.int32),
("i64", np.int64),
],
)
def test_convert_to_int(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@xfail_issue_35929
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.uint8, np.uint8),
(np.uint16, np.uint16),
(np.uint32, np.uint32),
(np.uint64, np.uint64),
("u8", np.uint8),
("u16", np.uint16),
("u32", np.uint32),
("u64", np.uint64),
],
)
def test_convert_to_uint(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
def test_bad_data_shape():
A = ng.parameter(shape=[2, 2], name="A", dtype=np.float32)
B = ng.parameter(shape=[2, 2], name="B")
model = A + B
runtime = get_runtime()
computation = runtime.computation(model, A, B)
value_a = np.array([[1, 2]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
with pytest.raises(UserInputError):
computation(value_a, value_b)
def test_constant_get_data_bool():
input_data = np.array([True, False, False, True])
node = ng.constant(input_data, dtype=np.bool)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.float32, np.float64])
def test_constant_get_data_floating_point(data_type):
np.random.seed(133391)
input_data = np.random.randn(2, 3, 4).astype(data_type)
min_value = -1.0e20
max_value = 1.0e20
input_data = min_value + input_data * max_value * data_type(2)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.int64, np.int32, np.int16, np.int8])
def test_constant_get_data_signed_integer(data_type):
np.random.seed(133391)
input_data = np.random.randint(
np.iinfo(data_type).min, np.iinfo(data_type).max, size=[2, 3, 4], dtype=data_type
)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.uint64, np.uint32, np.uint16, np.uint8])
def test_constant_get_data_unsigned_integer(data_type):
np.random.seed(133391)
input_data = np.random.randn(2, 3, 4).astype(data_type)
input_data = (
np.iinfo(data_type).min + input_data * np.iinfo(data_type).max + input_data * np.iinfo(data_type).max
)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@xfail_issue_36480
def test_backend_config():
dummy_config = {"dummy_option": "dummy_value"}
# Expect no throw
runtime = get_runtime()
runtime.set_config(dummy_config)
def test_result():
node = np.array([[11, 10], [1, 8], [3, 4]])
result = run_op_node([node], ng.result)
assert np.allclose(result, node)
def test_node_friendly_name():
dummy_node = ng.parameter(shape=[1], name="dummy_name")
assert(dummy_node.friendly_name == "dummy_name")
dummy_node.set_friendly_name("changed_name")
assert(dummy_node.get_friendly_name() == "changed_name")
dummy_node.friendly_name = "new_name"
assert(dummy_node.get_friendly_name() == "new_name")
def test_node_output():
input_array = np.array([0, 1, 2, 3, 4, 5])
splits = 3
expected_shape = len(input_array) // splits
input_tensor = ng.constant(input_array, dtype=np.int32)
axis = ng.constant(0, dtype=np.int64)
split_node = ng.split(input_tensor, axis, splits)
split_node_outputs = split_node.outputs()
assert len(split_node_outputs) == splits
assert [output_node.get_index() for output_node in split_node_outputs] == [0, 1, 2]
assert np.equal(
[output_node.get_element_type() for output_node in split_node_outputs],
input_tensor.get_element_type(),
).all()
assert np.equal(
[output_node.get_shape() for output_node in split_node_outputs],
Shape([expected_shape]),
).all()
assert np.equal(
[output_node.get_partial_shape() for output_node in split_node_outputs],
PartialShape([expected_shape]),
).all()
output0 = split_node.output(0)
output1 = split_node.output(1)
output2 = split_node.output(2)
assert [output0.get_index(), output1.get_index(), output2.get_index()] == [0, 1, 2]
def test_node_input():
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=np.float32, name="A")
parameter_b = ng.parameter(shape, dtype=np.float32, name="B")
model = parameter_a + parameter_b
model_inputs = model.inputs()
assert len(model_inputs) == 2
assert [input_node.get_index() for input_node in model_inputs] == [0, 1]
assert np.equal(
[input_node.get_element_type() for input_node in model_inputs],
model.get_element_type(),
).all()
assert np.equal(
[input_node.get_shape() for input_node in model_inputs], Shape(shape)
).all()
assert np.equal(
[input_node.get_partial_shape() for input_node in model_inputs],
PartialShape(shape),
).all()
input0 = model.input(0)
input1 = model.input(1)
assert [input0.get_index(), input1.get_index()] == [0, 1]
def test_node_target_inputs_source_output():
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=np.float32, name="A")
parameter_b = ng.parameter(shape, dtype=np.float32, name="B")
model = parameter_a + parameter_b
out_a = list(parameter_a.output(0).get_target_inputs())[0]
out_b = list(parameter_b.output(0).get_target_inputs())[0]
assert out_a.get_node().name == model.name
assert out_b.get_node().name == model.name
assert np.equal([out_a.get_shape()], [model.get_output_shape(0)]).all()
assert np.equal([out_b.get_shape()], [model.get_output_shape(0)]).all()
in_model0 = model.input(0).get_source_output()
in_model1 = model.input(1).get_source_output()
assert in_model0.get_node().name == parameter_a.name
assert in_model1.get_node().name == parameter_b.name
assert np.equal([in_model0.get_shape()], [model.get_output_shape(0)]).all()
assert np.equal([in_model1.get_shape()], [model.get_output_shape(0)]).all()
def test_variants():
variant_int = VariantInt(32)
variant_str = VariantString("test_text")
assert variant_int.get() == 32
assert variant_str.get() == "test_text"
variant_int.set(777)
variant_str.set("another_text")
assert variant_int.get() == 777
assert variant_str.get() == "another_text"
def test_runtime_info():
test_shape = PartialShape([1, 1, 1, 1])
test_type = Type.f32
test_param = Parameter(test_type, test_shape)
relu_node = ng.relu(test_param)
runtime_info = relu_node.get_rt_info()
runtime_info["affinity"] = "test_affinity"
relu_node.set_friendly_name("testReLU")
runtime_info_after = relu_node.get_rt_info()
assert runtime_info_after["affinity"] == "test_affinity"
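# Extra illustrative check (an assumption: it reuses the runtime/computation
# pattern from the tests above and is not part of the original suite).
def test_relu_example():
    runtime = get_runtime()
    data = np.array([[-1.0, 2.0], [0.0, -3.0]], dtype=np.float32)
    parameter = ng.parameter([2, 2], dtype=np.float32, name="X")
    computation = runtime.computation(ng.relu(parameter), parameter)
    assert np.allclose(computation(data), np.maximum(data, 0))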
|
the-stack_106_26298 | import traceback
import Configuration
from Classes.Logic.LogicLaserMessageFactory import LogicLaserMessageFactory
from Classes.Messaging import Messaging
class MessageManager:
def receiveMessage(self, messageType, messagePayload):
message = LogicLaserMessageFactory.createMessageByType(messageType, messagePayload)
if message is not None:
try:
if Configuration.settings["Proxy"] == False and message.isServerToClient():
message.encode()
else:
message.fields = message.decode()
if Configuration.settings["Proxy"] == False:
message.execute(self, message.fields)
except Exception:
print(traceback.format_exc())
if Configuration.settings["Proxy"] == False:
Messaging.sendMessage(23457, {"Socket": self.client}, self.player) |
the-stack_106_26299 | import sys
import os
if len(sys.argv) != 4:
print(len(sys.argv))
sys.exit('Usage: python ' + sys.argv[0] + ' <input pmf.f filename> <mechID from PP> <out pmf.dat filename>')
print('reading pmf fortran file ' + sys.argv[1])
try:
f = open(sys.argv[1])
except Exception as ex:
sys.exit(ex)
lines = f.readlines()
f.close()
nl = len(lines)
for l in lines:
split = l.strip().split(':')
if 'umber of states' in split[0]:
ns = int(split[-1])
break
for l in lines:
split = l.strip().split(':')
if 'umber of x data points' in split[0]:
np = int(split[-1])
break
print('Number of states per point',ns)
print('Number of points',np)
in_dat = False
nl_hdr = 0
for i in range(nl):
s = lines[i].split()
is_nondat = len(s)==1 or not (len(s) > 2 and s[0] == 'data' and s[1] == 'x_data(1)')
if not in_dat:
if is_nondat:
nl_hdr = nl_hdr+1
else:
in_dat = True
x_data = []
for i in range(nl_hdr,nl+1,ns+1):
if lines[i].split()[0] != 'data':
break
x_data.append(float([x for x in lines[i].split()[-1].split('/') if x][-1]))
np = len(x_data)
y_data = []
for j in range(1,ns+1):
y_data.append([])
for i in range(nl_hdr+j,nl+1,ns+1):
if lines[i].split()[0] != 'data':
break
y_data[-1].append(float([x for x in lines[i].split()[-1].split('/') if x][-1]))
if len(y_data[-1]) != np:
sys.exit('Different number of values read for y_data than x_data')
mech = sys.argv[2]
mech_file = os.environ['PELE_PHYSICS_HOME'] + '/Support/Fuego/Mechanism/Models/' + mech + '/mechanism.H'
print('grabbing species list from ' + mech_file)
try:
f = open(mech_file)
except Exception as ex:
sys.exit(ex)
lines = f.readlines()
f.close()
species = []
for l in lines:
tok = l.split()
if len(tok)==3 and '_ID' in tok[1]:
species.append('"' + tok[1][:-3] + '"')
print('Found '+str(len(species))+' species names to use')
if len(species) + 3 != ns:
sys.exit('Number of species not compatible with fortran file')
try:
f = open(sys.argv[3],'w')
except Exception as ex:
sys.exit(ex)
f.write('VARIABLES = "X" "temp" "u" "rho" ' + ' '.join(species)+'\n')
f.write('ZONE I=' + str(np) + ' FORMAT=POINT\n')
for i in range(np):
f.write(str(x_data[i])+' '+' '.join([str(y_data[j][i]) for j in range(ns)])+'\n')
f.close()
print('pmf data written to ' + sys.argv[3])
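# Illustrative sketch of the Fortran layout this parser assumes (values are
# placeholders, not taken from a real pmf.f): each point contributes one
# x_data line followed by ns y_data lines, with the value enclosed in slashes
# as the last whitespace-separated token, e.g.
#
#   data x_data(1) /0.0/
#   data y_data(1,1) /298.0/
#   data y_data(1,2) /0.0/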
|
the-stack_106_26300 | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
import yaml
import ray
from ray import tune
from ray.tune.registry import register_env
def arg_parser():
parser = argparse.ArgumentParser()
''' Specification file of the expriment '''
parser.add_argument("--spec", required=True, type=str)
''' Mode for running an experiment '''
parser.add_argument("--mode", required=True, choices=['train', 'load'])
''' '''
parser.add_argument("--checkpoint", type=str, default=None)
''' '''
parser.add_argument("--num_workers", type=int, default=None)
''' '''
parser.add_argument("--num_cpus", type=int, default=1)
''' '''
parser.add_argument("--num_gpus", type=int, default=0)
''' '''
parser.add_argument("--num_envs_per_worker", type=int, default=None)
''' '''
parser.add_argument("--num_cpus_per_worker", type=int, default=None)
''' '''
parser.add_argument("--num_gpus_per_worker", type=int, default=None)
''' Directory where the environment and related files are stored '''
parser.add_argument("--project_dir", type=str, default=None)
''' Directory where intermediate results are saved '''
parser.add_argument("--local_dir", type=str, default=None)
''' Verbose '''
parser.add_argument("--verbose", action='store_true')
''' '''
parser.add_argument("--ip_head", type=str, default=None)
''' '''
parser.add_argument("--password", type=str, default=None)
return parser
if __name__ == "__main__":
args = arg_parser().parse_args()
with open(args.spec) as f:
spec = yaml.load(f, Loader=yaml.FullLoader)
config = spec['config']
'''
Register environment to learn according to the input specification file
'''
if config['env'] == "HumanoidImitation":
import rllib_env_imitation as env_module
else:
raise NotImplementedError("Unknown Environment")
register_env(config['env'], lambda config: env_module.env_cls(config))
'''
Register custom model to use if it exists
'''
framework = config.get('framework')
if config.get('model'):
custom_model = config.get('model').get('custom_model')
if custom_model:
if framework=='torch':
import rllib_model_custom_torch
else:
raise NotImplementedError("Tensorflow is not supported!")
'''
Validate configurations and overide values by arguments
'''
if args.local_dir is not None:
spec.update({'local_dir': args.local_dir})
if args.project_dir is not None:
assert os.path.exists(args.project_dir)
config['env_config']['project_dir'] = args.project_dir
if config['model'].get('custom_model_config'):
config['model']['custom_model_config'].update(
{'project_dir': config['env_config']['project_dir']})
if args.verbose:
config['env_config'].update({'verbose': args.verbose})
if args.checkpoint is not None:
assert os.path.exists(args.checkpoint)
if args.num_workers is not None:
config.update({'num_workers': args.num_workers})
if args.num_gpus is not None:
config.update({'num_gpus': args.num_gpus})
if args.num_envs_per_worker:
config.update({'num_envs_per_worker': args.num_envs_per_worker})
if args.num_cpus_per_worker:
config.update({'num_cpus_per_worker': args.num_cpus_per_worker})
if args.num_gpus_per_worker:
config.update({'num_gpus_per_worker': args.num_gpus_per_worker})
if args.mode == "train":
if not os.path.exists(spec['local_dir']):
raise Exception(
"The directory does not exist: %s"%spec['local_dir'])
config_override = env_module.config_override(spec)
config.update(config_override)
if args.ip_head:
# tmp_dir = os.path.join(spec['local_dir'], os.path.join('tmp/', spec['name']))
if args.password:
ray.init(address=args.ip_head, redis_password=args.password)
else:
ray.init(address=args.ip_head)
else:
assert args.num_cpus is not None
assert args.num_gpus is not None
ray.init(num_cpus=args.num_cpus, num_gpus=args.num_gpus)
def adjust_config_for_loading(config, alg):
config["num_workers"] = 1
config['num_envs_per_worker'] = 1
config['num_cpus_per_worker'] = 1
config['num_gpus_per_worker'] = 0
config['remote_worker_envs'] = False
def adjust_config(config, alg):
rollout_fragment_length = config.get('rollout_fragment_length')
num_workers = config.get('num_workers')
num_envs_per_worker = config.get('num_envs_per_worker')
train_batch_size = config.get('train_batch_size')
'''
Set rollout_fragment_length value so that
        workers can generate train_batch_size tuples correctly
'''
rollout_fragment_length = \
max(train_batch_size // (num_workers * num_envs_per_worker), 100)
while rollout_fragment_length * num_workers * num_envs_per_worker \
< train_batch_size:
rollout_fragment_length += 1
config['rollout_fragment_length'] = rollout_fragment_length
adjust_config(config, spec['run'])
if args.mode == "load":
adjust_config_for_loading(config, spec['run'])
if spec["run"] == "PPO":
from ray.rllib.agents.ppo import PPOTrainer as Trainer
else:
raise NotImplementedError("Not a supported algorithm")
trainer = Trainer(env=env_module.env_cls, config=config)
if args.checkpoint is not None:
trainer.restore(args.checkpoint)
env_module.rm.initialize()
env = env_module.env_cls(config['env_config'])
cam = env_module.default_cam()
renderer = env_module.EnvRenderer(trainer=trainer, env=env, cam=cam)
renderer.run()
else:
tune.run(
spec['run'],
name=spec['name'],
stop=spec['stop'],
local_dir=spec['local_dir'],
checkpoint_freq=spec['checkpoint_freq'],
checkpoint_at_end=spec['checkpoint_at_end'],
config=config,
restore=args.checkpoint,
sync_to_driver=False,
)
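# Illustrative spec file for --spec (hypothetical example covering the keys
# this script reads; values are placeholders, not from the original project):
#
#   run: PPO
#   name: humanoid_imitation_test
#   local_dir: ~/ray_results
#   checkpoint_freq: 50
#   checkpoint_at_end: true
#   stop:
#     time_total_s: 86400
#   config:
#     env: HumanoidImitation
#     framework: torch
#     num_workers: 16
#     num_envs_per_worker: 1
#     train_batch_size: 32000
#     model:
#       custom_model: my_custom_torch_model
#       custom_model_config: {}
#     env_config:
#       project_dir: ./data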
|
the-stack_106_26302 | import typing
if typing.TYPE_CHECKING: # pragma: no cover
from .applications import App
class SettingsError(Exception):
"""Raised when a setting is missing, ill-declared or invalid."""
class Settings:
def __init__(self, obj: typing.Optional[typing.Any]):
for setting in dir(obj):
if not setting.isupper() or setting.startswith("_"):
continue
value = getattr(obj, setting)
setattr(self, setting, value)
class LazySettings:
"""A lazy proxy for application settings.
Once configured, an instance of this class can be used to access settings
from anywhere in the application code base.
Such an instance is in fact exposed as `bocadillo.settings`.
Settings can be accessed using:
- Dot notation: `settings.FOO`.
- The `getattr` builtin: `getattr(settings, "FOO")`.
- The dict-like `.get()` method: `settings.get("FOO", "foo")`.
"""
def __init__(self):
self._wrapped = None
def configure(self, obj: typing.Any = None, **options):
if self.configured:
raise RuntimeError("Settings are already configured")
wrapped = Settings(obj)
for name, option in options.items():
assert name.isupper()
setattr(wrapped, name, option)
self._wrapped = wrapped
@property
def configured(self) -> bool:
return self._wrapped is not None
def __getattr__(self, name: str) -> typing.Any:
if not self.configured:
raise SettingsError(
f"Requested setting {name} but settings aren't configured yet."
)
value = getattr(self._wrapped, name)
self.__dict__[name] = value # cache setting
return value
def __setattr__(self, name: str, value: typing.Any):
if name == "_wrapped":
self.__dict__.clear()
else:
self.__dict__.pop(name, None) # remove from cache
super().__setattr__(name, value)
def __contains__(self, name: str) -> bool:
return name in self.__dict__
def get(self, name: str, default: typing.Any = None) -> typing.Any:
return getattr(self, name, default)
def _clear(self):
self._wrapped = None
settings = LazySettings() # pylint: disable=invalid-name
def configure(app: "App", settings_obj: typing.Any = None, **kwargs) -> "App":
"""Configure the application settings and setup plugins.
# Parameters
app (App): an application instance.
settings (any): an optional settings object or module.
**kwargs (any): arbitrary settings, case-insensitive.
# Returns
app (App): the same object as `app`, for convenience.
"""
from .plugins import setup_plugins
if settings_obj is None:
settings_obj = kwargs.pop("settings", None)
kwargs = {key.upper(): value for key, value in kwargs.items()}
settings.configure(obj=settings_obj, **kwargs)
setup_plugins(app)
return app
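# Minimal usage sketch (illustrative names; not part of the module itself).
if __name__ == "__main__":
    demo = LazySettings()
    demo.configure(DEBUG=True, ALLOWED_HOSTS=["example.com"])
    assert demo.DEBUG is True
    assert demo.get("MISSING", "fallback") == "fallback"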
|
the-stack_106_26305 | import config
import numpy as np
import os
import tarfile
import torch
import torch.nn as nn
import datasets
import datasets.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.distributed import DistributedSampler
from utils import *
dist = False
world_size = config.PARAM['world_size']
num_workers = config.PARAM['num_workers']
normalize = config.PARAM['normalize']
device = config.PARAM['device']
def fetch_dataset(data_name):
print('fetching data {}...'.format(data_name))
if(data_name=='MNIST'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/test'.format(data_name)
train_dataset = datasets.MNIST(root=train_dir, train=True, download=True, transform=transforms.ToTensor())
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.ToTensor()])
test_transform = transforms.Compose([transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.MNIST(root=test_dir, train=False, download=True, transform=test_transform)
elif(data_name=='EMNIST' or data_name=='EMNIST_byclass' or data_name=='EMNIST_bymerge' or
data_name=='EMNIST_balanced' or data_name=='EMNIST_letters' or data_name=='EMNIST_digits' or data_name=='EMNIST_mnist'):
train_dir = './data/{}/train'.format(data_name.split('_')[0])
test_dir = './data/{}/test'.format(data_name.split('_')[0])
transform = transforms.Compose([transforms.ToTensor()])
split = 'balanced' if len(data_name.split('_')) == 1 else data_name.split('_')[1]
train_dataset = datasets.EMNIST(root=train_dir, split=split, branch=branch, train=True, download=True, transform=transform)
test_dataset = datasets.EMNIST(root=test_dir, split=split, branch=branch, train=False, download=True, transform=transform)
elif(data_name=='FashionMNIST'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/test'.format(data_name)
transform = transforms.Compose([transforms.ToTensor()])
train_dataset = datasets.FashionMNIST(root=train_dir, train=True, download=True, transform=transform)
test_dataset = datasets.FashionMNIST(root=test_dir, train=False, download=True, transform=transform)
elif(data_name=='CIFAR10'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/validation'.format(data_name)
train_dataset = datasets.CIFAR10(train_dir, train=True, transform=transforms.ToTensor(), download=True)
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.CIFAR10(test_dir, train=False, transform=test_transform, download=True)
elif(data_name=='CIFAR100'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/validation'.format(data_name)
train_dataset = datasets.CIFAR100(train_dir, branch=branch, train=True, transform=transforms.ToTensor(), download=True)
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.CIFAR100(test_dir, branch=branch, train=False, transform=test_transform, download=True)
elif(data_name=='SVHN'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/validation'.format(data_name)
train_dataset = datasets.SVHN(train_dir, split='train', transform=transforms.ToTensor(), download=True)
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.ToTensor()])
test_transform = transforms.Compose([transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.SVHN(test_dir, split='test', transform=test_transform, download=True)
elif(data_name=='ImageNet'):
train_dir = './data/{}/train'.format(data_name)
test_dir = './data/{}/validation'.format(data_name)
train_dataset = datasets.ImageFolder(train_dir, transform=transforms.ToTensor())
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.ImageFolder(test_dir, transform=test_transform)
elif(data_name=='CUB2011'):
train_dir = './data/{}/train'.format(data_name.split('_')[0])
test_dir = './data/{}/validation'.format(data_name.split('_')[0])
train_dataset = datasets.CUB2011(train_dir, transform=transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor()]), download=True)
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.CUB2011(test_dir, transform=test_transform, download=True)
elif(data_name=='WheatImage' or data_name=='WheatImage_binary' or data_name=='WheatImage_six'):
train_dir = './data/{}/train'.format(data_name.split('_')[0])
test_dir = './data/{}/validation'.format(data_name.split('_')[0])
label_mode = 'six' if len(data_name.split('_')) == 1 else data_name.split('_')[1]
train_dataset = datasets.WheatImage(train_dir, label_mode=label_mode, transform=transforms.Compose([transforms.Resize((224,288)),
transforms.ToTensor()]))
if(normalize):
stats = make_stats(train_dataset,batch_size=128)
train_transform = transforms.Compose([transforms.Resize((224,288)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.Resize((224,288)),
transforms.ToTensor(),
transforms.Normalize(stats)])
else:
train_transform = transforms.Compose([transforms.Resize((224,288)),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize((224,288)),
transforms.ToTensor()])
train_dataset.transform = train_transform
test_dataset = datasets.WheatImage(test_dir, label_mode=label_mode, transform=test_transform)
elif(data_name=='CocoDetection'):
train_dir = './data/Coco/train2017'
train_ann = './data/Coco/annotations/instances_train2017.json'
test_dir = './data/Coco/val2017'
test_ann = './data/Coco/annotations/instances_val2017.json'
transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset = datasets.CocoDetection(
train_dir, train_ann, transform=transform)
test_dataset = datasets.CocoDetection(
test_dir, test_ann, transform=transform)
elif(data_name=='CocoCaptions'):
train_dir = './data/Coco/train2017'
train_ann = './data/Coco/annotations/captions_train2017.json'
test_dir = './data/Coco/val2017'
test_ann = './data/Coco/annotations/captions_val2017.json'
transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset = datasets.CocoCaptions(
train_dir, train_ann, transform=transform)
test_dataset = datasets.CocoCaptions(
test_dir, test_ann, transform=transform)
elif(data_name=='VOCDetection'):
train_dir = './data/VOC/VOCdevkit'
test_dir = './data/VOC/VOCdevkit'
transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset = datasets.VOCDetection(
train_dir, 'trainval', transform=transform)
test_dataset = datasets.VOCDetection(
test_dir, 'test', transform=transform)
elif(data_name=='VOCSegmentation'):
train_dir = './data/VOC/VOCdevkit'
test_dir = './data/VOC/VOCdevkit'
transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor()])
train_dataset = datasets.VOCSegmentation(train_dir, 'trainval', transform=transform)
test_dataset = datasets.VOCSegmentation(test_dir, 'test', transform=transform)
elif(data_name=='MOSI' or data_name=='MOSI_binary' or data_name=='MOSI_five' or data_name=='MOSI_seven' or data_name=='MOSI_regression'):
train_dir = './data/{}'.format(data_name.split('_')[0])
test_dir = './data/{}'.format(data_name.split('_')[0])
label_mode = 'five' if len(data_name.split('_')) == 1 else data_name.split('_')[1]
train_dataset = datasets.MOSI(train_dir, split='trainval', label_mode=label_mode, download=True)
stats = make_stats(train_dataset,batch_size=1)
train_transform = transforms.Compose([transforms.Normalize(stats)])
test_transform = transforms.Compose([transforms.Normalize(stats)])
train_dataset.transform = train_transform
test_dataset = datasets.MOSI(test_dir, split='test', label_mode=label_mode, download=True, transform=test_transform)
elif(data_name =='Kodak'):
train_dataset = None
transform = transforms.Compose([transforms.ToTensor()])
test_dir = './data/{}'.format(data_name)
train_dataset = datasets.ImageFolder(
test_dir, transform)
test_dataset = datasets.ImageFolder(
test_dir, transform)
elif(data_name =='UCID'):
train_dataset = None
transform = transforms.Compose([transforms.ToTensor()])
test_dir = './data/{}'.format(data_name)
train_dataset = datasets.ImageFolder(
test_dir, transform)
test_dataset = datasets.ImageFolder(
test_dir, transform)
else:
raise ValueError('Not valid dataset name')
print('data ready')
return train_dataset,test_dataset
def input_collate(batch):
if(isinstance(batch[0], dict)):
output = {key: [] for key in batch[0].keys()}
for b in batch:
for key in b:
output[key].append(b[key])
return output
else:
return default_collate(batch)
def split_dataset(dataset,data_size,batch_size,radomGen=np.random.RandomState(1234),shuffle={'train':True,'test':False},collate_fn=input_collate):
data_loader = {}
for k in dataset:
data_size[k] = len(dataset[k]) if (data_size[k]==0) else data_size[k]
batch_size[k] = data_size[k] if (batch_size[k]==0) else batch_size[k]
data_idx_k = radomGen.choice(list(range(len(dataset[k]))), size=data_size[k], replace=False)
dataset_k = torch.utils.data.Subset(dataset[k], data_idx_k)
data_loader[k] = torch.utils.data.DataLoader(dataset=dataset_k,
shuffle=shuffle[k], batch_size=batch_size[k], pin_memory=True, sampler=None, num_workers=num_workers, collate_fn=collate_fn)
return data_loader
def split_dataset_cross_validation(train_dataset,test_dataset,data_size,batch_size,num_fold,radomGen,p=0.8):
indices = list(range(len(train_dataset)))
data_idx = radomGen.choice(indices, size=data_size, replace=False)
if(batch_size==0):
batch_size = len(train_idx)
else:
batch_size = batch_size*world_size
if(num_fold==1):
train_idx = radomGen.choice(data_idx, size=int(data_size*p), replace=False)
sub_train_dataset = torch.utils.data.Subset(train_dataset, train_idx)
train_sampler = DistributedSampler(sub_train_dataset) if (world_size > 1 and dist) else None
train_loader = [torch.utils.data.DataLoader(dataset=sub_train_dataset,
shuffle=(train_sampler is None), batch_size=batch_size, pin_memory=True, sampler=train_sampler, num_workers=num_workers*world_size)]
validation_idx = list(set(data_idx) - set(train_idx))
validation_dataset = torch.utils.data.Subset(train_dataset, validation_idx)
validation_sampler = DistributedSampler(validation_dataset) if (world_size > 1 and dist) else None
validation_loader = [torch.utils.data.DataLoader(dataset=validation_dataset,
batch_size=batch_size, pin_memory=True, sampler=validation_sampler, num_workers=num_workers*world_size)]
elif(num_fold>1 and num_fold<=len(indices)):
splitted_idx = np.array_split(data_idx, num_fold)
train_loader = []
validation_loader = []
for i in range(num_fold):
validation_idx = splitted_idx[i]
train_idx = list(set(data_idx) - set(validation_idx))
cur_train_dataset = torch.utils.data.Subset(train_dataset, train_idx)
cur_train_sampler = DistributedSampler(cur_train_dataset) if (world_size > 1 and dist) else None
train_loader.append(torch.utils.data.DataLoader(dataset=cur_train_dataset,
shuffle=(cur_train_sampler is None), batch_size=batch_size, pin_memory=True, sampler=cur_train_sampler, num_workers=num_workers*world_size))
validation_dataset = torch.utils.data.Subset(train_dataset, validation_idx)
validation_sampler = DistributedSampler(validation_dataset) if (world_size > 1 and dist) else None
            validation_loader.append(torch.utils.data.DataLoader(dataset=validation_dataset,
batch_size=batch_size, pin_memory=True, sampler=validation_sampler, num_workers=num_workers*world_size))
else:
error("Invalid number of fold")
exit()
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size, pin_memory=True, num_workers=num_workers*world_size)
return train_loader,validation_loader,test_loader
def fetch_dataset_synth(input_feature,output_feature,high_dim=None,cov_mode='base',noise_sigma=np.sqrt(0.1),randomGen=np.random.RandomState(1234)):
print('fetching data...')
data_size = 50000
test_size = 10000
V = make_cov_mat(input_feature,cov_mode)
X = randomGen.multivariate_normal(np.zeros(input_feature),V,data_size+test_size)
if(high_dim is None):
beta = randomGen.randn(input_feature,output_feature)
else:
if(high_dim>=input_feature):
raise ValueError('invalid high dimension')
valid_beta = randomGen.randn(high_dim,output_feature)
empty_beta = np.zeros((input_feature-high_dim,output_feature))
beta = np.vstack((valid_beta,empty_beta))
mu = np.matmul(X,beta)
eps = noise_sigma*randomGen.randn(*mu.shape)
if(output_feature==1):
y = mu + eps
elif(output_feature>1):
p = softmax(mu + eps)
y = []
for i in range(X.shape[0]):
sample = randomGen.multinomial(1,p[i,])
y.append(np.where(sample==1)[0][0])
y = np.array(y)
else:
raise ValueError('invalid dimension')
print('data ready')
X,y = X.astype(np.float32),y.astype(np.int64)
train_dataset = torch.utils.data.TensorDataset(torch.from_numpy(X[:data_size,:]), torch.from_numpy(y[:data_size]))
test_dataset = torch.utils.data.TensorDataset(torch.from_numpy(X[data_size:,:]), torch.from_numpy(y[data_size:]))
return train_dataset,test_dataset
def make_cov_mat(dim,mode,zo=0.5):
if(mode=='base'):
V = np.eye(dim)
elif(mode=='corr'):
V = np.full((dim, dim), zo)
V = V + (1-zo)*np.eye(dim)
elif(mode=='decay_corr'):
indices = np.arange(dim)
valid_indices = [indices,indices]
mesh_indices = np.meshgrid(*valid_indices, sparse=False, indexing='ij')
exponent = np.abs(mesh_indices[0]-mesh_indices[1])
V = np.power(zo,exponent)
else:
raise ValueError('invalid covariance mode')
return V
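# Quick illustration (standalone check of make_cov_mat above, not used by the
# pipeline): a 3x3 'decay_corr' matrix with zo=0.5 is
#   [[1.0,  0.5,  0.25],
#    [0.5,  1.0,  0.5 ],
#    [0.25, 0.5,  1.0 ]]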
def make_stats(dataset,reuse=True,batch_size=1000):
if(reuse and os.path.exists('./data/stats/{}.pkl'.format(dataset.data_name))):
stats = load('./data/stats/{}.pkl'.format(dataset.data_name))
elif(dataset is not None):
data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0)
stats = {}
for k in dataset.feature_dim:
stats[k] = Stats(dataset.feature_dim[k])
print('Computing mean and std...')
with torch.no_grad():
for input in data_loader:
for k in dataset.feature_dim:
stats[k].update(input[k])
save(stats,'./data/stats/{}.pkl'.format(dataset.data_name))
else:
raise ValueError('Please provide dataset for making stats')
for k in dataset.output_names:
if(k != 'label'):
print('[{}] mean: {}, std: {}'.format(k,stats[k].mean,stats[k].std))
return stats
def unzip(path,mode='zip'):
filenames = filenames_in(path,mode)
for filename in filenames:
print('Unzipping {}'.format(filename),end='')
tar = tarfile.open('{}/{}.{}'.format(path,filename,mode))
tar.extractall(path='{}/{}'.format(path,filename))
tar.close()
print('Done')
return
def extract_patches_2d(img,patch_shape,step=[1.0,1.0]):
patch_H, patch_W = patch_shape[0], patch_shape[1]
if(img.size(2)<patch_H):
num_padded_H_Top = (patch_H - img.size(2))//2
num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
padding_H = nn.ConstantPad2d((0,0,num_padded_H_Top,num_padded_H_Bottom),0)
img = padding_H(img)
if(img.size(3)<patch_W):
num_padded_W_Left = (patch_W - img.size(3))//2
num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
padding_W = nn.ConstantPad2d((num_padded_W_Left,num_padded_W_Right,0,0),0)
img = padding_W(img)
step_int = [0,0]
step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
patches_fold_H = img.unfold(2, patch_H, step_int[0])
if((img.size(2) - patch_H) % step_int[0] != 0):
patches_fold_H = torch.cat((patches_fold_H,img[:,:,-patch_H:,].permute(0,1,3,2).unsqueeze(2)),dim=2)
patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
if((img.size(3) - patch_W) % step_int[1] != 0):
patches_fold_HW = torch.cat((patches_fold_HW,patches_fold_H[:,:,:,-patch_W:,:].permute(0,1,2,4,3).unsqueeze(3)),dim=3)
patches = patches_fold_HW.permute(2,3,0,1,4,5)
patches = patches.reshape(-1,img.size(0),img.size(1),patch_H,patch_W)
patches = patches.transpose(0,1)
return patches
def reconstruct_from_patches_2d(patches,img_shape,step=[1.0,1.0]):
patches = patches.transpose(0,1)
patch_H, patch_W = patches.size(3), patches.size(4)
img_size = (patches.size(1), patches.size(2), max(img_shape[0], patch_H), max(img_shape[1], patch_W))
step_int = [0,0]
step_int[0] = int(patch_H*step[0]) if(isinstance(step[0], float)) else step[0]
step_int[1] = int(patch_W*step[1]) if(isinstance(step[1], float)) else step[1]
nrow, ncol = 1 + (img_size[-2] - patch_H)//step_int[0], 1 + (img_size[-1] - patch_W)//step_int[1]
r_nrow = nrow + 1 if((img_size[2] - patch_H) % step_int[0] != 0) else nrow
r_ncol = ncol + 1 if((img_size[3] - patch_W) % step_int[1] != 0) else ncol
patches = patches.reshape(r_nrow,r_ncol,img_size[0],img_size[1],patch_H,patch_W)
img = torch.zeros(img_size, device = patches.device)
overlap_counter = torch.zeros(img_size, device = patches.device)
for i in range(nrow):
for j in range(ncol):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += patches[i,j,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[2] - patch_H) % step_int[0] != 0):
for j in range(ncol):
img[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += patches[-1,j,]
overlap_counter[:,:,-patch_H:,j*step_int[1]:j*step_int[1]+patch_W] += 1
if((img_size[3] - patch_W) % step_int[1] != 0):
for i in range(nrow):
img[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += patches[i,-1,]
overlap_counter[:,:,i*step_int[0]:i*step_int[0]+patch_H,-patch_W:] += 1
if((img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0):
img[:,:,-patch_H:,-patch_W:] += patches[-1,-1,]
overlap_counter[:,:,-patch_H:,-patch_W:] += 1
img /= overlap_counter
if(img_shape[0]<patch_H):
num_padded_H_Top = (patch_H - img_shape[0])//2
num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
img = img[:,:,num_padded_H_Top:-num_padded_H_Bottom,]
if(img_shape[1]<patch_W):
num_padded_W_Left = (patch_W - img_shape[1])//2
num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
img = img[:,:,:,num_padded_W_Left:-num_padded_W_Right]
return img
class Stats(object):
def __init__(self, feature_dim):
self.feature_dim = feature_dim
self.n_samples = 0
def update(self, data):
collapse_data = data.transpose(self.feature_dim,-1)
collapse_data = collapse_data.reshape(-1,collapse_data.size(-1))
if self.n_samples == 0:
self.n_samples = collapse_data.size(0)
self.n_features = data.size(self.feature_dim)
self.mean = collapse_data.mean(dim=0)
self.std = collapse_data.std(dim=0)
else:
if collapse_data.size(1) != self.n_features:
raise ValueError("Data dims don't match prev observations.")
m = float(self.n_samples)
n = collapse_data.size(0)
new_mean = collapse_data.mean(dim=0)
if(n==1):
new_std = new_mean.new_zeros(new_mean.size())
else:
new_std = collapse_data.std(dim=0)
old_mean = self.mean
old_std = self.std
self.mean = m/(m+n)*old_mean + n/(m+n)*new_mean
self.std = torch.sqrt(m/(m+n)*old_std**2 + n/(m+n)*new_std**2 + m*n/(m+n)**2 * (old_mean - new_mean)**2)
self.n_samples += n
return
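# Illustrative round-trip check for the patch helpers above (a standalone
# sketch; assumes config.PARAM is populated so the module imports cleanly,
# and is not part of the original training pipeline).
if __name__ == "__main__":
    img = torch.arange(2 * 3 * 8 * 8, dtype=torch.float32).reshape(2, 3, 8, 8)
    patches = extract_patches_2d(img, patch_shape=[4, 4], step=[0.5, 0.5])
    recon = reconstruct_from_patches_2d(patches, img_shape=[8, 8], step=[0.5, 0.5])
    assert torch.allclose(img, recon)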
|
the-stack_106_26312 | import asyncio
import collections
import copy
import time
from aiokafka.errors import (KafkaTimeoutError,
NotLeaderForPartitionError,
LeaderNotAvailableError,
ProducerClosed)
from aiokafka.record.legacy_records import LegacyRecordBatchBuilder
from aiokafka.record.default_records import DefaultRecordBatchBuilder
from aiokafka.structs import RecordMetadata
from aiokafka.util import create_future
class BatchBuilder:
def __init__(self, magic, batch_size, compression_type,
*, is_transactional):
if magic < 2:
assert not is_transactional
self._builder = LegacyRecordBatchBuilder(
magic, compression_type, batch_size)
else:
self._builder = DefaultRecordBatchBuilder(
magic, compression_type, is_transactional=is_transactional,
producer_id=-1, producer_epoch=-1, base_sequence=0,
batch_size=batch_size)
self._relative_offset = 0
self._buffer = None
self._closed = False
def append(self, *, timestamp, key, value, headers=[]):
"""Add a message to the batch.
Arguments:
timestamp (float or None): epoch timestamp in seconds. If None,
the timestamp will be set to the current time. If submitting to
an 0.8.x or 0.9.x broker, the timestamp will be ignored.
key (bytes or None): the message key. `key` and `value` may not
both be None.
value (bytes or None): the message value. `key` and `value` may not
both be None.
Returns:
If the message was successfully added, returns a metadata object
with crc, offset, size, and timestamp fields. If the batch is full
or closed, returns None.
"""
if self._closed:
return None
metadata = self._builder.append(
self._relative_offset, timestamp, key, value,
headers=headers)
# Check if we could add the message
if metadata is None:
return None
self._relative_offset += 1
return metadata
def close(self):
"""Close the batch to further updates.
Closing the batch before submitting to the producer ensures that no
messages are added via the ``producer.send()`` interface. To gracefully
support both the batch and individual message interfaces, leave the
batch open. For complete control over the batch's contents, close
before submission. Closing a batch has no effect on when it's sent to
the broker.
A batch may not be reopened after it's closed.
"""
if self._closed:
return
self._closed = True
def _set_producer_state(self, producer_id, producer_epoch, base_sequence):
assert type(self._builder) is DefaultRecordBatchBuilder
self._builder.set_producer_state(
producer_id, producer_epoch, base_sequence)
def _build(self):
self.close()
if self._buffer is None:
self._buffer = self._builder.build()
del self._builder # We may only call self._builder.build() once!
return self._buffer
def size(self):
"""Get the size of batch in bytes."""
if self._buffer is not None:
return len(self._buffer)
else:
return self._builder.size()
def record_count(self):
"""Get the number of records in the batch."""
return self._relative_offset
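# Usage sketch for BatchBuilder (illustrative values; magic=2 selects the
# DefaultRecordBatchBuilder path above and compression_type=0 means "none"):
#
#   builder = BatchBuilder(magic=2, batch_size=16384, compression_type=0,
#                          is_transactional=False)
#   builder.append(timestamp=None, key=b"key", value=b"value")
#   builder.close()
#   raw = builder._build()  # bytes ready to go into a produce request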
class MessageBatch:
"""This class incapsulate operations with batch of produce messages"""
def __init__(self, tp, builder, ttl):
self._builder = builder
self._tp = tp
self._ttl = ttl
self._ctime = time.monotonic()
# Waiters
# Set when messages are delivered to Kafka based on ACK setting
self.future = create_future()
self._msg_futures = []
# Set when sender takes this batch
self._drain_waiter = create_future()
self._retry_count = 0
@property
def tp(self):
return self._tp
@property
def record_count(self):
return self._builder.record_count()
def append(self, key, value, timestamp_ms, _create_future=create_future,
headers=[]):
"""Append message (key and value) to batch
Returns:
None if batch is full
or
asyncio.Future that will resolved when message is delivered
"""
metadata = self._builder.append(
timestamp=timestamp_ms, key=key, value=value, headers=headers)
if metadata is None:
return None
future = _create_future()
self._msg_futures.append((future, metadata))
return future
def done(self, base_offset, timestamp=None,
_record_metadata_class=RecordMetadata):
"""Resolve all pending futures"""
tp = self._tp
topic = tp.topic
partition = tp.partition
if timestamp == -1:
timestamp_type = 0
else:
timestamp_type = 1
# Set main batch future
if not self.future.done():
self.future.set_result(_record_metadata_class(
topic, partition, tp, base_offset, timestamp, timestamp_type))
# Set message futures
for future, metadata in self._msg_futures:
if future.done():
continue
# If timestamp returned by broker is -1 it means we need to take
# the timestamp sent by user.
if timestamp == -1:
timestamp = metadata.timestamp
offset = base_offset + metadata.offset
future.set_result(_record_metadata_class(
topic, partition, tp, offset, timestamp, timestamp_type))
def done_noack(self):
""" Resolve all pending futures to None """
# Faster resolve for base_offset=None case.
if not self.future.done():
self.future.set_result(None)
for future, _ in self._msg_futures:
if future.done():
continue
future.set_result(None)
def failure(self, exception):
if not self.future.done():
self.future.set_exception(exception)
for future, _ in self._msg_futures:
if future.done():
continue
# we need to copy exception so traceback is not multiplied
# https://github.com/aio-libs/aiokafka/issues/246
future.set_exception(copy.copy(exception))
# Consume exception to avoid warnings. We delegate this consumption
# to user only in case of explicit batch API.
if self._msg_futures:
self.future.exception()
# In case where sender fails and closes batches all waiters have to be
# reset also.
if not self._drain_waiter.done():
self._drain_waiter.set_exception(exception)
async def wait_drain(self, timeout=None):
"""Wait until all message from this batch is processed"""
waiter = self._drain_waiter
await asyncio.wait([waiter], timeout=timeout)
if waiter.done():
waiter.result() # Check for exception
def expired(self):
"""Check that batch is expired or not"""
return (time.monotonic() - self._ctime) > self._ttl
def drain_ready(self):
"""Compress batch to be ready for send"""
if not self._drain_waiter.done():
self._drain_waiter.set_result(None)
self._retry_count += 1
    def reset_drain(self):
        """Reset the drain waiter before doing another retry"""
assert self._drain_waiter.done()
self._drain_waiter = create_future()
def set_producer_state(self, producer_id, producer_epoch, base_sequence):
assert not self._drain_waiter.done()
self._builder._set_producer_state(
producer_id, producer_epoch, base_sequence)
def get_data_buffer(self):
return self._builder._build()
def is_empty(self):
return self._builder.record_count() == 0
@property
def retry_count(self):
return self._retry_count
class MessageAccumulator:
"""Accumulator of messages batched by topic-partition
Producer adds messages to this accumulator and a background send task
    gets batches per node to process them.
"""
def __init__(
self, cluster, batch_size, compression_type, batch_ttl, *,
txn_manager=None):
self._batches = collections.defaultdict(collections.deque)
self._pending_batches = set([])
self._cluster = cluster
self._batch_size = batch_size
self._compression_type = compression_type
self._batch_ttl = batch_ttl
self._wait_data_future = create_future()
self._closed = False
self._api_version = (0, 9)
self._txn_manager = txn_manager
self._exception = None # Critical exception
def set_api_version(self, api_version):
self._api_version = api_version
async def flush(self):
waiters = []
for batches in self._batches.values():
for batch in list(batches):
waiters.append(batch.future)
for batch in list(self._pending_batches):
waiters.append(batch.future)
if waiters:
await asyncio.wait(waiters)
async def flush_for_commit(self):
waiters = []
for batches in self._batches.values():
for batch in batches:
                # We force all buffers to close to finalize the transaction
# scope. We should not add anything to this transaction.
batch._builder.close()
waiters.append(batch.future)
for batch in self._pending_batches:
waiters.append(batch.future)
# Wait for all waiters to finish. We only wait for the scope we defined
# above, other batches should not be delivered as part of this
# transaction
if waiters:
await asyncio.wait(waiters)
def fail_all(self, exception):
# Close all batches with this exception
for batches in self._batches.values():
for batch in batches:
batch.failure(exception)
for batch in self._pending_batches:
batch.failure(exception)
self._exception = exception
async def close(self):
self._closed = True
await self.flush()
async def add_message(
self, tp, key, value, timeout, timestamp_ms=None,
headers=[]
):
""" Add message to batch by topic-partition
        If the batch is already full, this method waits (at most `timeout`
        seconds) until the batch is drained by the send task
"""
while True:
if self._closed:
                # this can happen when the producer is closing but an async
                # task still tries to send some messages
raise ProducerClosed()
if self._exception is not None:
raise copy.copy(self._exception)
pending_batches = self._batches.get(tp)
if not pending_batches:
builder = self.create_builder()
batch = self._append_batch(builder, tp)
else:
batch = pending_batches[-1]
future = batch.append(key, value, timestamp_ms, headers=headers)
if future is not None:
return future
# Batch is full, can't append data atm,
# waiting until batch per topic-partition is drained
start = time.monotonic()
await batch.wait_drain(timeout)
timeout -= time.monotonic() - start
if timeout <= 0:
raise KafkaTimeoutError()
    def data_waiter(self):
        """ Return waiter future that will be resolved when accumulator contains
some data for drain
"""
return self._wait_data_future
def _pop_batch(self, tp):
batch = self._batches[tp].popleft()
not_retry = batch.retry_count == 0
if self._txn_manager is not None and not_retry:
assert self._txn_manager.has_pid(), \
"We should have waited for it in sender routine"
seq = self._txn_manager.sequence_number(batch.tp)
self._txn_manager.increment_sequence_number(
batch.tp, batch.record_count)
batch.set_producer_state(
producer_id=self._txn_manager.producer_id,
producer_epoch=self._txn_manager.producer_epoch,
base_sequence=seq)
batch.drain_ready()
if len(self._batches[tp]) == 0:
del self._batches[tp]
self._pending_batches.add(batch)
if not_retry:
def cb(fut, batch=batch, self=self):
self._pending_batches.remove(batch)
batch.future.add_done_callback(cb)
return batch
def reenqueue(self, batch):
tp = batch.tp
self._batches[tp].appendleft(batch)
self._pending_batches.remove(batch)
batch.reset_drain()
def drain_by_nodes(self, ignore_nodes, muted_partitions=set()):
""" Group batches by leader to partition nodes. """
nodes = collections.defaultdict(dict)
unknown_leaders_exist = False
for tp in list(self._batches.keys()):
# Just ignoring by node is not enough, as leader can change during
# the cycle
if tp in muted_partitions:
continue
leader = self._cluster.leader_for_partition(tp)
if leader is None or leader == -1:
if self._batches[tp][0].expired():
                    # the batch for this partition has expired and there is still
                    # no leader, so set an exception on the batch and pop it
batch = self._pop_batch(tp)
if leader is None:
err = NotLeaderForPartitionError()
else:
err = LeaderNotAvailableError()
batch.failure(exception=err)
unknown_leaders_exist = True
continue
elif ignore_nodes and leader in ignore_nodes:
continue
batch = self._pop_batch(tp)
# We can get an empty batch here if all `append()` calls failed
# with validation...
if not batch.is_empty():
nodes[leader][tp] = batch
else:
# XXX: use something more graceful. We just want to trigger
# delivery future here, no message futures.
batch.done_noack()
        # all batches are drained from the accumulator, so re-create the
        # "wait data" future for the send task to wait on new data
if not self._wait_data_future.done():
self._wait_data_future.set_result(None)
self._wait_data_future = create_future()
return nodes, unknown_leaders_exist
def create_builder(self):
if self._api_version >= (0, 11):
magic = 2
elif self._api_version >= (0, 10):
magic = 1
else:
magic = 0
is_transactional = False
if self._txn_manager is not None and \
self._txn_manager.transactional_id is not None:
is_transactional = True
return BatchBuilder(
magic, self._batch_size, self._compression_type,
is_transactional=is_transactional)
def _append_batch(self, builder, tp):
# We must do this before actual add takes place to check for errors.
if self._txn_manager is not None:
self._txn_manager.maybe_add_partition_to_txn(tp)
batch = MessageBatch(tp, builder, self._batch_ttl)
self._batches[tp].append(batch)
if not self._wait_data_future.done():
self._wait_data_future.set_result(None)
return batch
async def add_batch(self, builder, tp, timeout):
"""Add BatchBuilder to queue by topic-partition.
Arguments:
builder (BatchBuilder): batch object to enqueue.
tp (TopicPartition): topic and partition to enqueue this batch for.
timeout (int): time in seconds to wait for a free slot in the batch
queue.
Returns:
MessageBatch: delivery wrapper around the BatchBuilder object.
Raises:
aiokafka.errors.ProducerClosed: the accumulator has already been
closed and flushed.
aiokafka.errors.KafkaTimeoutError: the batch could not be added
within the specified timeout.
"""
if self._closed:
raise ProducerClosed()
if self._exception is not None:
raise copy.copy(self._exception)
start = time.monotonic()
while timeout > 0:
pending = self._batches.get(tp)
if pending:
await pending[-1].wait_drain(timeout=timeout)
timeout -= time.monotonic() - start
else:
batch = self._append_batch(builder, tp)
return asyncio.shield(batch.future)
raise KafkaTimeoutError()
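# Usage sketch (illustrative only; `accumulator`, `tp` and the surrounding
# producer/event-loop wiring are assumed to exist): a caller builds a batch
# explicitly, hands it to the accumulator, then awaits the delivery future.
#
#     builder = accumulator.create_builder()
#     builder.append(timestamp=None, key=b"key", value=b"value", headers=[])
#     delivery = await accumulator.add_batch(builder, tp, timeout=30)
#     metadata = await delivery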
|
the-stack_106_26313 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=5, create_symlink=False)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=1,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=15,
dataset_joints=15,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
# model settings
model = dict(
type='TopDown',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=32,
out_channels=channel_cfg['num_output_channels'],
num_deconv_layers=0,
extra=dict(final_conv_kernel=1, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=True,
det_bbox_thr=0.0,
bbox_file='',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/atrw'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=256),
test_dataloader=dict(samples_per_gpu=256),
train=dict(
type='AnimalATRWDataset',
ann_file=f'{data_root}/annotations/keypoint_train.json',
img_prefix=f'{data_root}/images/train/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='AnimalATRWDataset',
ann_file=f'{data_root}/annotations/keypoint_val.json',
img_prefix=f'{data_root}/images/val/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='AnimalATRWDataset',
ann_file=f'{data_root}/annotations/keypoint_val.json',
img_prefix=f'{data_root}/images/val/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
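# Usage sketch (assumes a standard MMPose checkout with this file placed under
# configs/): single-GPU training is typically launched as
#     python tools/train.py <path/to/this_config.py>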
|
the-stack_106_26315 | # coding=utf-8
# Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from flax.linen.attention import dot_product_attention_weights
from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling, FlaxSequenceClassifierOutput
from ...modeling_flax_utils import (
ACT2FN,
FlaxPreTrainedModel,
append_replace_return_docstrings,
overwrite_call_docstring,
)
from .configuration_vit import ViTConfig
VIT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.FlaxPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading, saving and converting weights from
PyTorch models)
This model is also a Flax Linen `flax.linen.Module
<https://flax.readthedocs.io/en/latest/flax.linen.html#module>`__ subclass. Use it as a regular Flax linen Module
and refer to the Flax documentation for all matter related to general usage and behavior.
Finally, this model supports inherent JAX features such as:
- `Just-In-Time (JIT) compilation <https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit>`__
- `Automatic Differentiation <https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation>`__
- `Vectorization <https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap>`__
- `Parallelization <https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap>`__
Parameters:
config (:class:`~transformers.ViTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.FlaxPreTrainedModel.from_pretrained` method to load the
model weights.
dtype (:obj:`jax.numpy.dtype`, `optional`, defaults to :obj:`jax.numpy.float32`):
The data type of the computation. Can be one of :obj:`jax.numpy.float32`, :obj:`jax.numpy.float16` (on
GPUs) and :obj:`jax.numpy.bfloat16` (on TPUs).
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified all the computation will be performed with the given ``dtype``.
**Note that this only specifies the dtype of the computation and does not influence the dtype of model
parameters.**
If you wish to change the dtype of the model parameters, see
:meth:`~transformers.FlaxPreTrainedModel.to_fp16` and :meth:`~transformers.FlaxPreTrainedModel.to_bf16`.
"""
VIT_INPUTS_DOCSTRING = r"""
Args:
pixel_values (:obj:`numpy.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using :class:`~transformers.ViTFeatureExtractor`. See
:meth:`transformers.ViTFeatureExtractor.__call__` for details.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class FlaxPatchEmbeddings(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
image_size = self.config.image_size
patch_size = self.config.patch_size
num_patches = (image_size // patch_size) * (image_size // patch_size)
self.num_patches = num_patches
self.projection = nn.Conv(
self.config.hidden_size,
kernel_size=(patch_size, patch_size),
strides=(patch_size, patch_size),
padding="VALID",
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
def __call__(self, pixel_values):
x = self.projection(pixel_values)
batch_size, _, _, channels = x.shape
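        # The convolution yields a (batch, height/patch, width/patch, hidden)
        # feature map; flattening the two spatial axes produces the sequence of
        # patch embeddings consumed by the transformer encoder.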
return jnp.reshape(x, (batch_size, -1, channels))
class FlaxViTEmbeddings(nn.Module):
"""Construct the CLS token, position and patch embeddings."""
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size))
self.patch_embeddings = FlaxPatchEmbeddings(self.config, dtype=self.dtype)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = self.param(
"position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size)
)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, pixel_values, deterministic=True):
batch_size = pixel_values.shape[0]
embeddings = self.patch_embeddings(pixel_values)
cls_tokens = jnp.broadcast_to(self.cls_token, (batch_size, 1, self.config.hidden_size))
embeddings = jnp.concatenate((cls_tokens, embeddings), axis=1)
embeddings = embeddings + self.position_embeddings
embeddings = self.dropout(embeddings, deterministic=deterministic)
return embeddings
class FlaxViTSelfAttention(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
if self.config.hidden_size % self.config.num_attention_heads != 0:
            raise ValueError(
                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
                f"`config.num_attention_heads`: {self.config.num_attention_heads}"
            )
self.query = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
use_bias=self.config.qkv_bias,
)
self.key = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
use_bias=self.config.qkv_bias,
)
self.value = nn.Dense(
self.config.hidden_size,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
use_bias=self.config.qkv_bias,
)
def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False):
head_dim = self.config.hidden_size // self.config.num_attention_heads
query_states = self.query(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
value_states = self.value(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
key_states = self.key(hidden_states).reshape(
hidden_states.shape[:2] + (self.config.num_attention_heads, head_dim)
)
dropout_rng = None
if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
dropout_rng = self.make_rng("dropout")
attn_weights = dot_product_attention_weights(
query_states,
key_states,
dropout_rng=dropout_rng,
dropout_rate=self.config.attention_probs_dropout_prob,
broadcast_dropout=True,
deterministic=deterministic,
dtype=self.dtype,
precision=None,
)
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
class FlaxViTSelfOutput(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
return hidden_states
class FlaxViTAttention(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.attention = FlaxViTSelfAttention(self.config, dtype=self.dtype)
self.output = FlaxViTSelfOutput(self.config, dtype=self.dtype)
def __call__(self, hidden_states, deterministic=True, output_attentions: bool = False):
attn_outputs = self.attention(hidden_states, deterministic=deterministic, output_attentions=output_attentions)
attn_output = attn_outputs[0]
hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_outputs[1],)
return outputs
class FlaxViTIntermediate(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.intermediate_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.activation = ACT2FN[self.config.hidden_act]
def __call__(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
class FlaxViTOutput(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
def __call__(self, hidden_states, attention_output, deterministic: bool = True):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic=deterministic)
hidden_states = hidden_states + attention_output
return hidden_states
class FlaxViTLayer(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.attention = FlaxViTAttention(self.config, dtype=self.dtype)
self.intermediate = FlaxViTIntermediate(self.config, dtype=self.dtype)
self.output = FlaxViTOutput(self.config, dtype=self.dtype)
self.layernorm_before = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
self.layernorm_after = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
def __call__(self, hidden_states, deterministic: bool = True, output_attentions: bool = False):
attention_outputs = self.attention(
self.layernorm_before(hidden_states), # in ViT, layernorm is applied before self-attention
deterministic=deterministic,
output_attentions=output_attentions,
)
attention_output = attention_outputs[0]
# first residual connection
attention_output = attention_output + hidden_states
# in ViT, layernorm is also applied after self-attention
layer_output = self.layernorm_after(attention_output)
hidden_states = self.intermediate(layer_output)
hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
outputs = (hidden_states,)
if output_attentions:
outputs += (attention_outputs[1],)
return outputs
class FlaxViTLayerCollection(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layers = [
FlaxViTLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers)
]
def __call__(
self,
hidden_states,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = layer(hidden_states, deterministic=deterministic, output_attentions=output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states,)
outputs = (hidden_states,)
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class FlaxViTEncoder(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.layer = FlaxViTLayerCollection(self.config, dtype=self.dtype)
def __call__(
self,
hidden_states,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
return self.layer(
hidden_states,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
class FlaxViTPooler(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
def setup(self):
self.dense = nn.Dense(
self.config.hidden_size,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
dtype=self.dtype,
)
def __call__(self, hidden_states):
cls_hidden_state = hidden_states[:, 0]
cls_hidden_state = self.dense(cls_hidden_state)
return nn.tanh(cls_hidden_state)
class FlaxViTPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ViTConfig
base_model_prefix = "vit"
main_input_name = "pixel_values"
module_class: nn.Module = None
def __init__(self, config: ViTConfig, input_shape=None, seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs):
module = self.module_class(config=config, dtype=dtype, **kwargs)
if input_shape is None:
input_shape = (1, config.image_size, config.image_size, 3)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
# init input tensors
pixel_values = jnp.zeros(input_shape, dtype=self.dtype)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
return self.module.init(rngs, pixel_values, return_dict=False)["params"]
@add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
def __call__(
self,
pixel_values,
params: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
return self.module.apply(
{"params": params or self.params},
jnp.array(pixel_values, dtype=jnp.float32),
not train,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
)
class FlaxViTModule(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32 # the dtype of the computation
add_pooling_layer: bool = True
def setup(self):
self.embeddings = FlaxViTEmbeddings(self.config, dtype=self.dtype)
self.encoder = FlaxViTEncoder(self.config, dtype=self.dtype)
self.layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
self.pooler = FlaxViTPooler(self.config, dtype=self.dtype) if self.add_pooling_layer else None
def __call__(
self,
pixel_values,
deterministic: bool = True,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
hidden_states = self.embeddings(pixel_values, deterministic=deterministic)
outputs = self.encoder(
hidden_states,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.layernorm(hidden_states)
pooled = self.pooler(hidden_states) if self.add_pooling_layer else None
if not return_dict:
# if pooled is None, don't return it
if pooled is None:
return (hidden_states,) + outputs[1:]
return (hidden_states, pooled) + outputs[1:]
return FlaxBaseModelOutputWithPooling(
last_hidden_state=hidden_states,
pooler_output=pooled,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"The bare ViT Model transformer outputting raw hidden-states without any specific head on top.",
VIT_START_DOCSTRING,
)
class FlaxViTModel(FlaxViTPreTrainedModel):
module_class = FlaxViTModule
FLAX_VISION_MODEL_DOCSTRING = """
Returns:
Examples::
>>> from transformers import ViTFeatureExtractor, FlaxViTModel
>>> from PIL import Image
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
>>> model = FlaxViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
>>> inputs = feature_extractor(images=image, return_tensors="np")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
"""
overwrite_call_docstring(FlaxViTModel, FLAX_VISION_MODEL_DOCSTRING)
append_replace_return_docstrings(FlaxViTModel, output_type=FlaxBaseModelOutputWithPooling, config_class=ViTConfig)
class FlaxViTForImageClassificationModule(nn.Module):
config: ViTConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.vit = FlaxViTModule(config=self.config, dtype=self.dtype, add_pooling_layer=False)
self.classifier = nn.Dense(
self.config.num_labels,
dtype=self.dtype,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
)
def __call__(
self,
pixel_values=None,
deterministic: bool = True,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.vit(
pixel_values,
deterministic=deterministic,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
logits = self.classifier(hidden_states[:, 0, :])
if not return_dict:
output = (logits,) + outputs[2:]
return output
return FlaxSequenceClassifierOutput(
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
the [CLS] token) e.g. for ImageNet.
""",
VIT_START_DOCSTRING,
)
class FlaxViTForImageClassification(FlaxViTPreTrainedModel):
module_class = FlaxViTForImageClassificationModule
FLAX_VISION_CLASSIF_DOCSTRING = """
Returns:
Example::
>>> from transformers import ViTFeatureExtractor, FlaxViTForImageClassification
>>> from PIL import Image
>>> import jax
>>> import requests
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
>>> model = FlaxViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
>>> inputs = feature_extractor(images=image, return_tensors="np")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = jax.numpy.argmax(logits, axis=-1)
>>> print("Predicted class:", model.config.id2label[predicted_class_idx.item()])
"""
overwrite_call_docstring(FlaxViTForImageClassification, FLAX_VISION_CLASSIF_DOCSTRING)
append_replace_return_docstrings(
FlaxViTForImageClassification, output_type=FlaxSequenceClassifierOutput, config_class=ViTConfig
)
|
the-stack_106_26318 | # Copyright 2019 The Blueqat Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The module for calculate Pauli matrices."""
from collections import defaultdict, namedtuple
from functools import reduce
from itertools import combinations, product
from numbers import Number, Integral
from math import pi
import numpy as np
_PauliTuple = namedtuple("_PauliTuple", "n")
half_pi = pi / 2
def pauli_from_char(ch, n=0):
"""Make Pauli matrix from an character.
Args:
ch (str): "X" or "Y" or "Z" or "I".
n (int, optional): Make Pauli matrix as n-th qubits.
Returns:
If ch is "X" => X, "Y" => Y, "Z" => Z, "I" => I
Raises:
ValueError: When ch is not "X", "Y", "Z" nor "I".
"""
ch = ch.upper()
if ch == "I":
return I
if ch == "X":
return X(n)
if ch == "Y":
return Y(n)
if ch == "Z":
return Z(n)
raise ValueError("ch shall be X, Y, Z or I")
def term_from_chars(chars):
    """Make a Pauli Term from a string of characters "X", "Y", "Z" or "I".
e.g. "XZIY" => X(0) * Z(1) * Y(3)
Args:
chars (str): Written in "X", "Y", "Z" or "I".
Returns:
Term: A `Term` object.
Raises:
        ValueError: When chars contains a character that is not "X", "Y", "Z" or "I".
"""
return Term.from_chars(chars)
def to_term(pauli):
"""Convert to Term from Pauli operator (X, Y, Z, I).
Args:
pauli (X, Y, Z or I): A Pauli operator
Returns:
Term: A `Term` object.
"""
return pauli.to_term()
def to_expr(term):
"""Convert to Expr from Term or Pauli operator (X, Y, Z, I).
Args:
term: (Term, X, Y, Z or I): A Term or Pauli operator.
Returns:
Expr: An `Expr` object.
"""
return term.to_expr()
def commutator(expr1, expr2):
"""Returns [expr1, expr2] = expr1 * expr2 - expr2 * expr1.
Args:
expr1 (Expr, Term or Pauli operator): Pauli's expression.
expr2 (Expr, Term or Pauli operator): Pauli's expression.
Returns:
Expr: expr1 * expr2 - expr2 * expr1.
"""
expr1 = expr1.to_expr().simplify()
expr2 = expr2.to_expr().simplify()
return (expr1 * expr2 - expr2 * expr1).simplify()
def is_commutable(expr1, expr2, eps=0.00000001):
"""Test whether expr1 and expr2 are commutable.
Args:
expr1 (Expr, Term or Pauli operator): Pauli's expression.
expr2 (Expr, Term or Pauli operator): Pauli's expression.
        eps (float, optional): Tolerance.
            If |[expr1, expr2]| < eps, the expressions are considered commutable.
Returns:
bool: if expr1 and expr2 are commutable, returns True, otherwise False.
"""
return sum((x * x.conjugate()).real for x in commutator(expr1, expr2).coeffs()) < eps
# To avoid pylint error
def _n(pauli):
return pauli.n
def _GetItem(self_, n):
return type(self_)(n)
class _PauliImpl:
@property
def op(self):
"""Return operator type (X, Y, Z, I)"""
return self.__class__.__name__[1]
@property
def is_identity(self):
"""If `self` is I, returns True, otherwise False."""
return self.op == "I"
def __hash__(self):
return hash((self.op, _n(self)))
def __eq__(self, other):
if isinstance(other, _PauliImpl):
if self.is_identity:
return other.is_identity
return _n(self) == _n(other) and self.op == other.op
if isinstance(other, Term):
return self.to_term() == other
if isinstance(other, Expr):
return self.to_expr() == other
return NotImplemented
def __ne__(self, other):
return not self == other
def __mul__(self, other):
if isinstance(other, Number):
return Term.from_pauli(self, other)
if not isinstance(other, _PauliImpl):
return NotImplemented
if self.is_identity:
return other.to_term()
if other.is_identity:
return self.to_term()
if _n(self) == _n(other) and self.op == other.op:
return I.to_term()
return Term.from_paulipair(self, other)
def __rmul__(self, other):
if isinstance(other, Number):
return Term.from_pauli(self, other)
return NotImplemented
def __truediv__(self, other):
if isinstance(other, Number):
if other:
return Term.from_pauli(self, 1.0 / other)
raise ZeroDivisionError
return NotImplemented
def __add__(self, other):
return self.to_expr() + other
def __radd__(self, other):
return other + self.to_expr()
def __sub__(self, other):
return self.to_expr() - other
def __rsub__(self, other):
return other - self.to_expr()
def __neg__(self):
return Term.from_pauli(self, -1.0)
def __repr__(self):
if self.is_identity:
return "I"
return self.op + "[" + str(_n(self)) + "]"
def to_term(self):
"""Convert to Pauli Term"""
return Term.from_pauli(self)
def to_expr(self):
"""Convert to Pauli Expr"""
return self.to_term().to_expr()
_matrix = {
'I': np.array([[1, 0], [0, 1]], dtype=complex),
'X': np.array([[0, 1], [1, 0]], dtype=complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=complex),
'Z': np.array([[1, 0], [0, -1]], dtype=complex)
}
@property
    def matrix(self):
        """Matrix representation of this operator."""
return self._matrix[self.op].copy()
def to_matrix(self, n_qubits=-1):
"""Convert to the matrix."""
if self.is_identity:
if n_qubits == -1:
return self.matrix
else:
return reduce(np.kron, [I.matrix for _ in range(n_qubits)])
if n_qubits == -1:
n_qubits = _n(self) + 1
if _n(self) == 0:
mat = self.matrix
else:
mat = reduce(np.kron, [I.matrix for _ in range(_n(self))])
mat = np.kron(mat, self.matrix)
if n_qubits > _n(self) + 1:
mat = reduce(np.kron, [I.matrix for _ in range(n_qubits - _n(self) - 1)], mat)
return mat
class _X(_PauliImpl, _PauliTuple):
"""Pauli's X operator"""
class _Y(_PauliImpl, _PauliTuple):
"""Pauli's Y operator"""
class _Z(_PauliImpl, _PauliTuple):
"""Pauli's Z operator"""
class _PauliCtor:
def __init__(self, ty):
self.ty = ty
def __call__(self, n):
return self.ty(n)
def __getitem__(self, n):
return self.ty(n)
X = _PauliCtor(_X)
Y = _PauliCtor(_Y)
Z = _PauliCtor(_Z)
class _I(_PauliImpl, namedtuple("_I", "")):
"""Identity operator"""
def __call__(self):
return self
I = _I()
_TermTuple = namedtuple("_TermTuple", "ops coeff")
class Term(_TermTuple):
"""Multiplication of Pauli matrices with coefficient.
Note that this class is immutable.
    Products of Pauli matrices are important for quantum computation because
    the product (without its coefficient) is a unitary matrix, and the time
    evolution of a single term (with a real coefficient) can be implemented
    exactly, without a Suzuki-Trotter expansion.
"""
@staticmethod
def from_paulipair(pauli1, pauli2):
"""Make new Term from two Pauli operator."""
return Term(Term.join_ops((pauli1,), (pauli2,)), 1.0)
@staticmethod
def from_pauli(pauli, coeff=1.0):
"""Make new Term from an Pauli operator"""
if pauli.is_identity or coeff == 0:
return Term((), coeff)
return Term((pauli,), coeff)
@staticmethod
def from_ops_iter(ops, coeff):
"""For internal use."""
return Term(tuple(ops), coeff)
@staticmethod
    def from_chars(chars):
        """Make a Pauli Term from a string of characters "X", "Y", "Z" or "I".
e.g. "XZIY" => X(0) * Z(1) * Y(3)
Args:
chars (str): Written in "X", "Y", "Z" or "I".
Returns:
Term: A `Term` object.
Raises:
            ValueError: When chars contains a character that is not "X", "Y", "Z" or "I".
"""
paulis = [pauli_from_char(c, n) for n, c in enumerate(chars) if c != "I"]
if not paulis:
return 1.0 * I
if len(paulis) == 1:
return 1.0 * paulis[0]
return reduce(lambda a, b: a * b, paulis)
@staticmethod
def join_ops(ops1, ops2):
"""For internal use."""
i = len(ops1) - 1
j = 0
while i >= 0 and j < len(ops2):
if ops1[i] == ops2[j]:
i -= 1
j += 1
else:
break
return ops1[:i + 1] + ops2[j:]
@property
def is_identity(self):
"""If `self` is I, returns True, otherwise False."""
return not self.ops
def __mul__(self, other):
if isinstance(other, Number):
return Term(self.ops, self.coeff * other)
if isinstance(other, Term):
ops = Term.join_ops(self.ops, other.ops)
coeff = self.coeff * other.coeff
return Term(ops, coeff)
if isinstance(other, _PauliImpl):
if other.is_identity:
return self
return Term(Term.join_ops(self.ops, (other,)), self.coeff)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, Number):
return Term(self.ops, self.coeff * other)
if isinstance(other, _PauliImpl):
if other.is_identity:
return self
return Term(Term.join_ops((other,), self.ops), self.coeff)
return NotImplemented
def __truediv__(self, other):
if isinstance(other, (int, float)):
if other:
return Term(self.ops, self.coeff / other)
raise ZeroDivisionError
return NotImplemented
def __pow__(self, n):
if isinstance(n, Integral):
if n < 0:
raise ValueError("`pauli_term ** n` or `pow(pauli_term, n)`: " +
"n shall not be negative value.")
if n == 0:
return Term.from_pauli(I)
return Term(self.ops * n, self.coeff ** n)
return NotImplemented
def __add__(self, other):
return Expr.from_term(self) + other
def __radd__(self, other):
return other + Expr.from_term(self)
def __sub__(self, other):
return Expr.from_term(self) - other
def __rsub__(self, other):
return other - Expr.from_term(self)
def __neg__(self):
return Term(self.ops, -self.coeff)
def __repr__(self):
if self.coeff == 0:
return "0*I"
if self.coeff == -1.0:
s_coeff = "-"
else:
s_coeff = str(self.coeff) + "*"
if self.ops == ():
s_ops = "I"
else:
s_ops = "*".join(op.op + "[" + repr(op.n) + "]" for op in self.ops)
return s_coeff + s_ops
def __eq__(self, other):
if isinstance(other, _PauliImpl):
other = other.to_term()
return _TermTuple.__eq__(self, other) or \
_TermTuple.__eq__(self.simplify(), other.simplify())
def __ne__(self, other):
return not self == other
def to_term(self):
"""Do nothing. This method is prepared to avoid TypeError."""
return self
def to_expr(self):
"""Convert to Expr."""
return Expr.from_term(self)
def commutator(self, other):
"""Returns commutator."""
return commutator(self, other)
def is_commutable_with(self, other):
"""Test whether `self` is commutable with `other`."""
return is_commutable(self, other)
def simplify(self):
"""Simplify the Term."""
def mul(op1, op2):
if op1 == "I":
return 1.0, op2
if op2 == "I":
return 1.0, op1
if op1 == op2:
return 1.0, "I"
if op1 == "X":
return (-1j, "Z") if op2 == "Y" else (1j, "Y")
if op1 == "Y":
return (-1j, "X") if op2 == "Z" else (1j, "Z")
if op1 == "Z":
return (-1j, "Y") if op2 == "X" else (1j, "X")
before = defaultdict(list)
for op in self.ops:
if op.op == "I":
continue
before[op.n].append(op.op)
new_coeff = self.coeff
new_ops = []
for n in sorted(before.keys()):
ops = before[n]
assert ops
k = 1.0
op = ops[0]
for _op in ops[1:]:
_k, op = mul(op, _op)
k *= _k
new_coeff *= k
if new_coeff.imag == 0:
# cast to float
new_coeff = new_coeff.real
if op != "I":
new_ops.append(pauli_from_char(op, n))
return Term(tuple(new_ops), new_coeff)
    def n_iter(self):
        """Returns an iterator which yields the index of each Pauli matrix in the Term."""
return (op.n for op in self.ops)
def max_n(self):
"""Returns the maximum index of Pauli matrices in the Term."""
return max(self.n_iter())
def append_to_circuit(self, circuit, simplify=True):
"""Append Pauli gates to `Circuit`."""
if simplify:
term = self.simplify()
else:
term = self
for op in term.ops[::-1]:
gate = op.op.lower()
if gate != "i":
getattr(circuit, gate)[op.n]
def get_time_evolution(self):
"""Get the function to append the time evolution of this term.
Returns:
function(circuit: Circuit, t: float):
Add gates for time evolution to `circuit` with time `t`
"""
term = self.simplify()
coeff = term.coeff
if coeff.imag:
raise ValueError("Not a real coefficient.")
ops = term.ops
def append_to_circuit(circuit, t):
if not ops:
return
for op in ops:
n = op.n
if op.op == "X":
circuit.h[n]
elif op.op == "Y":
circuit.rx(-half_pi)[n]
for i in range(1, len(ops)):
circuit.cx[ops[i-1].n, ops[i].n]
circuit.rz(-2 * coeff * t)[ops[-1].n]
for i in range(len(ops)-1, 0, -1):
circuit.cx[ops[i-1].n, ops[i].n]
for op in ops:
n = op.n
if op.op == "X":
circuit.h[n]
elif op.op == "Y":
circuit.rx(half_pi)[n]
return append_to_circuit
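    # Usage sketch (assumes blueqat's Circuit API, which is not defined in this
    # module): for `term = (Z[0] * Z[1]).to_term()`,
    #     evolve = term.get_time_evolution()
    #     evolve(circuit, 0.5)  # appends gates implementing the term's time evolution at t=0.5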
def to_matrix(self, n_qubits=-1):
"""Convert to the matrix."""
if n_qubits == -1:
n_qubits = self.max_n() + 1
mat = I.to_matrix(n_qubits)
for op in self.ops:
if op.is_identity:
continue
mat = mat @ op.to_matrix(n_qubits)
return mat * self.coeff
_ExprTuple = namedtuple("_ExprTuple", "terms")
class Expr(_ExprTuple):
@staticmethod
def from_number(num):
"""Make new Expr from a number"""
if num:
return Expr.from_term(Term((), num))
else:
return Expr.zero()
@staticmethod
def from_term(term):
"""Make new Expr from a Term"""
if term.coeff:
return Expr((term,))
else:
return Expr.zero()
@staticmethod
def from_terms_iter(terms):
"""For internal use."""
return Expr(tuple(term for term in terms if term.coeff))
def terms_to_dict(self):
"""For internal use."""
return {term[0]: term[1] for term in self.terms if term.coeff}
@staticmethod
def from_terms_dict(terms_dict):
"""For internal use."""
return Expr(tuple(Term(k, v) for k, v in terms_dict.items() if v))
@staticmethod
def zero():
"""Returns 0 as Term"""
return Expr(())
@property
def is_identity(self):
"""If `self` is I, returns True, otherwise False."""
if not self.terms:
return True
return len(self.terms) == 1 and not self.terms[0].ops and self.terms[0].coeff == 1.0
def __eq__(self, other):
if isinstance(other, (_PauliImpl, Term)):
other = other.to_expr()
if isinstance(other, Expr):
return self.terms == other.terms or self.simplify().terms == other.simplify().terms
return NotImplemented
def __ne__(self, other):
return not self == other
def __add__(self, other):
if isinstance(other, Number):
other = Expr.from_number(other)
elif isinstance(other, Term):
other = Expr.from_term(other)
if isinstance(other, Expr):
terms = self.terms_to_dict()
for op, coeff in other.terms:
if op in terms:
terms[op] += coeff
if terms[op] == 0:
del terms[op]
else:
terms[op] = coeff
return Expr.from_terms_dict(terms)
return NotImplemented
def __sub__(self, other):
if isinstance(other, Number):
other = Expr.from_number(other)
elif isinstance(other, Term):
other = Expr.from_term(other)
if isinstance(other, Expr):
terms = self.terms_to_dict()
for op, coeff in other.terms:
if op in terms:
terms[op] -= coeff
if terms[op] == 0:
del terms[op]
else:
terms[op] = -coeff
return Expr.from_terms_dict(terms)
return NotImplemented
def __radd__(self, other):
if isinstance(other, Number):
return Expr.from_number(other) + self
if isinstance(other, Term):
return Expr.from_term(other) + self
return NotImplemented
def __rsub__(self, other):
if isinstance(other, Number):
return Expr.from_number(other) - self
if isinstance(other, Term):
return Expr.from_term(other) - self
return NotImplemented
def __neg__(self):
return Expr(tuple(Term(op, -coeff) for op, coeff in self.terms))
def __mul__(self, other):
if isinstance(other, Number):
if other == 0:
return Expr.from_number(0.0)
return Expr.from_terms_iter(Term(op, coeff * other) for op, coeff in self.terms)
if isinstance(other, _PauliImpl):
other = other.to_term()
if isinstance(other, Term):
return Expr(tuple(term * other for term in self.terms))
if isinstance(other, Expr):
terms = defaultdict(float)
for t1, t2 in product(self.terms, other.terms):
term = t1 * t2
terms[term.ops] += term.coeff
return Expr.from_terms_dict(terms)
return NotImplemented
def __rmul__(self, other):
if isinstance(other, Number):
if other == 0:
return Expr.from_number(0.0)
return Expr.from_terms_iter(Term(op, coeff * other) for op, coeff in self.terms)
if isinstance(other, _PauliImpl):
other = other.to_term()
if isinstance(other, Term):
return Expr(tuple(other * term for term in self.terms))
return NotImplemented
def __truediv__(self, other):
if isinstance(other, Number):
if other:
return Expr(tuple(term / other for term in self.terms))
raise ZeroDivisionError
return NotImplemented
def __pow__(self, n):
if isinstance(n, Integral):
if n < 0:
raise ValueError("`pauli_expr ** n` or `pow(pauli_expr, n)`: " +
"n shall not be negative value.")
if n == 0:
return Expr.from_number(1.0)
val = self
for _ in range(n - 1):
val *= self
return val
return NotImplemented
def __iter__(self):
return iter(self.terms)
def __repr__(self):
if not self.terms:
return "0*I+0"
s_terms = [repr(self.terms[0])]
for term in self.terms[1:]:
s = repr(term)
if s[0] == "+":
s_terms.append("+")
s_terms.append(s[1:])
elif s[0] == "-":
s_terms.append("-")
s_terms.append(s[1:])
else:
s_terms.append("+")
s_terms.append(s)
return " ".join(s_terms)
def __getnewargs__(self):
return (self.terms,)
def to_expr(self):
"""Do nothing. This method is prepared to avoid TypeError."""
return self
def max_n(self):
"""Returns the maximum index of Pauli matrices in the Term."""
return max(term.max_n() for term in self.terms if term.ops)
    def coeffs(self):
        """Generator which yields the coefficient of each Term."""
for term in self.terms:
yield term.coeff
def commutator(self, other):
"""Returns commutator."""
return commutator(self, other)
def is_commutable_with(self, other):
"""Test whether `self` is commutable with `other`."""
return is_commutable(self, other)
def is_all_terms_commutable(self):
"""Test whether all terms are commutable. This function may very slow."""
return all(is_commutable(a, b) for a, b in combinations(self.terms, 2))
def simplify(self):
"""Simplify the Expr."""
d = defaultdict(float)
for term in self.terms:
term = term.simplify()
d[term.ops] += term.coeff
return Expr.from_terms_iter(
Term.from_ops_iter(k, d[k]) for k in sorted(d, key=repr) if d[k])
def to_matrix(self, n_qubits=-1):
"""Convert to the matrix."""
if n_qubits == -1:
n_qubits = self.max_n() + 1
return sum(term.to_matrix(n_qubits) for term in self.terms)
def qubo_bit(n):
"""Represent QUBO's bit to Pauli operator of Ising model.
Args:
n (int): n-th bit in QUBO
Returns:
Expr: Pauli expression of QUBO bit.
"""
return 0.5 - 0.5*Z[n]
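# Example (sketch): a QUBO product term x0 * x1 maps to Ising operators as
# (0.5 - 0.5*Z[0]) * (0.5 - 0.5*Z[1]), which simplifies (up to term ordering) to
# 0.25*I - 0.25*Z[0] - 0.25*Z[1] + 0.25*Z[0]*Z[1].
#
#     expr = (qubo_bit(0) * qubo_bit(1)).simplify()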
|
the-stack_106_26319 | #!/usr/bin/python
#
# Request for historical data (RDM type 12) published by provider.history.tcl
# This domain is not officially supported by Thomson Reuters
# Sample:
# {'MTYPE':'REFRESH','RIC':'tANZ.AX','SERVICE':'NIP'}
# {'SERVICE':'NIP','SALTIM':'08:05:22:612:000:000','MTYPE':'IMAGE','TRADE_ID':'123456789',
# 'BID_ORD_ID':'5307FBL20AL7B','TRDPRC_1':40.124,'RIC':'tANZ.AX','ASK_ORD_ID':'5307FBL20BN8A'}
#
import sys
import pyrfa
p = pyrfa.Pyrfa()
p.createConfigDb("./pyrfa.cfg")
p.setDebugMode(False)
p.acquireSession("Session3")
p.createOMMConsumer()
p.login()
p.directoryRequest()
p.dictionaryRequest()
p.historyRequest("tANZ.AX")
count = 0
while not p.isHistoryRefreshComplete():
for u in p.dispatchEventQueue():
if count == 1:
print(u['SERVICE'] + " - " + u['RIC'])
print("-----------------------")
for k,v in u.items():
sys.stdout.write(k+',')
print("")
for k,v in u.items():
sys.stdout.write(str(v)+',')
elif count > 1:
for k,v in u.items():
sys.stdout.write(str(v)+',')
count += 1
print("")
print("\n\n########## total history records: %s ###################\n\n" % (count - 1)) |
the-stack_106_26320 | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import socket
import subprocess
from singleprocess_train import main as single_process_main
from fairseq import distributed_utils, options
def main(args):
if args.distributed_init_method is None and args.distributed_port > 0:
# We can determine the init method automatically for Slurm.
node_list = os.environ.get('SLURM_JOB_NODELIST')
if node_list is not None:
try:
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
args.distributed_init_method = 'tcp://{host}:{port}'.format(
host=hostnames.split()[0].decode('utf-8'),
port=args.distributed_port)
args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
args.device_id = int(os.environ.get('SLURM_LOCALID'))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError as e: # Slurm is not installed
pass
if args.distributed_init_method is None:
raise ValueError('--distributed-init-method or --distributed-port '
'must be specified for distributed training')
args.distributed_rank = distributed_utils.distributed_init(args)
print('| initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))
single_process_main(args)
if __name__ == '__main__':
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
main(args)
|
the-stack_106_26321 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import ApplicationInsightsManagementClientConfiguration
from .operations import WorkbookTemplatesOperations
from . import models
class ApplicationInsightsManagementClient(object):
"""Composite Swagger for Application Insights Management Client.
:ivar workbook_templates: WorkbookTemplatesOperations operations
:vartype workbook_templates: azure.mgmt.applicationinsights.v2019_10_17_preview.operations.WorkbookTemplatesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = ApplicationInsightsManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.workbook_templates = WorkbookTemplatesOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ApplicationInsightsManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
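# Usage sketch (assumes the azure-identity package is installed; the operation
# name follows the generated WorkbookTemplatesOperations class and resource
# names are placeholders):
#
#     from azure.identity import DefaultAzureCredential
#     client = ApplicationInsightsManagementClient(
#         DefaultAzureCredential(), "<subscription-id>")
#     templates = client.workbook_templates.list_by_resource_group("<resource-group>")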
|
the-stack_106_26323 | from allauth.account.forms import SignupForm
from django import forms
from bims.models import Profile
class CustomSignupForm(SignupForm):
first_name = forms.CharField(
max_length=150,
label='First Name',
required=True)
last_name = forms.CharField(
max_length=150,
label='Last Name',
required=True
)
organization = forms.CharField(
max_length=100,
label='Organization/Institution',
required=True
)
role = forms.ChoiceField(
choices=Profile.ROLE_CHOICES,
initial='citizen',
required=True
)
def custom_signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.organization = self.cleaned_data['organization']
user.save()
bims_profile, created = Profile.objects.get_or_create(
user=user
)
bims_profile.role = self.cleaned_data['role']
bims_profile.save()
return user
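# To make django-allauth use this form, register it in settings.py (sketch;
# adjust the dotted path to wherever this class actually lives):
#
#     ACCOUNT_FORMS = {"signup": "yourapp.forms.CustomSignupForm"}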
|
the-stack_106_26324 | #!/usr/bin/env python
from random import randint
from time import sleep
import unicornhathd as unicorn
print("""Snow
Draws random white pixels to look like a snowstorm.
If you're using a Unicorn HAT and only half the screen lights up,
edit this example and change 'unicorn.AUTO' to 'unicorn.HAT' below.
""")
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(90)
unicorn.brightness(0.5)
width,height=unicorn.get_shape()
rows = []
row_pointer = 0
def init():
# create a buffer of <height> blank rows
for i in range(height):
rows.append(get_blank_row())
def get_blank_row():
# generate a blank row
return [0] * width
def get_new_row():
# get a new blank row and add a random brightness snowflake to a random column
row = get_blank_row()
row[randint(0, width - 1)] = 50 + randint(0, 155)
return row
def update_display():
# keep track of the row we are updating
c = row_pointer
for h in range(height):
for w in range(width):
# val is between 50 and 255
val = rows[c][w]
# invert coordinates
unicorn.set_pixel((width - 1) - w, (height - 1) - h, val, val, val)
c += 1
if c > height - 1:
c = 0
unicorn.show()
def step():
global row_pointer
# add new row at current row pointer
# leave other rows the same, display will start from this one which overwrites the
# oldest existing row from the last time we updated the display
rows[row_pointer] = get_new_row()
update_display()
# determine next row pointer, wrapping around if we went past zero
row_pointer -= 1
if row_pointer < 0:
row_pointer = height - 1
init()
while True:
step()
sleep(0.3) |
the-stack_106_26325 | import math
# Fixed parameters
N = 200 # Number of participants
Tmax = 120 # total duration of the simulation
# Tunable parameters
Tinf = 30 # infectious time
I0 = 1 # number of initial cases
Itot = 150 # total number of cases
cr = 0.005 # per capita contact rate c,
           # cr x N is the number of contacts per unit of time an infectious individual makes
S0 = N - I0
Send = N - Itot
z = Itot/math.log(S0/Send)
print("gamma/beta = ", z)
print("R0 = ", S0/z)
Imax = N - z + z * math.log(z) - z * math.log(S0)
print("IMax = ", Imax)
beta = 1/(z*Tinf)
print("beta = ", beta)
p = beta/cr # probability that a contact with a susceptible individual results in transmission
print("Probability of infection = ", p) |
the-stack_106_26326 | from common import Action
import copy
import gym
from gym import spaces
import numpy as np
from random import choice
from copy import deepcopy
from parse_utils import vectorize_obs
class KarelEnv(gym.Env):
N_ACTIONS = 6
# Direction encoding
dir_to_dxy = {"north": (-1, 0), "east": (0, 1),
"south": (1, 0), "west": (0, -1)}
dir_ord = ["north", "east", "south", "west"]
def __init__(self, task_space=None, is_compact=True, reward_func='binary'):
super(KarelEnv, self).__init__()
if reward_func == 'binary':
self.R = self.R_binary
else:
self.R = self.R_complex
self.task_space = task_space
self.is_compact = is_compact
self.debug = False
self.probe_mode = False
self.obs_shape = (4, 4, 11)
self.action_space = spaces.Discrete(self.N_ACTIONS)
self.observation_space = spaces.Box(
low=0, high=1, shape=self.obs_shape, dtype=np.uint8
)
self.action_handlers = {
Action.move: self.move,
Action.turnLeft: lambda src_state: self.turn(-1, src_state),
Action.turnRight: lambda src_state: self.turn(1, src_state),
Action.pickMarker: self.pickMarker,
Action.putMarker: self.putMarker,
Action.finish: self.finish,
}
# self.reset()
def reset(self, init_state=None):
if init_state is None:
init_state = choice(self.task_space)
self.init(init_state)
return vectorize_obs(init_state, self.is_compact)
def init(self, task):
self.task = copy.deepcopy(task)
self.is_terminal = False
self.task["pregrid_markers"] = set(
map(tuple, self.task["pregrid_markers"]))
self.task["postgrid_markers"] = set(
map(tuple, self.task["postgrid_markers"]))
# Active (i.e., changing) state, note that this is not the total state
self.state = {
"agent_r": self.task["pregrid_agent_row"],
"agent_c": self.task["pregrid_agent_col"],
"agent_d": self.task["pregrid_agent_dir"],
"markers": self.task["pregrid_markers"],
}
# Active target state, note that this is not the total state
self.target_state = {
"agent_r": self.task["postgrid_agent_row"],
"agent_c": self.task["postgrid_agent_col"],
"agent_d": self.task["postgrid_agent_dir"],
"markers": self.task["postgrid_markers"],
}
def get_full_state(self, state=None):
if state is None:
state = self.state
if state == "terminal":
return "terminal"
task_state = copy.deepcopy(self.task)
task_state["pregrid_agent_row"] = state["agent_r"]
task_state["pregrid_agent_col"] = state["agent_c"]
task_state["pregrid_agent_dir"] = state["agent_d"]
task_state["pregrid_markers"] = state["markers"]
return task_state
def generate_rollout(self, PI, H):
EP = []
for i in range(H):
s = self.get_full_state()
if s == "terminal":
break
a = PI(s)
r = self.R(self.state, a)
EP.append((s, a, r))
self.step(a)
return EP
def probe(self, action):
state_copy = deepcopy(self.state)
self.next_state, self.is_terminal = self.action_handlers[action](state_copy)
r = self.R(self.state, action)
is_solved = self.state == self.target_state and action == Action.finish
has_crashed = self.is_terminal and not is_solved
next_obs = vectorize_obs(self.get_full_state(), self.is_compact)
return next_obs, r, self.is_terminal, {"solved": is_solved, "crashed": has_crashed}
def step(self, action):
state_copy = deepcopy(self.state)
self.next_state, self.is_terminal = self.action_handlers[action](state_copy)
r = self.R(self.state, action)
is_solved = self.state == self.target_state and action == Action.finish
has_crashed = self.is_terminal and not is_solved
self.state = self.next_state
next_obs = vectorize_obs(self.get_full_state(), self.is_compact)
return next_obs, r, self.is_terminal, {"solved": is_solved, "crashed": has_crashed}
def R_binary(self, s, a):
if s == self.target_state and a == Action.finish:
return 1
else:
return 0
def R_complex(self, s, a):
if self.debug:
next_state, is_terminal = self.action_handlers[a](src_state=deepcopy(s))
else:
next_state, is_terminal = self.next_state, self.is_terminal
if s == self.target_state and a == Action.finish: # Task solved
return 20
elif is_terminal: # Crash
return -10
elif a == Action.move:
vd1 = s["agent_r"]-self.task["postgrid_agent_row"]
hd1 = s["agent_c"]-self.task["postgrid_agent_col"]
d1 = abs(hd1) + abs(vd1)
vd2 = next_state["agent_r"]-self.task["postgrid_agent_row"]
hd2 = next_state["agent_c"]-self.task["postgrid_agent_col"]
d2 = abs(hd2) + abs(vd2)
if s['markers'] == self.task["postgrid_markers"]:
if d2 < d1:
return 1
elif d2 > d1:
h_ort, v_ort = hd1//max(abs(hd1),1), vd1//max(abs(vd1),1)
h_blocked, v_blocked = False, False
for step in range(1, 5):
h_blocked |= [s["agent_r"], s["agent_c"]+h_ort*step] in self.task["walls"]
v_blocked |= [s["agent_r"]+v_ort*step, s["agent_c"]] in self.task["walls"]
if not (h_blocked and v_blocked):
return -1
else:
return 0
else:
return 0
else:
return 0
elif a == Action.putMarker:
loc = (s["agent_r"], s["agent_c"])
if loc in self.task["postgrid_markers"] and loc not in s["markers"]:
return 3
else:
return -3
elif a == Action.pickMarker:
loc = (s["agent_r"], s["agent_c"])
if loc not in self.task["postgrid_markers"] and loc in s["markers"]:
return 3
else:
return -3
else:
return 0
def move(self, src_state):
agent_r, agent_c = src_state["agent_r"], src_state["agent_c"]
agent_d = src_state["agent_d"]
dxy = self.dir_to_dxy[agent_d]
next_pos = [agent_r + dxy[0], agent_c + dxy[1]]
out_of_bounds = (
next_pos[0] >= self.task["gridsz_num_rows"]
or next_pos[1] >= self.task["gridsz_num_cols"]
or next_pos[0] < 0
or next_pos[1] < 0
)
wall_hit = next_pos in self.task["walls"]
if out_of_bounds or wall_hit:
return src_state, True
src_state["agent_r"], src_state["agent_c"] = next_pos[0], next_pos[1]
return src_state, False
def turn(self, clk_ort, src_state):
agent_r, agent_c = src_state["agent_r"], src_state["agent_c"]
agent_d = src_state["agent_d"]
dir_idx = self.dir_ord.index(agent_d)
next_idx = (dir_idx + clk_ort + 4) % 4
new_dir = self.dir_ord[next_idx]
src_state["agent_d"] = new_dir
return src_state, False
def pickMarker(self, src_state):
agent_r, agent_c = src_state["agent_r"], src_state["agent_c"]
if (agent_r, agent_c) not in src_state["markers"]:
return src_state, True
src_state["markers"].remove((agent_r, agent_c))
return src_state, False
def putMarker(self, src_state):
agent_r, agent_c = src_state["agent_r"], src_state["agent_c"]
if (agent_r, agent_c) in src_state["markers"]:
return src_state, True
src_state["markers"].add((agent_r, agent_c))
return src_state, False
def finish(self, src_state):
return src_state, True
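# Added usage sketch (illustrative only, not part of the original module). The
# task dictionary below follows the field names this class reads (gridsz_*,
# walls, pregrid_*/postgrid_*), but its concrete values are made up for the
# demo, and it assumes parse_utils.vectorize_obs handles a 4x4 grid, matching
# the hard-coded obs_shape above.
if __name__ == "__main__":
    demo_task = {
        "gridsz_num_rows": 4,
        "gridsz_num_cols": 4,
        "walls": [],
        "pregrid_agent_row": 0,
        "pregrid_agent_col": 0,
        "pregrid_agent_dir": "east",
        "pregrid_markers": [],
        "postgrid_agent_row": 0,
        "postgrid_agent_col": 1,
        "postgrid_agent_dir": "east",
        "postgrid_markers": [],
    }
    env = KarelEnv(task_space=[demo_task])
    obs = env.reset()
    # One step east reaches the post-grid pose; finish should then solve it.
    obs, reward, done, info = env.step(Action.move)
    obs, reward, done, info = env.step(Action.finish)
    print(reward, done, info)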
the-stack_106_26327 | import os
from unittest.mock import patch, PropertyMock
import requests
from memsource import api, constants, models
import api as api_test
class TestApiAnalysis(api_test.ApiTestCase):
def setUp(self):
self.url_base = 'https://cloud.memsource.com/web/api/v2/analyse'
self.analysis = api.Analysis('token')
self.test_analysis_file_path = '/tmp/analysis.csv'
self.setCleanUpFiles([self.test_analysis_file_path])
@patch.object(requests.Session, 'request')
def test_get(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mock_request().json.return_value = {}
analysis_id = self.gen_random_int()
self.assertIsInstance(self.analysis.get(analysis_id), models.Analysis)
mock_request.assert_called_with(
constants.HttpMethod.get.value,
'{}/get'.format(self.url_base),
params={
'token': self.analysis.token,
'analyse': analysis_id,
},
timeout=constants.Base.timeout.value
)
@patch.object(requests.Session, 'request')
def test_create(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mock_request().json.return_value = {}
job_part_ids = [self.gen_random_int()]
self.assertIsInstance(self.analysis.create(job_part_ids), models.Analysis)
mock_request.assert_called_with(
constants.HttpMethod.post.value,
'{}/create'.format(self.url_base),
data={
'token': self.analysis.token,
'jobPart': job_part_ids,
},
timeout=constants.Base.timeout.value
)
@patch.object(requests.Session, 'request')
def test_delete(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mock_request().json.return_value = None
analysis_id = self.gen_random_int()
self.assertIsNone(self.analysis.delete(analysis_id))
mock_request.assert_called_with(
constants.HttpMethod.post.value,
'{}/delete'.format(self.url_base),
data={
'token': self.analysis.token,
'analyse': analysis_id,
'purge': False,
},
timeout=constants.Base.timeout.value
)
@patch.object(requests.Session, 'request')
def test_get_by_project(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mock_request().json.return_value = [{id: 1}, {id: 2}]
project_id = self.gen_random_int()
analyses = self.analysis.get_by_project(project_id)
self.assertIsInstance(analyses, list)
for analysis in analyses:
self.assertIsInstance(analysis, models.Analysis)
mock_request.assert_called_with(
constants.HttpMethod.get.value,
'{}/listByProject'.format(self.url_base),
params={
'token': self.analysis.token,
'project': project_id,
},
timeout=constants.Base.timeout.value
)
@patch.object(requests.Session, 'request')
def test_download(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
analysis = 'this is analysis'
mock_request().iter_content.return_value = [
bytes(content, 'utf-8') for content in analysis]
analysis_id = self.gen_random_int()
self.assertFalse(os.path.isfile(self.test_analysis_file_path))
returned_value = self.analysis.download(analysis_id, self.test_analysis_file_path)
self.assertTrue(os.path.isfile(self.test_analysis_file_path))
self.assertIsNone(returned_value)
with open(self.test_analysis_file_path) as f:
self.assertEqual(''.join(analysis), f.read())
mock_request.assert_called_with(
constants.HttpMethod.get.value,
"{}/download".format(self.url_base),
params={
'token': self.analysis.token,
'analyse': analysis_id,
'format': constants.AnalysisFormat.CSV.value
},
timeout=constants.Base.timeout.value * 5,
stream=True
)
the-stack_106_26328 | """
# Trains a NASNetMobile model on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.nasnet import NASNetMobile
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.002
args_model = 'mnasnet'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final5/' + job_name + '*'
total_epochs = 143
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
base_model = NASNetMobile(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final5/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
the-stack_106_26332 | """
Tutorial 3: Null models for gradient significance
==================================================
In this tutorial we assess the significance of correlations between the first
canonical gradient and data from other modalities (curvature, cortical
thickness and T1w/T2w image intensity). A normal test of the significance of
the correlation cannot be used, because the spatial auto-correlation in MRI
data may bias the test statistic. In this tutorial we will show two approaches
for null hypothesis testing: spin permutations and Moran spectral
randomization.
.. note::
When using either approach to compare gradients to non-gradient markers,
we recommend randomizing the non-gradient markers as these randomizations
need not maintain the statistical independence between gradients.
"""
###############################################################################
# Spin Permutations
# ------------------------------
#
# Here, we use the spin permutations approach previously proposed in
# `(Alexander-Bloch et al., 2018)
# <https://www.sciencedirect.com/science/article/pii/S1053811918304968>`_,
# which preserves the auto-correlation of the permuted feature(s) by rotating
# the feature data on the spherical domain.
# We will start by loading the conte69 surfaces for left and right hemispheres,
# their corresponding spheres, midline mask, and t1w/t2w intensity as well as
# cortical thickness data, and a template functional gradient.
import numpy as np
from brainspace.datasets import load_gradient, load_marker, load_conte69
# load the conte69 hemisphere surfaces and spheres
surf_lh, surf_rh = load_conte69()
sphere_lh, sphere_rh = load_conte69(as_sphere=True)
# Load the data
t1wt2w_lh, t1wt2w_rh = load_marker('t1wt2w')
t1wt2w = np.concatenate([t1wt2w_lh, t1wt2w_rh])
thickness_lh, thickness_rh = load_marker('thickness')
thickness = np.concatenate([thickness_lh, thickness_rh])
# Template functional gradient
embedding = load_gradient('fc', idx=0, join=True)
###############################################################################
# Let’s first generate some null data using spintest.
from brainspace.null_models import SpinPermutations
from brainspace.plotting import plot_hemispheres
# Let's create some rotations
n_rand = 1000
sp = SpinPermutations(n_rep=n_rand, random_state=0)
sp.fit(sphere_lh, points_rh=sphere_rh)
t1wt2w_rotated = np.hstack(sp.randomize(t1wt2w_lh, t1wt2w_rh))
thickness_rotated = np.hstack(sp.randomize(thickness_lh, thickness_rh))
###############################################################################
# As an illustration of the rotation, let’s plot the original t1w/t2w data
# Plot original data
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w, size=(1200, 200), cmap='viridis',
nan_color=(0.5, 0.5, 0.5, 1), color_bar=True, zoom=1.65)
###############################################################################
# as well as a few rotated versions.
# sphinx_gallery_thumbnail_number = 2
# Plot some rotations
plot_hemispheres(surf_lh, surf_rh, array_name=t1wt2w_rotated[:3], size=(1200, 600),
cmap='viridis', nan_color=(0.5, 0.5, 0.5, 1), color_bar=True,
zoom=1.55, label_text=['Rot0', 'Rot1', 'Rot2'])
###############################################################################
#
# .. warning::
#
#     With spin permutations, midline vertices (i.e., NaNs) from both the
# original and rotated data are discarded. Depending on the overlap of
#     the midlines, statistical comparisons between them may compare
# different numbers of features. This can bias your test statistics.
# Therefore, if a large portion of the sphere is not used, we recommend
# using Moran spectral randomization instead.
#
# Now we simply compute the correlations between the first gradient and the
# original data, as well as all rotated data.
from matplotlib import pyplot as plt
from scipy.stats import spearmanr
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
feats = {'t1wt2w': t1wt2w, 'thickness': thickness}
rotated = {'t1wt2w': t1wt2w_rotated, 'thickness': thickness_rotated}
r_spin = np.empty(n_rand)
mask = ~np.isnan(thickness)
for k, (fn, feat) in enumerate(feats.items()):
r_obs, pv_obs = spearmanr(feat[mask], embedding[mask])
# Compute perm pval
for i, perm in enumerate(rotated[fn]):
mask_rot = mask & ~np.isnan(perm) # Remove midline
r_spin[i] = spearmanr(perm[mask_rot], embedding[mask_rot])[0]
pv_spin = np.mean(np.abs(r_spin) >= np.abs(r_obs))
# Plot null dist
axs[k].hist(r_spin, bins=25, density=True, alpha=0.5, color=(0.8, 0.8, 0.8))
axs[k].axvline(r_obs, lw=2, ls='--', color='k')
axs[k].set_xlabel('Correlation with {}'.format(fn))
if k == 0:
axs[k].set_ylabel('Density')
print('{}:\n Obs : {:.5e}\n Spin: {:.5e}\n'.
format(fn.capitalize(), pv_obs, pv_spin))
fig.tight_layout()
plt.show()
###############################################################################
# It is interesting to see that both p-values increase when taking into
# consideration the auto-correlation present in the surfaces. Also, we can see
# that the correlation with thickness is no longer statistically significant
# after spin permutations.
#
#
#
# Moran Spectral Randomization
# ------------------------------
#
# Moran Spectral Randomization (MSR) computes Moran's I, a metric for spatial
# auto-correlation and generates normally distributed data with similar
# auto-correlation. MSR relies on a weight matrix denoting the spatial
# proximity of features to one another. Within neuroimaging, one
# straightforward example of this is inverse geodesic distance i.e. distance
# along the cortical surface.
#
# In this example we will show how to use MSR to assess statistical
# significance between cortical markers (here curvature and cortical t1wt2w
# intensity) and the first functional connectivity gradient. We will start by
# loading the left temporal lobe mask, t1w/t2w intensity as well as cortical
# thickness data, and a template functional gradient
from brainspace.datasets import load_mask
n_pts_lh = surf_lh.n_points
mask_tl, _ = load_mask(name='temporal')
# Keep only the temporal lobe.
embedding_tl = embedding[:n_pts_lh][mask_tl]
t1wt2w_tl = t1wt2w_lh[mask_tl]
curv_tl = load_marker('curvature')[0][mask_tl]
###############################################################################
# We will now compute the Moran eigenvectors. This can be done either by
# providing a weight matrix of spatial proximity between each vertex, or by
# providing a cortical surface. Here we’ll use a cortical surface.
from brainspace.null_models import MoranRandomization
from brainspace.mesh import mesh_elements as me
# compute spatial weight matrix
w = me.get_ring_distance(surf_lh, n_ring=1, mask=mask_tl)
w.data **= -1
msr = MoranRandomization(n_rep=n_rand, procedure='singleton', tol=1e-6,
random_state=0)
msr.fit(w)
###############################################################################
# Using the Moran eigenvectors we can now compute the randomized data.
curv_rand = msr.randomize(curv_tl)
t1wt2w_rand = msr.randomize(t1wt2w_tl)
###############################################################################
# Now that we have the randomized data, we can compute correlations between
# the gradient and the real/randomised data and generate the non-parametric
# p-values.
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
feats = {'t1wt2w': t1wt2w_tl, 'curvature': curv_tl}
rand = {'t1wt2w': t1wt2w_rand, 'curvature': curv_rand}
for k, (fn, data) in enumerate(rand.items()):
r_obs, pv_obs = spearmanr(feats[fn], embedding_tl, nan_policy='omit')
# Compute perm pval
r_rand = np.asarray([spearmanr(embedding_tl, d)[0] for d in data])
pv_rand = np.mean(np.abs(r_rand) >= np.abs(r_obs))
# Plot null dist
axs[k].hist(r_rand, bins=25, density=True, alpha=0.5, color=(0.8, 0.8, 0.8))
axs[k].axvline(r_obs, lw=2, ls='--', color='k')
axs[k].set_xlabel('Correlation with {}'.format(fn))
if k == 0:
axs[k].set_ylabel('Density')
print('{}:\n Obs : {:.5e}\n Moran: {:.5e}\n'.
format(fn.capitalize(), pv_obs, pv_rand))
fig.tight_layout()
plt.show()
###############################################################################
# There are some scenarios where MSR results do not follow a normal
# distribution. It is relatively simple to check whether this occurs in our
# data by visualizing the null distributions. Check this interesting paper
# for more information `(Burt et al., 2020) <https://www.biorxiv.org/content/
# 10.1101/2020.02.18.955054v1>`_.
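###############################################################################
# (Added sketch, not part of the original tutorial.) A quick formal complement
# to eyeballing the histograms above is a normality test on the null
# correlations, here for the t1w/t2w randomizations:
from scipy.stats import normaltest
null_r = np.asarray([spearmanr(embedding_tl, d)[0] for d in t1wt2w_rand])
print('Normality test p-value: {:.3e}'.format(normaltest(null_r).pvalue))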
the-stack_106_26333 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Trimgalore(Package):
"""Trim Galore! is a wrapper around Cutadapt and FastQC to consistently
apply adapter and quality trimming to FastQ files, with extra
functionality for RRBS data."""
homepage = "https://github.com/FelixKrueger/TrimGalore"
url = "https://github.com/FelixKrueger/TrimGalore/archive/0.4.4.tar.gz"
version('0.6.1', sha256='658578c29d007fe66f9ab49608442be703a6fcf535db06eb82659c7edccb62b0')
version('0.6.0', sha256='f374dfa4c94e2ad50c63276dda0f341fd95b29cb1d5a0e2ad56e8b0168b758ec')
version('0.4.5', 'c71756042b2a65c34d483533a29dc206')
version('0.4.4', 'aae1b807b48e38bae7074470203997bb')
depends_on('perl', type=('build', 'run'))
depends_on('py-cutadapt', type=('build', 'run'))
depends_on('fastqc')
def install(self, spec, prefix):
filter_file(r'#!/usr/bin/perl', '#!/usr/bin/env perl', 'trim_galore')
mkdirp(prefix.bin)
install('trim_galore', prefix.bin)
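# Added note (illustrative, not part of the original recipe): following Spack's
# class-to-package naming convention, this recipe would typically be installed
# with `spack install trimgalore`.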
the-stack_106_26334 | """authentik expression policy evaluator"""
from ipaddress import ip_address, ip_network
from typing import TYPE_CHECKING, Optional
from django.http import HttpRequest
from django_otp import devices_for_user
from structlog.stdlib import get_logger
from authentik.core.models import User
from authentik.flows.planner import PLAN_CONTEXT_SSO
from authentik.lib.expression.evaluator import BaseEvaluator
from authentik.lib.utils.http import get_client_ip
from authentik.policies.exceptions import PolicyException
from authentik.policies.models import Policy, PolicyBinding
from authentik.policies.process import PolicyProcess
from authentik.policies.types import PolicyRequest, PolicyResult
LOGGER = get_logger()
if TYPE_CHECKING:
from authentik.policies.expression.models import ExpressionPolicy
class PolicyEvaluator(BaseEvaluator):
"""Validate and evaluate python-based expressions"""
_messages: list[str]
policy: Optional["ExpressionPolicy"] = None
def __init__(self, policy_name: str):
super().__init__()
self._messages = []
self._context["ak_logger"] = get_logger(policy_name)
self._context["ak_message"] = self.expr_func_message
self._context["ak_user_has_authenticator"] = self.expr_func_user_has_authenticator
self._context["ak_call_policy"] = self.expr_func_call_policy
self._context["ip_address"] = ip_address
self._context["ip_network"] = ip_network
self._filename = policy_name or "PolicyEvaluator"
def expr_func_message(self, message: str):
"""Wrapper to append to messages list, which is returned with PolicyResult"""
self._messages.append(message)
def expr_func_call_policy(self, name: str, **kwargs) -> PolicyResult:
"""Call policy by name, with current request"""
policy = Policy.objects.filter(name=name).select_subclasses().first()
if not policy:
raise ValueError(f"Policy '{name}' not found.")
req: PolicyRequest = self._context["request"]
req.context.update(kwargs)
proc = PolicyProcess(PolicyBinding(policy=policy), request=req, connection=None)
return proc.profiling_wrapper()
def expr_func_user_has_authenticator(
self, user: User, device_type: Optional[str] = None
) -> bool:
"""Check if a user has any authenticator devices, optionally matching *device_type*"""
user_devices = devices_for_user(user)
if device_type:
for device in user_devices:
device_class = device.__class__.__name__.lower().replace("device", "")
if device_class == device_type:
return True
return False
return len(list(user_devices)) > 0
def set_policy_request(self, request: PolicyRequest):
"""Update context based on policy request (if http request is given, update that too)"""
# update website/docs/expressions/_objects.md
# update website/docs/expressions/_functions.md
self._context["ak_is_sso_flow"] = request.context.get(PLAN_CONTEXT_SSO, False)
if request.http_request:
self.set_http_request(request.http_request)
self._context["request"] = request
self._context["context"] = request.context
def set_http_request(self, request: HttpRequest):
"""Update context based on http request"""
# update website/docs/expressions/_objects.md
# update website/docs/expressions/_functions.md
self._context["ak_client_ip"] = ip_address(get_client_ip(request))
self._context["http_request"] = request
def handle_error(self, exc: Exception, expression_source: str):
"""Exception Handler"""
raise PolicyException(exc)
def evaluate(self, expression_source: str) -> PolicyResult:
"""Parse and evaluate expression. Policy is expected to return a truthy object.
Messages can be added using 'do ak_message()'."""
try:
result = super().evaluate(expression_source)
except PolicyException as exc:
# PolicyExceptions should be propagated back to the process,
# which handles recording and returning a correct result
raise exc
except Exception as exc: # pylint: disable=broad-except
LOGGER.warning("Expression error", exc=exc)
return PolicyResult(False, str(exc))
else:
policy_result = PolicyResult(False, *self._messages)
if result is None:
LOGGER.warning(
"Expression policy returned None",
src=expression_source,
req=self._context,
)
policy_result.passing = False
if result:
policy_result.passing = bool(result)
return policy_result
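# Added illustration (not part of the original module): an expression evaluated
# by this class can use the helpers registered in __init__, for example (the
# network range below is a made-up value):
#
#     if ak_client_ip in ip_network("10.0.0.0/8"):
#         ak_message("Internal network access")
#         return True
#     return ak_is_sso_flow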
the-stack_106_26335 | from Node import error
SYNTAX_NODE_SERIALIZATION_CODES = {
# 0 is 'Token'. Needs to be defined manually
# 1 is 'Unknown'. Needs to be defined manually
'UnknownDecl': 2,
'TypealiasDecl': 3,
'AssociatedtypeDecl': 4,
'IfConfigDecl': 5,
'PoundErrorDecl': 6,
'PoundWarningDecl': 7,
'PoundSourceLocation': 8,
'ClassDecl': 9,
'StructDecl': 10,
'ProtocolDecl': 11,
'ExtensionDecl': 12,
'FunctionDecl': 13,
'InitializerDecl': 14,
'DeinitializerDecl': 15,
'SubscriptDecl': 16,
'ImportDecl': 17,
'AccessorDecl': 18,
'VariableDecl': 19,
'EnumCaseDecl': 20,
'EnumDecl': 21,
'OperatorDecl': 22,
'PrecedenceGroupDecl': 23,
'UnknownExpr': 24,
'InOutExpr': 25,
'PoundColumnExpr': 26,
'TryExpr': 27,
'IdentifierExpr': 28,
'SuperRefExpr': 29,
'NilLiteralExpr': 30,
'DiscardAssignmentExpr': 31,
'AssignmentExpr': 32,
'SequenceExpr': 33,
'PoundLineExpr': 34,
'PoundFileExpr': 35,
'PoundFunctionExpr': 36,
'PoundDsohandleExpr': 37,
'SymbolicReferenceExpr': 38,
'PrefixOperatorExpr': 39,
'BinaryOperatorExpr': 40,
'ArrowExpr': 41,
'FloatLiteralExpr': 42,
'TupleExpr': 43,
'ArrayExpr': 44,
'DictionaryExpr': 45,
'ImplicitMemberExpr': 46,
'IntegerLiteralExpr': 47,
'StringLiteralExpr': 48,
'BooleanLiteralExpr': 49,
'TernaryExpr': 50,
'MemberAccessExpr': 51,
'DotSelfExpr': 52,
'IsExpr': 53,
'AsExpr': 54,
'TypeExpr': 55,
'ClosureExpr': 56,
'UnresolvedPatternExpr': 57,
'FunctionCallExpr': 58,
'SubscriptExpr': 59,
'OptionalChainingExpr': 60,
'ForcedValueExpr': 61,
'PostfixUnaryExpr': 62,
'SpecializeExpr': 63,
'KeyPathExpr': 65,
'KeyPathBaseExpr': 66,
'ObjcKeyPathExpr': 67,
'ObjcSelectorExpr': 68,
'EditorPlaceholderExpr': 69,
'ObjectLiteralExpr': 70,
'UnknownStmt': 71,
'ContinueStmt': 72,
'WhileStmt': 73,
'DeferStmt': 74,
'ExpressionStmt': 75,
'RepeatWhileStmt': 76,
'GuardStmt': 77,
'ForInStmt': 78,
'SwitchStmt': 79,
'DoStmt': 80,
'ReturnStmt': 81,
'FallthroughStmt': 82,
'BreakStmt': 83,
'DeclarationStmt': 84,
'ThrowStmt': 85,
'IfStmt': 86,
'Decl': 87,
'Expr': 88,
'Stmt': 89,
'Type': 90,
'Pattern': 91,
'CodeBlockItem': 92,
'CodeBlock': 93,
'DeclNameArgument': 94,
'DeclNameArguments': 95,
# removed: 'FunctionCallArgument': 96,
'TupleExprElement': 97,
'ArrayElement': 98,
'DictionaryElement': 99,
'ClosureCaptureItem': 100,
'ClosureCaptureSignature': 101,
'ClosureParam': 102,
'ClosureSignature': 103,
'StringSegment': 104,
'ExpressionSegment': 105,
'ObjcNamePiece': 106,
'TypeInitializerClause': 107,
'ParameterClause': 108,
'ReturnClause': 109,
'FunctionSignature': 110,
'IfConfigClause': 111,
'PoundSourceLocationArgs': 112,
'DeclModifier': 113,
'InheritedType': 114,
'TypeInheritanceClause': 115,
'MemberDeclBlock': 116,
'MemberDeclListItem': 117,
'SourceFile': 118,
'InitializerClause': 119,
'FunctionParameter': 120,
'AccessLevelModifier': 121,
'AccessPathComponent': 122,
'AccessorParameter': 123,
'AccessorBlock': 124,
'PatternBinding': 125,
'EnumCaseElement': 126,
'OperatorPrecedenceAndTypes': 127,
'PrecedenceGroupRelation': 128,
'PrecedenceGroupNameElement': 129,
'PrecedenceGroupAssignment': 130,
'PrecedenceGroupAssociativity': 131,
'Attribute': 132,
'LabeledSpecializeEntry': 133,
'ImplementsAttributeArguments': 134,
'ObjCSelectorPiece': 135,
'WhereClause': 136,
'ConditionElement': 137,
'AvailabilityCondition': 138,
'MatchingPatternCondition': 139,
'OptionalBindingCondition': 140,
'ElseIfContinuation': 141,
'ElseBlock': 142,
'SwitchCase': 143,
'SwitchDefaultLabel': 144,
'CaseItem': 145,
'SwitchCaseLabel': 146,
'CatchClause': 147,
'GenericWhereClause': 148,
'SameTypeRequirement': 149,
'GenericParameter': 150,
'GenericParameterClause': 151,
'ConformanceRequirement': 152,
'CompositionTypeElement': 153,
'TupleTypeElement': 154,
'GenericArgument': 155,
'GenericArgumentClause': 156,
'TypeAnnotation': 157,
'TuplePatternElement': 158,
'AvailabilityArgument': 159,
'AvailabilityLabeledArgument': 160,
'AvailabilityVersionRestriction': 161,
'VersionTuple': 162,
'CodeBlockItemList': 163,
# removed: 'FunctionCallArgumentList': 164,
'TupleExprElementList': 165,
'ArrayElementList': 166,
'DictionaryElementList': 167,
'StringLiteralSegments': 168,
'DeclNameArgumentList': 169,
'ExprList': 170,
'ClosureCaptureItemList': 171,
'ClosureParamList': 172,
'ObjcName': 173,
'FunctionParameterList': 174,
'IfConfigClauseList': 175,
'InheritedTypeList': 176,
'MemberDeclList': 177,
'ModifierList': 178,
'AccessPath': 179,
'AccessorList': 180,
'PatternBindingList': 181,
'EnumCaseElementList': 182,
'PrecedenceGroupAttributeList': 183,
'PrecedenceGroupNameList': 184,
'TokenList': 185,
'NonEmptyTokenList': 186,
'AttributeList': 187,
'SpecializeAttributeSpecList': 188,
'ObjCSelector': 189,
'SwitchCaseList': 190,
'CatchClauseList': 191,
'CaseItemList': 192,
'ConditionElementList': 193,
'GenericRequirementList': 194,
'GenericParameterList': 195,
'CompositionTypeElementList': 196,
'TupleTypeElementList': 197,
'GenericArgumentList': 198,
'TuplePatternElementList': 199,
'AvailabilitySpecList': 200,
'UnknownPattern': 201,
'EnumCasePattern': 202,
'IsTypePattern': 203,
'OptionalPattern': 204,
'IdentifierPattern': 205,
'AsTypePattern': 206,
'TuplePattern': 207,
'WildcardPattern': 208,
'ExpressionPattern': 209,
'ValueBindingPattern': 210,
'UnknownType': 211,
'SimpleTypeIdentifier': 212,
'MemberTypeIdentifier': 213,
'ClassRestrictionType': 214,
'ArrayType': 215,
'DictionaryType': 216,
'MetatypeType': 217,
'OptionalType': 218,
'ImplicitlyUnwrappedOptionalType': 219,
'CompositionType': 220,
'TupleType': 221,
'FunctionType': 222,
'AttributedType': 223,
'YieldStmt': 224,
'YieldList': 225,
'IdentifierList': 226,
'NamedAttributeStringArgument': 227,
'DeclName': 228,
'PoundAssertStmt': 229,
'SomeType': 230,
'CustomAttribute': 231,
'GenericRequirement': 232,
'DifferentiableAttributeArguments': 233,
'DifferentiationParamsClause': 234,
'DifferentiationParams': 235,
'DifferentiationParamList': 236,
'DifferentiationParam': 237,
'DifferentiableAttributeFuncSpecifier': 238,
'FunctionDeclName': 239,
'PoundFilePathExpr': 240,
}
def verify_syntax_node_serialization_codes(nodes, serialization_codes):
# Verify that all nodes have serialization codes
for node in nodes:
if not node.is_base() and node.syntax_kind not in serialization_codes:
error('Node %s has no serialization code' % node.syntax_kind)
# Verify that no serialization code is used twice
used_codes = set()
for serialization_code in serialization_codes.values():
if serialization_code in used_codes:
error("Serialization code %d used twice" % serialization_code)
used_codes.add(serialization_code)
def get_serialization_code(syntax_kind):
return SYNTAX_NODE_SERIALIZATION_CODES[syntax_kind]
the-stack_106_26336 | import graphene
import graphene_django
from typing import Dict, Any
from django.db.models.query import QuerySet
from meetup.models import Meetup
from meetup.models import Event
from meetup.models import Attendee
from meetup.models import Attendance
class MeetupType(graphene_django.DjangoObjectType):
"""GraphQL type for Meetup model"""
class Meta:
model = Meetup
class EventType(graphene_django.DjangoObjectType):
"""GraphQL type for Event model"""
class Meta:
model = Event
class AttendeeType(graphene_django.DjangoObjectType):
"""GraphQL type for Attendee model"""
class Meta:
model = Attendee
class AttendanceType(graphene_django.DjangoObjectType):
"""GraphQL type for Attendance model"""
class Meta:
model = Attendance
class MeetupQuery:
"""Query which provides access to all meetups."""
meetup = graphene.Field(
MeetupType,
id=graphene.Int(),
name=graphene.String(),
description="Fetch a specific meetup by specifying the 'id' or 'name'."
)
all_meetups = graphene.List(
MeetupType,
limit=graphene.Int(),
description="All meetups available in the application."
)
def resolve_meetup(self, info: Any, **kwargs: Dict) -> Meetup:
"""
Args:
info (Any):
Returns:
A Meetup instance
"""
pk = kwargs.get('id', None)
name = kwargs.get('name', None)
if pk is not None:
return Meetup.objects.get(pk=pk)
if name is not None:
return Meetup.objects.get(name=name)
def resolve_all_meetups(self, info: Any, **kwargs: Dict) -> QuerySet:
"""Return a list of all meetups. Optionally limit the records if the
client provides a `limit` input.
Args:
info (Any):
Returns:
A Meetup queryset
"""
limit = kwargs.get('limit', None)
meetups = Meetup.objects.all()
if limit is None:
return meetups
return meetups[:limit]
class EventQuery:
event = graphene.Field(
EventType,
id=graphene.Int(),
name=graphene.String()
)
all_events = graphene.List(
EventType,
limit=graphene.Int()
)
def resolve_event(self, info: Any, **kwargs: Dict) -> Event:
"""
Args:
info (Any):
Returns:
A Event instance
"""
pk = kwargs.get('id', None)
name = kwargs.get('name', None)
if pk is not None:
return Event.objects.get(pk=pk)
if name is not None:
return Event.objects.get(name=name)
def resolve_all_events(self, info: Any, **kwargs: Dict) -> QuerySet:
"""
Args:
info (Any):
Returns:
A Event queryset
"""
limit = kwargs.get('limit', None)
events = Event.objects.all()
if limit is None:
return events
return events[:limit]
class AttendeeQuery:
attendee = graphene.Field(
AttendeeType,
id=graphene.Int(),
name=graphene.String()
)
all_attendees = graphene.List(AttendeeType)
def resolve_attendee(self, info: Any, **kwargs: Dict) -> Attendee:
"""
Args:
info (Any):
Returns:
An Attendee instance
"""
pk = kwargs.get('id', None)
name = kwargs.get('name', None)
if pk is not None:
return Attendee.objects.get(pk=pk)
if name is not None:
return Attendee.objects.get(name=name)
def resolve_all_attendees(self, info: Any, **kwargs: Dict) -> QuerySet:
"""
Args:
info (Any):
Returns:
An Attendee queryset
"""
return Attendee.objects.all()
class AttendanceQuery:
attendance = graphene.Field(AttendanceType, id=graphene.Int())
all_attendances = graphene.List(AttendanceType)
def resolve_attendance(self, info: Any, **kwargs: Dict) -> Attendance:
"""
Args:
info (Any):
Returns:
An Attendance instance
"""
pk = kwargs.get('id', None)
if pk is None:
return
return Attendance.objects.get(pk=pk)
def resolve_all_attendances(self, info: Any, **kwargs: Dict) -> QuerySet:
"""
Args:
info (Any):
Returns:
An Attendance queryset
"""
return Attendance.objects.all()
class Query(
MeetupQuery,
EventQuery,
AttendeeQuery,
AttendanceQuery,
graphene.ObjectType
):
"""
# Root Query
### Root query for the application.
This documentation comes directly from the doc string
for the `meetup.schema:Query class`. The doc string contains
markdown.
The root query of the application provides the access to the following:
- MeetupQuery
- EventQuery
- AttendeeQuery
- AttendanceQuery
"""
pass
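# Added usage sketch (not part of the original module; assumes Django is
# configured and the meetup app is installed):
#
#     schema = graphene.Schema(query=Query)
#     result = schema.execute("{ allMeetups(limit: 2) { name } }")
#     print(result.data)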
the-stack_106_26338 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any
from zipfile import ZipFile
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.filters import DatabaseFilter
from superset.extensions import event_logger
from superset.models.sql_lab import SavedQuery
from superset.queries.saved_queries.commands.bulk_delete import (
BulkDeleteSavedQueryCommand,
)
from superset.queries.saved_queries.commands.exceptions import (
SavedQueryBulkDeleteFailedError,
SavedQueryNotFoundError,
)
from superset.queries.saved_queries.commands.export import ExportSavedQueriesCommand
from superset.queries.saved_queries.commands.importers.dispatcher import (
ImportSavedQueriesCommand,
)
from superset.queries.saved_queries.filters import (
SavedQueryAllTextFilter,
SavedQueryFavoriteFilter,
SavedQueryFilter,
)
from superset.queries.saved_queries.schemas import (
get_delete_ids_schema,
get_export_ids_schema,
openapi_spec_methods_override,
)
from superset.views.base_api import (
BaseSupersetModelRestApi,
requires_form_data,
statsd_metrics,
)
logger = logging.getLogger(__name__)
class SavedQueryRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(SavedQuery)
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.RELATED,
RouteMethod.DISTINCT,
RouteMethod.IMPORT,
"bulk_delete", # not using RouteMethod since locally defined
}
class_permission_name = "SavedQuery"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
resource_name = "saved_query"
allow_browser_login = True
base_filters = [["id", SavedQueryFilter, lambda: []]]
show_columns = [
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"description",
"id",
"label",
"schema",
"sql",
"sql_tables",
]
list_columns = [
"changed_on_delta_humanized",
"created_on",
"created_by.first_name",
"created_by.id",
"created_by.last_name",
"database.database_name",
"database.id",
"db_id",
"description",
"id",
"label",
"schema",
"sql",
"sql_tables",
"rows",
"last_run_delta_humanized",
"extra",
]
add_columns = ["db_id", "description", "label", "schema", "sql"]
edit_columns = add_columns
order_columns = [
"schema",
"label",
"description",
"sql",
"rows",
"created_by.first_name",
"database.database_name",
"created_on",
"changed_on_delta_humanized",
"last_run_delta_humanized",
]
search_columns = ["id", "database", "label", "schema", "created_by"]
search_filters = {
"id": [SavedQueryFavoriteFilter],
"label": [SavedQueryAllTextFilter],
}
apispec_parameter_schemas = {
"get_delete_ids_schema": get_delete_ids_schema,
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_tag = "Queries"
openapi_spec_methods = openapi_spec_methods_override
related_field_filters = {
"database": "database_name",
}
filter_rel_fields = {"database": [["id", DatabaseFilter, lambda: []]]}
allowed_rel_fields = {"database"}
allowed_distinct_fields = {"schema"}
def pre_add(self, item: SavedQuery) -> None:
item.user = g.user
def pre_update(self, item: SavedQuery) -> None:
self.pre_add(item)
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Saved Queries
---
delete:
description: >-
Deletes multiple saved queries in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Saved queries bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteSavedQueryCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d saved query",
"Deleted %(num)d saved queries",
num=len(item_ids),
),
)
except SavedQueryNotFoundError:
return self.response_404()
except SavedQueryBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
def export(self, **kwargs: Any) -> Response:
"""Export saved queries
---
get:
description: >-
Exports multiple saved queries and downloads them as YAML files
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: A zip file with saved query(ies) and database(s) as YAML
content:
application/zip:
schema:
type: string
format: binary
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
token = request.args.get("token")
requested_ids = kwargs["rison"]
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"saved_query_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportSavedQueriesCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except SavedQueryNotFoundError:
return self.response_404()
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
if token:
response.set_cookie(token, "done", max_age=600)
return response
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
@requires_form_data
def import_(self) -> Response:
"""Import Saved Queries with associated databases
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP)
type: string
format: binary
passwords:
description: >-
JSON map of passwords for each featured database in the
ZIP file. If the ZIP includes a database config in the path
`databases/MyDatabase.yaml`, the password should be provided
in the following format:
`{"databases/MyDatabase.yaml": "my_password"}`.
type: string
overwrite:
description: overwrite existing saved queries?
type: boolean
responses:
200:
description: Saved Query import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportSavedQueriesCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
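# Added illustration (not part of the original class): example REST calls
# against this API, assuming Superset's default /api/v1 prefix and
# rison-encoded id lists.
#
#     DELETE /api/v1/saved_query/?q=!(1,2,3)       -> bulk delete ids 1, 2, 3
#     GET    /api/v1/saved_query/export/?q=!(1,2)  -> ZIP bundle of YAML files
#     POST   /api/v1/saved_query/import/           -> multipart upload of a
#                                                     previously exported ZIP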
the-stack_106_26339 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Cisco and/or its affiliates.
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The nxos hsrp_interfaces class
This class creates a command set to bring the current device configuration
to a desired end-state. The command set is based on a comparison of the
current configuration (as dict) and the provided configuration (as dict).
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_diff,
to_list,
remove_empties,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.facts.facts import (
Facts,
)
from ansible_collections.cisco.nxos.plugins.module_utils.network.nxos.utils.utils import (
flatten_dict,
get_interface_type,
normalize_interface,
search_obj_in_list,
vlan_range_to_list,
)
class Hsrp_interfaces(ConfigBase):
"""
The nxos_hsrp_interfaces class
"""
gather_subset = ["!all", "!min"]
gather_network_resources = ["hsrp_interfaces"]
def __init__(self, module):
super(Hsrp_interfaces, self).__init__(module)
def get_hsrp_interfaces_facts(self, data=None):
""" Get the 'facts' (the current configuration)
:rtype: A dictionary
:returns: The current configuration as a dictionary
"""
facts, _warnings = Facts(self._module).get_facts(
self.gather_subset, self.gather_network_resources, data=data
)
hsrp_interfaces_facts = facts["ansible_network_resources"].get(
"hsrp_interfaces", []
)
return hsrp_interfaces_facts
def edit_config(self, commands):
return self._connection.edit_config(commands)
def execute_module(self):
""" Execute the module
:rtype: A dictionary
:returns: The result from module execution
"""
result = {"changed": False}
warnings = []
commands = []
if self.state in self.ACTION_STATES:
existing_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
else:
existing_hsrp_interfaces_facts = []
if self.state in self.ACTION_STATES or self.state == "rendered":
commands.extend(self.set_config(existing_hsrp_interfaces_facts))
if commands and self.state in self.ACTION_STATES:
if not self._module.check_mode:
self.edit_config(commands)
result["changed"] = True
if self.state in self.ACTION_STATES:
result["commands"] = commands
if self.state in self.ACTION_STATES or self.state == "gathered":
changed_hsrp_interfaces_facts = self.get_hsrp_interfaces_facts()
elif self.state == "rendered":
result["rendered"] = commands
elif self.state == "parsed":
running_config = self._module.params["running_config"]
if not running_config:
self._module.fail_json(
msg="value of running_config parameter must not be empty for state parsed"
)
result["parsed"] = self.get_hsrp_interfaces_facts(
data=running_config
)
if self.state in self.ACTION_STATES:
result["before"] = existing_hsrp_interfaces_facts
if result["changed"]:
result["after"] = changed_hsrp_interfaces_facts
elif self.state == "gathered":
result["gathered"] = changed_hsrp_interfaces_facts
result["warnings"] = warnings
return result
def set_config(self, existing_hsrp_interfaces_facts):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
config = self._module.params["config"]
want = []
if config:
for w in config:
w.update({"name": normalize_interface(w["name"])})
want.append(w)
have = existing_hsrp_interfaces_facts
resp = self.set_state(want, have)
return to_list(resp)
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
:param want: the desired configuration as a dictionary
:param have: the current configuration as a dictionary
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
state = self._module.params["state"]
# check for 'config' keyword in play
if (
state in ("overridden", "merged", "replaced", "rendered")
and not want
):
self._module.fail_json(
msg="value of config parameter must not be empty for state {0}".format(
state
)
)
cmds = list()
if state == "overridden":
cmds.extend(self._state_overridden(want, have))
elif state == "deleted":
cmds.extend(self._state_deleted(want, have))
else:
for w in want:
if state in ["merged", "rendered"]:
cmds.extend(self._state_merged(flatten_dict(w), have))
elif state == "replaced":
cmds.extend(self._state_replaced(flatten_dict(w), have))
return cmds
def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
cmds = []
obj_in_have = search_obj_in_list(want["name"], have, "name")
if obj_in_have:
diff = dict_diff(want, obj_in_have)
else:
diff = want
merged_cmds = self.set_commands(want, have)
if "name" not in diff:
diff["name"] = want["name"]
replaced_cmds = []
if obj_in_have:
replaced_cmds = self.del_attribs(diff)
if replaced_cmds or merged_cmds:
for cmd in set(replaced_cmds).intersection(set(merged_cmds)):
merged_cmds.remove(cmd)
cmds.extend(replaced_cmds)
cmds.extend(merged_cmds)
return cmds
def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
cmds = []
for h in have:
# Check existing states, set to default if not in want or different than want
h = flatten_dict(h)
obj_in_want = search_obj_in_list(h["name"], want, "name")
if obj_in_want:
# Let the 'want' loop handle all vals for this interface
continue
cmds.extend(self.del_attribs(h))
for w in want:
# Update any want attrs if needed. The overridden state considers
# the play as the source of truth for the entire device, therefore
# set any unspecified attrs to their default state.
w = self.set_none_vals_to_defaults(flatten_dict(w))
cmds.extend(self.set_commands(w, have))
return cmds
def _state_merged(self, want, have):
""" The command generator when state is merged
:rtype: A list
:returns: the commands necessary to merge the provided into
the current configuration
"""
return self.set_commands(want, have)
def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
if not (want or have):
return []
cmds = []
if want:
for w in want:
obj_in_have = flatten_dict(
search_obj_in_list(w["name"], have, "name")
)
cmds.extend(self.del_attribs(obj_in_have))
else:
for h in have:
cmds.extend(self.del_attribs(flatten_dict(h)))
return cmds
def del_attribs(self, obj):
if not obj or len(obj.keys()) == 1:
return []
cmds = []
if "bfd" in obj:
cmds.append("no hsrp bfd")
if cmds:
cmds.insert(0, "interface " + obj["name"])
return cmds
def set_none_vals_to_defaults(self, want):
# Set dict None values to default states
if "bfd" in want and want["bfd"] is None:
want["bfd"] = "disable"
return want
def diff_of_dicts(self, want, obj_in_have):
diff = set(want.items()) - set(obj_in_have.items())
diff = dict(diff)
if diff and want["name"] == obj_in_have["name"]:
diff.update({"name": want["name"]})
return diff
def add_commands(self, want, obj_in_have):
if not want:
return []
cmds = []
if "bfd" in want and want["bfd"] is not None:
if want["bfd"] == "enable":
cmd = "hsrp bfd"
cmds.append(cmd)
elif (
want["bfd"] == "disable"
and obj_in_have
and obj_in_have.get("bfd") == "enable"
):
cmd = "no hsrp bfd"
cmds.append(cmd)
if cmds:
cmds.insert(0, "interface " + want["name"])
return cmds
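    # Illustrative example of add_commands() above (hypothetical data, not from a real device):
    # with want = {"name": "Ethernet1/1", "bfd": "enable"} and no matching entry in have,
    # it returns ["interface Ethernet1/1", "hsrp bfd"]; if want sets bfd to "disable" while
    # the device currently has it enabled, it returns ["interface Ethernet1/1", "no hsrp bfd"].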
def set_commands(self, want, have):
cmds = []
obj_in_have = search_obj_in_list(want["name"], have, "name")
if not obj_in_have:
cmds = self.add_commands(want, obj_in_have)
else:
diff = self.diff_of_dicts(want, obj_in_have)
cmds = self.add_commands(diff, obj_in_have)
return cmds
|
the-stack_106_26340 | """
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
cgemv
cgerc
cgeru
chemv
ctrmv
csyr
cher
cher2
dgemv
dger
dsymv
dtrmv
dsyr
dsyr2
sgemv
sger
ssymv
strmv
ssyr
ssyr2
zgemv
zgerc
zgeru
zhemv
ztrmv
zsyr
zher
zher2
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
cgemm
chemm
cherk
cher2k
csymm
csyrk
csyr2k
dgemm
dsymm
dsyrk
dsyr2k
sgemm
ssymm
ssyrk
ssyr2k
zgemm
zhemm
zherk
zher2k
zsymm
zsyrk
zsyr2k
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
from __future__ import division, print_function, absolute_import
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# 'd' will be the default for type characters not listed here, e.g. 'i'
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
# some convenience aliases for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
'cdot': 'cdotc', 'zdot': 'zdotc',
'cger': 'cgerc', 'zger': 'zgerc',
'sdotc': 'sdot', 'sdotu': 'sdot',
'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
"""Find best-matching BLAS/LAPACK type.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
prefix : str
BLAS/LAPACK prefix character.
dtype : dtype
Inferred Numpy data type.
prefer_fortran : bool
Whether to prefer Fortran order routines over C order.
"""
dtype = _np.dtype(dtype)
prefer_fortran = False
if arrays:
# use the most generic type in arrays
dtypes = [ar.dtype for ar in arrays]
dtype = _np.find_common_type(dtypes, ())
try:
index = dtypes.index(dtype)
except ValueError:
index = 0
if arrays[index].flags['FORTRAN']:
# prefer Fortran for leading array with column major order
prefer_fortran = True
prefix = _type_conv.get(dtype.char, 'd')
if dtype.char == 'G':
# complex256 -> complex128 (i.e., C long double -> C double)
dtype = _np.dtype('D')
elif dtype.char not in 'fdFD':
dtype = _np.dtype('d')
return prefix, dtype, prefer_fortran
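# Example of the prefix selection above (a minimal sketch; assumes numpy is importable):
#
#   >>> import numpy as np
#   >>> find_best_blas_type((np.eye(3, dtype=np.float32),))
#   ('s', dtype('float32'), False)
#   >>> find_best_blas_type(dtype='complex128')
#   ('z', dtype('complex128'), False)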
def _get_funcs(names, arrays, dtype,
lib_name, fmodule, cmodule,
fmodule_name, cmodule_name, alias):
"""
Return available BLAS/LAPACK functions.
Used also in lapack.py. See get_blas_funcs for docstring.
"""
funcs = []
unpack = False
dtype = _np.dtype(dtype)
module1 = (cmodule, cmodule_name)
module2 = (fmodule, fmodule_name)
if isinstance(names, str):
names = (names,)
unpack = True
prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)
if prefer_fortran:
module1, module2 = module2, module1
for i, name in enumerate(names):
func_name = prefix + name
func_name = alias.get(func_name, func_name)
func = getattr(module1[0], func_name, None)
module_name = module1[1]
if func is None:
func = getattr(module2[0], func_name, None)
module_name = module2[1]
if func is None:
raise ValueError(
'%s function %s could not be found' % (lib_name, func_name))
func.module_name, func.typecode = module_name, prefix
func.dtype = dtype
func.prefix = prefix # Backward compatibility
funcs.append(func)
if unpack:
return funcs[0]
else:
return funcs
def get_blas_funcs(names, arrays=(), dtype=None):
"""Return available BLAS function objects from names.
Arrays are used to determine the optimal prefix of BLAS routines.
Parameters
----------
names : str or sequence of str
Name(s) of BLAS functions without type prefix.
arrays : sequence of ndarrays, optional
Arrays can be given to determine optimal prefix of BLAS
routines. If not given, double-precision routines will be
used, otherwise the most generic type in arrays will be used.
dtype : str or dtype, optional
Data-type specifier. Not used if `arrays` is non-empty.
Returns
-------
funcs : list
List containing the found function(s).
Notes
-----
This routine automatically chooses between Fortran/C
interfaces. Fortran code is used whenever possible for arrays with
column major order. In all other cases, C code is preferred.
In BLAS, the naming convention is that all functions start with a
type prefix, which depends on the type of the principal
matrix. These can be one of {'s', 'd', 'c', 'z'} for the numpy
types {float32, float64, complex64, complex128} respectively.
The code and the dtype are stored in attributes `typecode` and `dtype`
of the returned functions.
"""
return _get_funcs(names, arrays, dtype,
"BLAS", _fblas, _cblas, "fblas", "cblas",
_blas_alias)
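# Usage sketch for get_blas_funcs (assumes numpy is available; the 'd' prefix is
# selected because the arrays below are float64):
#
#   >>> import numpy as np
#   >>> a = b = np.ones((2, 2))
#   >>> gemm = get_blas_funcs('gemm', (a, b))
#   >>> gemm.typecode
#   'd'
#   >>> gemm(1.0, a, b)
#   array([[2., 2.],
#          [2., 2.]])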
|
the-stack_106_26341 | from django.shortcuts import render, redirect
from django.conf import settings
from App.models import User
from App.models.song import Song, SpotifyTrackInput
from App.forms import SpotifyTrackInputForm, SpotifySearchForm
import os
import sys
import spotipy
from spotipy.oauth2 import SpotifyOAuth
import spotify_cred as cred
# this is a pointer to the module object instance itself.
this = sys.modules[__name__]
# declare the spotify_api at the module level
this.spotify_api = None
# define the folder to cache spotify tokens
CACHE_FOLDER = os.path.join(settings.MEDIA_ROOT, '.spotify_caches')
class Spotify_Api():
''' This class is a wrapper around the Spotipy library to access Spotify content'''
def __init__(self, auth_manager, cache_handler, user):
'''This app supports one spotify user at a time.
The auth_manager and cache_handler are passed in along with the Django user object
'''
self.auth_manager = auth_manager
self.cache_handler = cache_handler
self.user = user
# initialize the API
self.spotify = spotipy.Spotify(auth_manager = self.auth_manager)
# this object caches the Spotify display name of the current user
self.user_display_name = None
#################################################################
# CONVERSION METHODS FOR SPOTIFY CONTENT TO DJANGO MODELS
#################################################################
def track_info_subset (self, spotify_track, album_name=None, cover_art=None):
'''This function saves the information about a Spotify track needed by our app'''
new_track = dict()
# populate main fields
new_track['id'] = spotify_track['id']
new_track['name'] = spotify_track['name']
new_track['artist_name'] = spotify_track['artists'][0]['name']
if 'album' in spotify_track:
new_track['album_name'] = spotify_track['album']['name']
if len(spotify_track['album']['images']) > 0:
new_track['cover_art'] = spotify_track['album']['images'][0]['url']
else:
new_track['cover_art'] = None
else:
new_track['album_name'] = album_name
new_track['cover_art'] = cover_art
features = self.spotify.audio_features(spotify_track['id'])
new_track['tempo'] = round(features[0]['tempo'])
# build a duration string
seconds = round(spotify_track['duration_ms']/1000)
minutes = seconds // 60
seconds = seconds - (minutes * 60)
new_track['duration'] = str(minutes) + ":" + "{:02d}".format(seconds)
return new_track
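    # Worked example of the duration formatting above (values are illustrative):
    #   duration_ms = 245000 -> seconds = 245 -> minutes = 4, seconds = 5 -> "4:05"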
def track_list_info(self, tracks):
'''This method processes a list of spotify tracks and returns the required information
        for each track. It also returns the first, last, and total index fields, which
can be used to get another page of tracks.'''
track_list = list()
for track in tracks['items']:
if 'track' in track:
track_list.append(self.track_info_subset(track['track']))
else:
track_list.append(self.track_info_subset(track))
return {'track_list': track_list,
'first': tracks['offset'] + 1,
'last' : tracks['offset'] + len(tracks['items']),
'total': tracks['total']}
def album_info_subset (self, spotify_album):
'''This function saves the information about a Spotify album needed by our app'''
new_album = dict()
new_album['id'] = spotify_album['id']
new_album['album_name'] = spotify_album['name']
new_album['artist_name'] = spotify_album['artists'][0]['name']
if len(spotify_album['images']) > 0:
new_album['cover_art'] = spotify_album['images'][0]['url']
else:
new_album['cover_art'] = settings.STATIC_URL + "img/default.png"
return new_album
def album_list_info(self, albums):
'''This method processes a list of spotify albums and returns the required information
        for each album. It also returns the first, last, and total index fields, which
can be used to get another page of albums.'''
album_list = list()
for i in albums['items']:
if 'album' in i:
album_list.append(self.album_info_subset(i['album']))
else:
album_list.append(self.album_info_subset(i))
return {'album_list': album_list,
'first': albums['offset'] + 1,
'last' : albums['offset'] + len(albums['items']),
'total': albums['total']}
def artist_info_subset (self, spotify_artist):
'''This function saves the information about a Spotify artist needed by our app'''
new_artist = dict()
new_artist['id'] = spotify_artist['id']
new_artist['artist_name'] = spotify_artist['name']
if len(spotify_artist['images']) > 0:
new_artist['artist_image'] = spotify_artist['images'][0]['url']
else:
new_artist['artist_image'] = settings.STATIC_URL + "img/default.png"
return new_artist
def artist_list_info (self, artists):
'''This method processes a list of spotify artists and returns the required information
        for each artist. It also returns the first, last, and total index fields, which
can be used to get another page of artists.'''
artist_list = list()
for i in artists['items']:
artist_list.append(self.artist_info_subset(i))
if 'offset' in artists:
return {'artist_list': artist_list,
'first': artists['offset'] + 1,
'last' : artists['offset'] + len(artists['items']),
'total': artists['total']}
else:
return {'artist_list': artist_list,
'first': 1,
'last' : len(artists['items']),
'total': artists['total']}
def playlist_info_subset (self, spotify_playlist):
        '''This function saves the information about a Spotify playlist needed by our app'''
new_playlist = dict()
new_playlist['id'] = spotify_playlist['id']
new_playlist['name'] = spotify_playlist['name']
new_playlist['owner'] = spotify_playlist['owner']['display_name']
if len(spotify_playlist['images']) > 0:
new_playlist['image'] = spotify_playlist['images'][0]['url']
else:
new_playlist['image'] = settings.STATIC_URL + "img/default.png"
return new_playlist
def info_for_playlists (self, playlists):
'''This method processes a list of spotify playlists and returns the required information
        for each playlist. It also returns the first, last, and total index fields, which
can be used to get another page of playlists.'''
list_of_playlists = list()
for i in playlists['items']:
list_of_playlists.append(self.playlist_info_subset(i))
return {'list_of_playlists': list_of_playlists,
'first': playlists['offset'] + 1,
'last' : playlists['offset'] + len(playlists['items']),
'total': playlists['total']}
def current_username(self):
'''This method returns the display name of the Spotify user.'''
        # if no name is cached yet, call Spotify to get the name
if self.user_display_name is None:
self.user_display_name = self.spotify.current_user()["display_name"]
return self.user_display_name
def recently_played_tracks(self, limit=16):
'''This method returns the last 16 unique tracks played by the current user.'''
items = self.spotify.current_user_recently_played(limit=25)['items']
unique_tracks = list()
for item in items:
# check if this item is already in the list
for t in unique_tracks:
if item['track']['id'] == t['id']:
                    break
else: # this item is not in the track list
new_track = self.track_info_subset(item['track'])
unique_tracks.append(new_track)
            # stop when the track limit is reached
if len(unique_tracks) >= limit:
break
return unique_tracks
def album_collection(self, offset=0):
'''This method returns a list of albums saved in the current user's spotify library.'''
albums = self.spotify.current_user_saved_albums(offset=offset, limit=16)
return self.album_list_info(albums)
def artists_followed(self):
        '''This method returns a list of the first 16 artists followed by the current user.
        Note: the spotify API doesn't provide offset or total fields for this operation.'''
artists = self.spotify.current_user_followed_artists(limit=16)['artists']
return self.artist_list_info(artists)
def playlist_collection(self):
'''This method returns a list of playlists saved in the current user's spotify library.'''
playlists = self.spotify.current_user_playlists(limit=16)
return self.info_for_playlists(playlists)
def saved_tracks(self, offset):
        '''This method returns a list of tracks liked on Spotify by the current user.'''
tracks = self.spotify.current_user_saved_tracks(offset=offset, limit=16)
return self.track_list_info(tracks)
def artist_albums(self, artist_id, offset=0):
'''This method returns a list of up to 16 albums by a given artist.
The offset parameter is used to get a different set of albums.'''
albums = self.spotify.artist_albums(artist_id, limit=16, offset=offset)
return self.album_list_info(albums)
def album_tracks(self, album_id):
'''This method returns a list of all tracks on a given album.'''
album = self.spotify.album(album_id) # get the album
tracks = album['tracks']['items'] # get the tracks from that album
track_list = list()
for track in tracks:
# pass in the album name and cover art, as that info is not stored with each track
track_list.append(self.track_info_subset(track, album['name'], album['images'][0]['url']))
return {'track_list': track_list,
'first': album['tracks']['offset'] + 1,
'last' : album['tracks']['offset'] + len(album['tracks']['items']),
'total': album['tracks']['total']
}
def artist_tracks(self, artist_id):
'''This method returns a list of the top 10 tracks for a given artist.'''
tracks = self.spotify.artist_top_tracks(artist_id)['tracks']
track_list = list()
for track in tracks:
track_list.append(self.track_info_subset(track))
return track_list
def playlist_tracks(self, playlist_id, offset):
'''This method returns a list of up to 16 tracks from a given playlist.
The offset parameter is used to get a different set of tracks.'''
tracks = self.spotify.playlist_tracks(playlist_id, offset=offset, limit=16)
return self.track_list_info(tracks)
def track_info(self, track_id):
'''This method returns the information for a single spotify track.'''
track = self.spotify.track(track_id)
return self.track_info_subset(track)
def search(self, search_term, content_type, offset=0):
'''This method searches spotify for items matching the given search term.
The content_type can be 'album', 'artist', 'playlist', or 'track'.
The offset parameter can be used to get additional pages of matching items.'''
results = self.spotify.search(q=search_term, limit=16, offset=offset, type=content_type, market='US')
if content_type == 'artist':
return self.artist_list_info(results['artists'])
if content_type == 'album':
return self.album_list_info(results['albums'])
if content_type == 'playlist':
return self.info_for_playlists(results['playlists'])
if content_type == 'track':
return self.track_list_info(results['tracks'])
return None
###########################################
# end of the Spotify_API class
###########################################
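# Illustrative use of the wrapper above (assumes the user has completed the Spotify
# sign-in flow so that this.spotify_api is initialized):
#
#   results = this.spotify_api.search("Norah Jones", "album")
#   # results['album_list'] -> list of dicts with id, album_name, artist_name, cover_art
#   # results['first'], results['last'], results['total'] -> paging indices for the next page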
def spotify_token(user):
'''This routine returns the user's spotify token for use by the audio player.
If the user does not have a token, this function returns None.'''
if this.spotify_api is None:
return None
if user == this.spotify_api.user:
return this.spotify_api.cache_handler.get_cached_token()
else:
return None
#################################################################
# DJANGO VIEWS TO ACCESS SPOTIFY RESOURCES                      #
#################################################################
def spotify_sign_in(request):
'''This view coordinates spotify authorization for the user'''
user = request.user
# Step 1: initialize cache and authorization managers
cache_path = os.path.join(CACHE_FOLDER, user.username)
cache_handler = spotipy.cache_handler.CacheFileHandler(cache_path=cache_path)
auth_manager = spotipy.oauth2.SpotifyOAuth(client_id=cred.client_ID,
client_secret= cred.client_SECRET,
redirect_uri=cred.redirect_url,
scope="streaming, user-modify-playback-state, user-read-playback-state, user-read-currently-playing, user-read-recently-played, user-library-read user-follow-read playlist-modify-private",
cache_handler=cache_handler,
show_dialog=True)
#print("auth manager created")
if request.GET.get("code"):
# Step 3. Being redirected from Spotify auth page
auth_manager.get_access_token(request.GET.get("code"))
print("code obtained")
return redirect('App:spotify_sign_in')
if not auth_manager.validate_token(cache_handler.get_cached_token()):
        # Step 2. Redirect to the Spotify sign-in page when there is no valid token
#print("requesting authorization")
auth_url = auth_manager.get_authorize_url()
return redirect(auth_url)
    # Step 4. Signed in, display the user's Liked Songs
this.spotify_api = Spotify_Api(auth_manager, cache_handler, user)
#print("API initialized")
if not user.has_spotify_token:
user.has_spotify_token = True
#print("saving token")
user.save()
#print("obtaining liked songs")
tracks = this.spotify_api.saved_tracks(offset=0)
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Your Liked Songs",
"tracks": tracks['track_list'],
"first" : tracks['first'],
"last" : tracks['last'],
"total" : tracks['total']
})
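# Illustrative request flow for spotify_sign_in (paths and URLs are examples only):
#   1. GET /spotify_sign_in          -> no cached token, redirect to the Spotify authorize URL
#   2. Spotify redirects back to redirect_uri with ?code=<authorization code>
#   3. GET /spotify_sign_in?code=... -> the code is exchanged for a token and cached under MEDIA_ROOT/.spotify_caches/<username>
#   4. GET /spotify_sign_in          -> the cached token validates, Spotify_Api is built and Liked Songs are rendered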
def spotify_sign_out(request):
'''This view signs a user out of Spotify'''
user = request.user
if this.spotify_api is not None:
if user == this.spotify_api.user:
# force the next user to start a new API
this.spotify_api = None
cache_path = os.path.join(CACHE_FOLDER, user.username)
try:
# Remove the CACHE file with the token so that a new user can authorize.
os.remove(cache_path)
except OSError as e:
print ("Error: %s - %s." % (e.filename, e.strerror))
if user.has_spotify_token:
# clear the flag indicating that the user has a token
user.has_spotify_token = False
user.save()
return redirect('App:home')
def spotify_recently_played(request):
'''This view displays a list of user's recently played tracks'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Your Recently Played Songs",
"tracks": this.spotify_api.recently_played_tracks(),
"first" : 1,
"last" : 16,
"total" : 16
})
def spotify_followed_artists(request):
'''This view displays the Spotify artists followed by the user'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
artists = this.spotify_api.artists_followed()
return render(request, "spotify_artist_list.html", {
"spotify_user": this.spotify_api.current_username(),
'artist_list_description': "Your Followed Artists",
'artist_list': artists['artist_list'],
'first': artists['first'],
'last' : artists['last'],
'total': artists['total']
})
def spotify_liked_songs(request):
'''This view displays the Spotify songs liked by the user'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
offset = request.GET.get('offset')
if offset is None:
offset = 0
tracks = this.spotify_api.saved_tracks(offset)
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Your Liked Songs",
"tracks": tracks['track_list'],
"first" : tracks['first'],
"last" : tracks['last'],
"total" : tracks['total']
})
def spotify_saved_albums(request):
'''This view displays the Spotify albums saved by the user'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
offset = request.GET.get('offset')
if offset is None:
offset = 0
albums = this.spotify_api.album_collection(offset)
return render(request, "spotify_album_list.html", {
"spotify_user": this.spotify_api.current_username(),
'album_list_description': "Your Saved Albums",
"album_list": albums['album_list'],
"first": albums['first'],
"last": albums['last'],
"total": albums['total'],
})
def spotify_saved_playlists(request):
    '''This view displays the Spotify playlists saved by the user'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
playlists = this.spotify_api.playlist_collection()
return render(request, "spotify_playlists.html", {
"spotify_user": this.spotify_api.current_username(),
'playlists_description': "Your Saved Playlists",
"playlists": playlists['list_of_playlists'],
"first": playlists['first'],
"last" : playlists['last'],
"total": playlists['total']
})
def spotify_search(request):
'''This view allows the user to search for Spotify content'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
if request.method == "GET":
# display the form for the user to enter search options
form = SpotifySearchForm()
return render(request, "spotify_search.html", {'form': form})
else:
form = SpotifySearchForm(request.POST)
if form.is_valid():
# get data from the form
st = form.cleaned_data['search_term']
ct = form.cleaned_data['content_type']
# redirect to the appropriate view.
            # This redirect step allows the offset query parameter to be used later to fetch additional pages of results
if ct == 'artist':
return redirect('App:spotify_search_artists', st)
if ct == 'album':
return redirect('App:spotify_search_albums', st)
if ct == 'playlist':
return redirect('App:spotify_search_playlists', st)
if ct == 'track':
return redirect('App:spotify_search_tracks', st)
else:
# display error on form
return render(request, 'spotify_search.html', {
'form': form,
'error': "Invalid data submitted."})
def spotify_search_albums(request, search_term):
'''This view obtains spotify albums that match the search term'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
# get the offset query parameter from the URL
offset = request.GET.get('offset')
if offset is None:
offset = 0
# get results using the spotify API
results = this.spotify_api.search(search_term, 'album', offset)
# render those results
return render(request, "spotify_album_list.html", {
"spotify_user": this.spotify_api.current_username(),
'album_list_description': "Albums Matching - " + search_term,
"album_list": results['album_list'],
"first": results['first'],
"last": results['last'],
"total": results['total']})
def spotify_search_artists(request, search_term):
'''This view obtains spotify artists that match the search term'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
# get the offset query parameter from the URL
offset = request.GET.get('offset')
if offset is None:
offset = 0
# get results using the spotify API
results = this.spotify_api.search(search_term, 'artist', offset)
# render those results
return render(request, "spotify_artist_list.html", {
"spotify_user": this.spotify_api.current_username(),
'artist_list_description': "Artists Matching - " + search_term,
"artist_list": results['artist_list'],
"first": results['first'],
"last" : results['last'],
"total": results['total']})
def spotify_search_playlists(request, search_term):
'''This view obtains spotify playlists that match the search term'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
# get the offset query parameter from the URL
offset = request.GET.get('offset')
if offset is None:
offset = 0
# get results using the spotify API
results = this.spotify_api.search(search_term, 'playlist', offset)
# render those results
return render(request, "spotify_playlists.html", {
"spotify_user": this.spotify_api.current_username(),
'playlists_description': "Playlists Matching - " + search_term,
"playlists": results['list_of_playlists'],
"first": results['first'],
"last": results['last'],
"total": results['total']})
def spotify_search_tracks(request, search_term):
'''This view obtains spotify tracks that match the search term'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
# get the offset query parameter from the URL
offset = request.GET.get('offset')
if offset is None:
offset = 0
    # get results using the spotify API
results = this.spotify_api.search(search_term, 'track', offset)
# render those results
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'artist_list_description': "Tracks Matching - " + search_term,
"tracks": results['track_list'],
"first": results['first'],
"last": results['last'],
"total": results['total']})
def spotify_playlist_tracks (request, playlist_id):
    '''This view displays a list of tracks in a specific Spotify playlist'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
offset = request.GET.get('offset')
if offset is None:
offset = 0
tracks = this.spotify_api.playlist_tracks(playlist_id, offset)
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Playlist Contents",
"tracks": tracks['track_list'],
"first" : tracks['first'],
"last" : tracks['last'],
"total" : tracks['total']
})
def spotify_album_tracks (request, album_id):
'''This view displays a list of tracks on a specific Spotify album'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
tracks = this.spotify_api.album_tracks(album_id)
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Album Contents",
"tracks": tracks['track_list'],
"first" : tracks['first'],
"last" : tracks['last'],
"total" : tracks['total']
})
def spotify_artist_tracks (request, artist_id):
    '''This view displays the top tracks by a specific Spotify artist'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
track_list = this.spotify_api.artist_tracks(artist_id)
return render(request, "spotify_track_list.html", {
"spotify_user": this.spotify_api.current_username(),
'track_list_description': "Top Ten Tracks by Artist",
"tracks": track_list,
"first" : 1,
"last" : 10,
"total" : 10
})
def spotify_artist_albums (request, artist_id):
'''This view displays the albums by a specific Spotify artist'''
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
offset = request.GET.get('offset')
if offset is None:
offset = 0
artist_albums = this.spotify_api.artist_albums(artist_id, offset)
return render(request, "spotify_album_list.html", {
"spotify_user": this.spotify_api.current_username(),
'album_list_description': "Albums by Artist",
"album_list": artist_albums['album_list'],
"first": artist_albums['first'],
"last": artist_albums['last'],
"total": artist_albums['total'],
})
def add_spotify_track(request, track_id):
'''This view adds a Spotify track into the Studio song database'''
from App.views.song_crud import authorized
# must be an administrator or teacher to add songs
if not authorized(request.user):
return render(request, 'permission_denied.html')
if this.spotify_api is None:
return render(request, 'not_signed_in_spotify.html')
track = this.spotify_api.track_info(track_id)
image_link = track['cover_art']
if request.method == "GET":
track_input = SpotifyTrackInput(
track_id = track['id'],
title = track['name'],
artist = track['artist_name'])
form = SpotifyTrackInputForm(instance=track_input)
return render(request, 'add_song.html', {'form': form, 'cover_art': image_link})
else: # process data submitted from the form
form = SpotifyTrackInputForm(request.POST)
# if form data invalid, display an error on the form
if not form.is_valid():
return render(request, 'add_song.html', {'form':SpotifyTrackInputForm(), 'error': "Invalid data submitted."})
song_instance = form.save(commit=False)
# create a new Song object
new_song = Song()
        # save the Spotify track id, metadata, dance_type, and holiday/theme
new_song.spotify_track_id = song_instance.track_id
new_song.image_link = image_link
new_song.title = song_instance.title
new_song.artist = song_instance.artist
new_song.dance_type = song_instance.dance_type
new_song.holiday = song_instance.holiday
new_song.save()
print(new_song)
# return to list of songs
return redirect('App:all_songs') |
the-stack_106_26342 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import drf_yasg.openapi as openapi
import logging
import numpy as np
import pathlib
import os
from collections import Counter
from django.db import IntegrityError
from django.db.models.fields import DecimalField
from django.conf import settings
from drf_yasg.utils import swagger_auto_schema
from django.db.models import Q, When, Count, Case, OuterRef, Max, Exists, Value, BooleanField
from rest_framework import generics, status, filters
from rest_framework.exceptions import NotFound, ValidationError as RestValidationError
from rest_framework.parsers import FormParser, JSONParser, MultiPartParser
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import exception_handler
from core.utils.common import conditional_atomic
from core.label_config import config_essential_data_has_changed
from projects.models import (
Project, ProjectSummary
)
from projects.serializers import (
ProjectSerializer, ProjectLabelConfigSerializer, ProjectSummarySerializer
)
from tasks.models import Task, Annotation, Prediction, TaskLock
from tasks.serializers import TaskSerializer, TaskWithAnnotationsAndPredictionsAndDraftsSerializer
from core.mixins import APIViewVirtualRedirectMixin, APIViewVirtualMethodMixin
from core.permissions import all_permissions, ViewClassPermission
from core.utils.common import (
get_object_with_check_and_log, bool_from_request, paginator, paginator_help)
from core.utils.exceptions import ProjectExistException, LabelStudioDatabaseException
from core.utils.io import find_dir, find_file, read_yaml
from data_manager.functions import get_prepared_queryset
from data_manager.models import View
logger = logging.getLogger(__name__)
_result_schema = openapi.Schema(
title='Labeling result',
description='Labeling result (choices, labels, bounding boxes, etc.)',
type=openapi.TYPE_OBJECT,
    properties={
'from_name': openapi.Schema(
title='from_name',
description='The name of the labeling tag from the project config',
type=openapi.TYPE_STRING
),
'to_name': openapi.Schema(
title='to_name',
description='The name of the labeling tag from the project config',
type=openapi.TYPE_STRING
),
'value': openapi.Schema(
title='value',
description='Labeling result value. Format depends on chosen ML backend',
type=openapi.TYPE_OBJECT
)
},
example={
'from_name': 'image_class',
'to_name': 'image',
'value': {
'labels': ['Cat']
}
}
)
_task_data_schema = openapi.Schema(
title='Task data',
description='Task data',
type=openapi.TYPE_OBJECT,
example={
'id': 1,
'my_image_url': 'https://app.heartex.ai/static/samples/kittens.jpg'
}
)
class ProjectListAPI(generics.ListCreateAPIView):
"""
get:
List your projects
Return a list of the projects that you've created.
post:
Create new project
Create a labeling project.
"""
parser_classes = (JSONParser, FormParser, MultiPartParser)
serializer_class = ProjectSerializer
filter_backends = [filters.OrderingFilter]
permission_required = ViewClassPermission(
GET=all_permissions.projects_view,
POST=all_permissions.projects_create,
)
ordering = ['-created_at']
def get_queryset(self):
return Project.objects.with_counts().filter(organization=self.request.user.active_organization)
def get_serializer_context(self):
context = super(ProjectListAPI, self).get_serializer_context()
context['created_by'] = self.request.user
return context
def perform_create(self, ser):
try:
project = ser.save(organization=self.request.user.active_organization)
except IntegrityError as e:
if str(e) == 'UNIQUE constraint failed: project.title, project.created_by_id':
raise ProjectExistException('Project with the same name already exists: {}'.
format(ser.validated_data.get('title', '')))
raise LabelStudioDatabaseException('Database error during project creation. Try again.')
@swagger_auto_schema(tags=['Projects'])
def get(self, request, *args, **kwargs):
return super(ProjectListAPI, self).get(request, *args, **kwargs)
@swagger_auto_schema(tags=['Projects'], request_body=ProjectSerializer)
def post(self, request, *args, **kwargs):
return super(ProjectListAPI, self).post(request, *args, **kwargs)
class ProjectAPI(APIViewVirtualRedirectMixin,
APIViewVirtualMethodMixin,
generics.RetrieveUpdateDestroyAPIView):
"""
get:
Get project by ID
Retrieve information about a project by ID.
patch:
Update project
Update project settings for a specific project.
delete:
Delete project
Delete a project by specified project ID.
"""
parser_classes = (JSONParser, FormParser, MultiPartParser)
queryset = Project.objects.with_counts()
permission_required = ViewClassPermission(
GET=all_permissions.projects_view,
DELETE=all_permissions.projects_delete,
PATCH=all_permissions.projects_change,
PUT=all_permissions.projects_change,
POST=all_permissions.projects_create,
)
serializer_class = ProjectSerializer
redirect_route = 'projects:project-detail'
redirect_kwarg = 'pk'
def get_queryset(self):
return Project.objects.with_counts().filter(organization=self.request.user.active_organization)
@swagger_auto_schema(tags=['Projects'])
def get(self, request, *args, **kwargs):
return super(ProjectAPI, self).get(request, *args, **kwargs)
@swagger_auto_schema(tags=['Projects'])
def delete(self, request, *args, **kwargs):
return super(ProjectAPI, self).delete(request, *args, **kwargs)
@swagger_auto_schema(tags=['Projects'], request_body=ProjectSerializer)
def patch(self, request, *args, **kwargs):
project = self.get_object()
label_config = self.request.data.get('label_config')
# config changes can break view, so we need to reset them
if label_config:
try:
has_changes = config_essential_data_has_changed(label_config, project.label_config)
except KeyError:
pass
else:
if has_changes:
View.objects.filter(project=project).all().delete()
return super(ProjectAPI, self).patch(request, *args, **kwargs)
def perform_destroy(self, instance):
"""Performance optimization for whole project deletion
if we catch constraint error fallback to regular .delete() method"""
try:
task_annotation_qs = Annotation.objects.filter(task__project_id=instance.id)
task_annotation_qs._raw_delete(task_annotation_qs.db)
task_prediction_qs = Prediction.objects.filter(task__project_id=instance.id)
task_prediction_qs._raw_delete(task_prediction_qs.db)
task_locks_qs = TaskLock.objects.filter(task__project_id=instance.id)
task_locks_qs._raw_delete(task_locks_qs.db)
task_qs = Task.objects.filter(project_id=instance.id)
task_qs._raw_delete(task_qs.db)
instance.delete()
except IntegrityError as e:
logger.error('Fallback to cascade deleting after integrity_error: {}'.format(str(e)))
instance.delete()
@swagger_auto_schema(auto_schema=None)
def post(self, request, *args, **kwargs):
return super(ProjectAPI, self).post(request, *args, **kwargs)
@swagger_auto_schema(auto_schema=None)
def put(self, request, *args, **kwargs):
return super(ProjectAPI, self).put(request, *args, **kwargs)
class ProjectNextTaskAPI(generics.RetrieveAPIView):
"""get:
Get next task to label
Get the next task for labeling. If you enable Machine Learning in
your project, the response might include a "predictions"
field. It contains a machine learning prediction result for
this task.
"""
permission_required = all_permissions.tasks_view
serializer_class = TaskWithAnnotationsAndPredictionsAndDraftsSerializer # using it for swagger API docs
def _get_random_unlocked(self, task_query, upper_limit=None):
# get random task from task query, ignoring locked tasks
n = task_query.count()
if n > 0:
upper_limit = upper_limit or n
random_indices = np.random.permutation(upper_limit)
task_query_only = task_query.only('overlap', 'id')
for i in random_indices:
try:
task = task_query_only[int(i)]
except IndexError as exc:
logger.error(f'Task query out of range for {int(i)}, count={task_query_only.count()}. '
f'Reason: {exc}', exc_info=True)
except Exception as exc:
logger.error(exc, exc_info=True)
else:
try:
task = Task.objects.select_for_update(skip_locked=True).get(pk=task.id)
if not task.has_lock():
return task
except Task.DoesNotExist:
logger.debug('Task with id {} locked'.format(task.id))
def _get_first_unlocked(self, tasks_query):
# Skip tasks that are locked due to being taken by collaborators
for task_id in tasks_query.values_list('id', flat=True):
try:
task = Task.objects.select_for_update(skip_locked=True).get(pk=task_id)
if not task.has_lock():
return task
except Task.DoesNotExist:
logger.debug('Task with id {} locked'.format(task_id))
def _try_ground_truth(self, tasks, project):
"""Returns task from ground truth set"""
ground_truth = Annotation.objects.filter(task=OuterRef('pk'), ground_truth=True)
not_solved_tasks_with_ground_truths = tasks.annotate(
has_ground_truths=Exists(ground_truth)).filter(has_ground_truths=True)
if not_solved_tasks_with_ground_truths.exists():
if project.sampling == project.SEQUENCE:
return self._get_first_unlocked(not_solved_tasks_with_ground_truths)
return self._get_random_unlocked(not_solved_tasks_with_ground_truths)
def _try_tasks_with_overlap(self, tasks):
"""Filter out tasks without overlap (doesn't return next task)"""
tasks_with_overlap = tasks.filter(overlap__gt=1)
if tasks_with_overlap.exists():
return None, tasks_with_overlap
else:
return None, tasks.filter(overlap=1)
def _try_breadth_first(self, tasks):
"""Try to find tasks with maximum amount of annotations, since we are trying to label tasks as fast as possible
"""
# =======
# This commented part is trying to solve breadth-first in a bit different way:
# it selects first task where any amount of annotations have been already created
# we've left it here to be able to select it through the project settings later
# =======
# annotations = Annotation.objects.filter(task=OuterRef('pk'), ground_truth=False)
# not_solved_tasks_labeling_started = tasks.annotate(labeling_started=Exists(annotations))
# not_solved_tasks_labeling_started_true = not_solved_tasks_labeling_started.filter(labeling_started=True)
# if not_solved_tasks_labeling_started_true.exists():
# # try to complete tasks that are already in progress
# next_task = self._get_random(not_solved_tasks_labeling_started_true)
# return next_task
tasks = tasks.annotate(annotations_count=Count('annotations'))
max_annotations_count = tasks.aggregate(Max('annotations_count'))['annotations_count__max']
if max_annotations_count == 0:
            # no labeled tasks found
return
# find any task with maximal amount of created annotations
not_solved_tasks_labeling_started = tasks.annotate(
reach_max_annotations_count=Case(
When(annotations_count=max_annotations_count, then=Value(True)),
default=Value(False),
output_field=BooleanField()))
not_solved_tasks_labeling_with_max_annotations = not_solved_tasks_labeling_started.filter(
reach_max_annotations_count=True)
if not_solved_tasks_labeling_with_max_annotations.exists():
# try to complete tasks that are already in progress
return self._get_random_unlocked(not_solved_tasks_labeling_with_max_annotations)
def _try_uncertainty_sampling(self, tasks, project, user_solved_tasks_array):
task_with_current_predictions = tasks.filter(predictions__model_version=project.model_version)
if task_with_current_predictions.exists():
logger.debug('Use uncertainty sampling')
            # collect all clusters already solved by the user and count the number of solved tasks in each
user_solved_clusters = project.prepared_tasks.filter(pk__in=user_solved_tasks_array).annotate(
cluster=Max('predictions__cluster')).values_list('cluster', flat=True)
user_solved_clusters = Counter(user_solved_clusters)
            # order tasks by how many tasks have already been solved in their cluster
cluster_num_solved_map = [When(predictions__cluster=k, then=v) for k, v in user_solved_clusters.items()]
            num_tasks_with_current_predictions = task_with_current_predictions.count()  # WARNING! this call doesn't work after a subsequent annotate
if cluster_num_solved_map:
task_with_current_predictions = task_with_current_predictions.annotate(
cluster_num_solved=Case(*cluster_num_solved_map, default=0, output_field=DecimalField()))
# next task is chosen from least solved cluster and with lowest prediction score
possible_next_tasks = task_with_current_predictions.order_by('cluster_num_solved', 'predictions__score')
else:
possible_next_tasks = task_with_current_predictions.order_by('predictions__score')
num_annotators = project.annotators().count()
if num_annotators > 1 and num_tasks_with_current_predictions > 0:
# try to randomize tasks to avoid concurrent labeling between several annotators
next_task = self._get_random_unlocked(
possible_next_tasks, upper_limit=min(num_annotators + 1, num_tasks_with_current_predictions))
else:
next_task = self._get_first_unlocked(possible_next_tasks)
else:
# uncertainty sampling fallback: choose by random sampling
            logger.debug(f'Uncertainty sampling falls back to random sampling '
f'(current project.model_version={str(project.model_version)})')
next_task = self._get_random_unlocked(tasks)
return next_task
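    # Illustrative example of the cluster weighting above (hypothetical values):
    # if the user has already solved 5 tasks from cluster 3 and 2 tasks from cluster 7,
    # user_solved_clusters == Counter({3: 5, 7: 2}) and cluster_num_solved_map becomes
    # [When(predictions__cluster=3, then=5), When(predictions__cluster=7, then=2)],
    # so tasks from the least-solved cluster (and with the lowest prediction score) are offered first.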
def _make_response(self, next_task, request, use_task_lock=True):
"""Once next task has chosen, this function triggers inference and prepare the API response"""
user = request.user
project = next_task.project
if use_task_lock:
            # set a lock on the task with a TTL of 3x the current average lead time (or 1 hour by default)
next_task.set_lock(request.user)
# call machine learning api and format response
if project.show_collab_predictions and not next_task.predictions.exists():
for ml_backend in project.ml_backends.all():
ml_backend.predict_one_task(next_task)
# serialize task
context = {'request': request, 'project': project, 'resolve_uri': True,
'proxy': bool_from_request(request.GET, 'proxy', True)}
serializer = TaskWithAnnotationsAndPredictionsAndDraftsSerializer(next_task, context=context)
response = serializer.data
annotations = []
for c in response.get('annotations', []):
if c.get('completed_by') == user.id and not (c.get('ground_truth') or c.get('honeypot')):
annotations.append(c)
response['annotations'] = annotations
# remove all predictions if we don't want to show it in the label stream
if not project.show_collab_predictions:
response['predictions'] = []
return Response(response)
@swagger_auto_schema(
tags=['Projects'], responses={200: TaskWithAnnotationsAndPredictionsAndDraftsSerializer()}
)
def get(self, request, *args, **kwargs):
project = get_object_with_check_and_log(request, Project, pk=self.kwargs['pk'])
self.check_object_permissions(request, project)
user = request.user
# support actions api call from actions/next_task.py
if hasattr(self, 'prepared_tasks'):
project.prepared_tasks = self.prepared_tasks
# get prepared tasks from request params (filters, selected items)
else:
project.prepared_tasks = get_prepared_queryset(self.request, project)
# detect solved and not solved tasks
user_solved_tasks_array = user.annotations.filter(ground_truth=False).filter(
Q(task__isnull=False)).values_list('task__pk', flat=True)
with conditional_atomic():
not_solved_tasks = project.prepared_tasks.\
exclude(pk__in=user_solved_tasks_array)
            # if the annotator is assigned to tasks, they must solve them regardless of is_labeled=True
assigned_flag = hasattr(self, 'assignee_flag') and self.assignee_flag
if not assigned_flag:
not_solved_tasks = not_solved_tasks.annotate(
annotation_number=Count('annotations', filter=Q(annotations__ground_truth=False), distinct=True)
).filter(
annotation_number__lt=project.maximum_annotations
)
not_solved_tasks_count = not_solved_tasks.count()
            # return nothing if no tasks remain
if not_solved_tasks_count == 0:
raise NotFound(f'There are no tasks remaining to be annotated by the user={user}')
logger.debug(f'{not_solved_tasks_count} tasks that still need to be annotated for user={user}')
# ordered by data manager
if assigned_flag:
next_task = not_solved_tasks.first()
if not next_task:
raise NotFound('No more tasks found')
return self._make_response(next_task, request, use_task_lock=False)
            # If the current user has already locked a task, return it (without setting the lock again)
next_task = Task.get_locked_by(user, tasks=not_solved_tasks)
if next_task:
return self._make_response(next_task, request, use_task_lock=False)
if project.show_ground_truth_first:
logger.debug(f'User={request.user} tries ground truth from {not_solved_tasks_count} tasks')
next_task = self._try_ground_truth(not_solved_tasks, project)
if next_task:
return self._make_response(next_task, request)
if project.show_overlap_first:
# don't output anything - just filter tasks with overlap
logger.debug(f'User={request.user} tries overlap first from {not_solved_tasks_count} tasks')
_, not_solved_tasks = self._try_tasks_with_overlap(not_solved_tasks)
            # don't use this mode with data manager sorting, because the resulting order becomes unintuitive
if project.sampling != project.SEQUENCE:
                # if there are any tasks in progress (with the maximum number of annotations), sample randomly from them
                logger.debug(f'User={request.user} tries breadth first from {not_solved_tasks_count} tasks')
next_task = self._try_breadth_first(not_solved_tasks)
if next_task:
return self._make_response(next_task, request)
if project.sampling == project.UNCERTAINTY:
logger.debug(f'User={request.user} tries uncertainty sampling from {not_solved_tasks_count} tasks')
next_task = self._try_uncertainty_sampling(not_solved_tasks, project, user_solved_tasks_array)
elif project.sampling == project.UNIFORM:
logger.debug(f'User={request.user} tries random sampling from {not_solved_tasks_count} tasks')
next_task = self._get_random_unlocked(not_solved_tasks)
elif project.sampling == project.SEQUENCE:
logger.debug(f'User={request.user} tries sequence sampling from {not_solved_tasks_count} tasks')
next_task = self._get_first_unlocked(not_solved_tasks)
if next_task:
return self._make_response(next_task, request)
else:
raise NotFound(
f'There are still some tasks to complete for the user={user}, but they seem to be locked by another user.')
class LabelConfigValidateAPI(generics.CreateAPIView):
parser_classes = (JSONParser, FormParser, MultiPartParser)
permission_classes = (AllowAny,)
serializer_class = ProjectLabelConfigSerializer
@swagger_auto_schema(
tags=['Projects'],
operation_summary='Validate label config',
operation_description='Validate a labeling configuration for a project.',
responses={200: 'Validation success'}
)
def post(self, request, *args, **kwargs):
return super(LabelConfigValidateAPI, self).post(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
try:
serializer.is_valid(raise_exception=True)
except RestValidationError as exc:
context = self.get_exception_handler_context()
response = exception_handler(exc, context)
response = self.finalize_response(request, response)
return response
return Response(status=status.HTTP_204_NO_CONTENT)
class ProjectLabelConfigValidateAPI(generics.RetrieveAPIView):
""" Validate label config
"""
parser_classes = (JSONParser, FormParser, MultiPartParser)
serializer_class = ProjectLabelConfigSerializer
permission_required = all_permissions.projects_change
queryset = Project.objects.all()
@swagger_auto_schema(
tags=['Projects'],
operation_summary='Validate a label config',
manual_parameters=[
openapi.Parameter(
name='label_config',
type=openapi.TYPE_STRING,
in_=openapi.IN_QUERY,
description='labeling config')
])
def post(self, request, *args, **kwargs):
project = self.get_object()
label_config = self.request.data.get('label_config')
if not label_config:
raise RestValidationError('Label config is not set or is empty')
# check new config includes meaningful changes
has_changed = config_essential_data_has_changed(label_config, project.label_config)
project.validate_config(label_config)
return Response({'config_essential_data_has_changed': has_changed}, status=status.HTTP_200_OK)
@swagger_auto_schema(auto_schema=None)
def get(self, request, *args, **kwargs):
return super(ProjectLabelConfigValidateAPI, self).get(request, *args, **kwargs)
class ProjectSummaryAPI(generics.RetrieveAPIView):
parser_classes = (JSONParser,)
serializer_class = ProjectSummarySerializer
permission_required = all_permissions.projects_view
queryset = ProjectSummary.objects.all()
@swagger_auto_schema(tags=['Projects'], operation_summary='Project summary')
def get(self, *args, **kwargs):
return super(ProjectSummaryAPI, self).get(*args, **kwargs)
class TasksListAPI(generics.ListCreateAPIView,
generics.DestroyAPIView,
APIViewVirtualMethodMixin,
APIViewVirtualRedirectMixin):
"""
get:
List project tasks
Paginated list of tasks for a specific project.
delete:
Delete all tasks
Delete all tasks from a specific project.
"""
parser_classes = (JSONParser, FormParser)
permission_required = ViewClassPermission(
GET=all_permissions.tasks_view,
POST=all_permissions.tasks_change,
DELETE=all_permissions.tasks_delete,
)
serializer_class = TaskSerializer
redirect_route = 'projects:project-settings'
redirect_kwarg = 'pk'
def get_queryset(self):
project = generics.get_object_or_404(Project.objects.for_user(self.request.user), pk=self.kwargs.get('pk', 0))
tasks = Task.objects.filter(project=project)
return paginator(tasks, self.request)
@swagger_auto_schema(tags=['Projects'])
def delete(self, request, *args, **kwargs):
project = generics.get_object_or_404(Project.objects.for_user(self.request.user), pk=self.kwargs['pk'])
Task.objects.filter(project=project).delete()
return Response(status=204)
@swagger_auto_schema(**paginator_help('tasks', 'Projects'))
def get(self, *args, **kwargs):
return super(TasksListAPI, self).get(*args, **kwargs)
@swagger_auto_schema(auto_schema=None, tags=['Projects'])
def post(self, *args, **kwargs):
return super(TasksListAPI, self).post(*args, **kwargs)
def get_serializer_context(self):
context = super(TasksListAPI, self).get_serializer_context()
context['project'] = get_object_with_check_and_log(self.request, Project, pk=self.kwargs['pk'])
return context
def perform_create(self, serializer):
project = get_object_with_check_and_log(self.request, Project, pk=self.kwargs['pk'])
serializer.save(project=project)
class TemplateListAPI(generics.ListAPIView):
parser_classes = (JSONParser, FormParser, MultiPartParser)
permission_required = all_permissions.projects_view
swagger_schema = None
def list(self, request, *args, **kwargs):
annotation_templates_dir = find_dir('annotation_templates')
configs = []
for config_file in pathlib.Path(annotation_templates_dir).glob('**/*.yml'):
config = read_yaml(config_file)
if config.get('image', '').startswith('/static') and settings.HOSTNAME:
# if hostname set manually, create full image urls
config['image'] = settings.HOSTNAME + config['image']
configs.append(config)
template_groups_file = find_file(os.path.join('annotation_templates', 'groups.txt'))
with open(template_groups_file, encoding='utf-8') as f:
groups = f.read().splitlines()
logger.debug(f'{len(configs)} templates found.')
return Response({'templates': configs, 'groups': groups})
class ProjectSampleTask(generics.RetrieveAPIView):
parser_classes = (JSONParser,)
queryset = Project.objects.all()
permission_required = all_permissions.projects_view
serializer_class = ProjectSerializer
swagger_schema = None
def post(self, request, *args, **kwargs):
label_config = self.request.data.get('label_config')
if not label_config:
raise RestValidationError('Label config is not set or is empty')
project = self.get_object()
return Response({'sample_task': project.get_sample_task(label_config)}, status=200)
|
the-stack_106_26343 | # coding=utf-8
import numpy as np
from snntoolbox.datasets.utils import get_dataset
class TestGetDataset:
"""Test obtaining the dataset from disk in correct format."""
def test_get_dataset_from_npz(self, _datapath, _config):
data = np.random.random_sample((1, 1, 1, 1))
np.savez_compressed(str(_datapath.join('x_norm')), data)
np.savez_compressed(str(_datapath.join('x_test')), data)
np.savez_compressed(str(_datapath.join('y_test')), data)
_config.set('paths', 'dataset_path', str(_datapath))
normset, testset = get_dataset(_config)
assert all([normset, testset])
def test_get_dataset_from_jpg(self, _datapath, _config):
try:
import matplotlib.pyplot as plt
except ImportError:
            # matplotlib is not available, so skip this test
            return
classpath = _datapath.mkdir('class_0')
data = np.random.random_sample((10, 10, 3))
plt.imsave(str(classpath.join('image_0.jpg')), data)
_config.read_dict(
{'paths': {'dataset_path': str(_datapath)},
'input': {'dataset_format': 'jpg',
'dataflow_kwargs': "{'target_size': (11, 12)}",
'datagen_kwargs': "{'rescale': 0.003922,"
" 'featurewise_center': True,"
" 'featurewise_std_normalization':"
" True}"}})
normset, testset = get_dataset(_config)
assert all([normset, testset])
|
the-stack_106_26344 | from __future__ import annotations
import asyncio
import collections.abc
import re
import time
from typing import (
Any,
AsyncGenerator,
Awaitable,
Callable,
Dict,
Optional,
Sequence,
Type,
TypeVar,
)
from bluesky.protocols import Descriptor, Dtype, Reading
from ophyd.v2.core import (
Device,
Signal,
SignalCollector,
SignalDetails,
SignalProvider,
SignalRO,
SignalRW,
SignalSourcer,
SignalWO,
SignalX,
T,
check_no_args,
)
# TODO: use the one in pvi
PASCAL_CASE_REGEX = re.compile(r"(?<![A-Z])[A-Z]|[A-Z][a-z/d]|(?<=[a-z])\d")
def to_snake_case(pascal_s: str) -> str:
    Takes a PascalCaseFieldName and returns a snake_case_field_name
Args:
pascal_s: E.g. PascalCaseFieldName
Returns:
snake_case converted name. E.g. pascal_case_field_name
"""
return PASCAL_CASE_REGEX.sub(lambda m: "_" + m.group().lower(), pascal_s).strip("_")
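# Example (matches the docstring above):
#
#   >>> to_snake_case("PascalCaseFieldName")
#   'pascal_case_field_name'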
SetCallback = Callable[[Any], Awaitable[None]]
CallCallback = Callable[[], Awaitable[None]]
primitive_dtypes: Dict[type, Dtype] = {
str: "string",
int: "integer",
float: "number",
bool: "boolean",
}
def make_descriptor(source: Optional[str], value) -> Descriptor:
assert source, "Not connected"
try:
dtype = primitive_dtypes[type(value)]
shape = []
except KeyError:
assert isinstance(value, Sequence), f"Can't get dtype for {type(value)}"
dtype = "array"
shape = [len(value)]
return dict(source=source, dtype=dtype, shape=shape)
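# Illustrative outputs (source strings and values are made up):
#
#   >>> make_descriptor("sim://motor.readback", 3.5)
#   {'source': 'sim://motor.readback', 'dtype': 'number', 'shape': []}
#   >>> make_descriptor("sim://waveform", [1, 2, 3])
#   {'source': 'sim://waveform', 'dtype': 'array', 'shape': [3]}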
class _SimStore:
def __init__(self):
self.on_set: Dict[int, SetCallback] = {}
self.on_call: Dict[int, CallCallback] = {}
self.values: Dict[int, Any] = {}
self.events: Dict[int, asyncio.Event] = {}
def set_value(self, signal_id: int, value):
self.values[signal_id] = value
self.events[signal_id].set()
self.events[signal_id] = asyncio.Event()
class SimSignal(Signal):
def __init__(self, store: _SimStore):
self._store = store
self._source: Optional[str] = None
@property
def source(self) -> Optional[str]:
return self._source
def set_source(self, source, *args, **kwargs):
self._source = source
check_no_args(args, kwargs)
async def wait_for_connection(self):
while self.source is None:
await asyncio.sleep(0.1)
class SimSignalRO(SignalRO[T], SimSignal):
async def get_descriptor(self) -> Descriptor:
return make_descriptor(self.source, self._store.values[id(self)])
async def get_reading(self) -> Reading:
return Reading(value=self._store.values[id(self)], timestamp=time.time())
async def observe_reading(self) -> AsyncGenerator[Reading, None]:
id_self = id(self)
while True:
yield Reading(value=self._store.values[id_self], timestamp=time.time())
await self._store.events[id_self].wait()
class SimSignalWO(SignalWO[T], SimSignal):
"""Signal that can be put to"""
async def put(self, value: T):
id_self = id(self)
cb = self._store.on_set.get(id_self, None)
if cb:
await cb(value)
self._store.set_value(id_self, value)
class SimSignalRW(SimSignalRO[T], SimSignalWO[T], SignalRW[T]):
pass
class SimSignalX(SignalX, SimSignal):
async def execute(self):
cb = self._store.on_call.get(id(self), None)
if cb:
await cb()
lookup: Dict[Type[Signal], Type[SimSignal]] = {
SignalRO: SimSignalRO,
SignalWO: SimSignalWO,
SignalRW: SimSignalRW,
SignalX: SimSignalX,
}
SetCallbackT = TypeVar("SetCallbackT", bound=SetCallback)
CallCallbackT = TypeVar("CallCallbackT", bound=CallCallback)
class SimProvider(SignalProvider):
@staticmethod
def transport() -> str:
return "sim"
def __init__(self):
self._store = _SimStore()
def on_set(self, signal: SignalWO) -> Callable[[SetCallbackT], SetCallbackT]:
def decorator(cb: SetCallbackT) -> SetCallbackT:
self._store.on_set[id(signal)] = cb
return cb
return decorator
def on_call(self, signal: SignalX) -> Callable[[CallCallbackT], CallCallbackT]:
def decorator(cb: CallCallbackT) -> CallCallbackT:
self._store.on_call[id(signal)] = cb
return cb
return decorator
def get_value(self, signal: SignalRO[T]) -> T:
return self._store.values[id(signal)]
def set_value(self, signal: SignalRO[T], value: T) -> T:
self._store.set_value(id(signal), value)
return value
def create_disconnected_signal(self, details: SignalDetails) -> Signal:
"""Create a disconnected Signal to go in all_signals"""
signal = lookup[details.signal_cls](self._store)
if details.value_type is not None:
origin = getattr(details.value_type, "__origin__", None)
if origin is None:
# str, bool, int, float
assert details.value_type
value = details.value_type()
elif origin is collections.abc.Sequence:
# Sequence[...]
value = ()
elif origin is dict:
# Dict[...]
value = origin()
else:
raise ValueError(f"Can't make {details.value_type}")
self._store.values[id(signal)] = value
self._store.events[id(signal)] = asyncio.Event()
return signal
async def connect_signals(
self, device: Device, signal_prefix: str, sourcer: SignalSourcer
):
# Ignore the sourcer and make our own names
for attr_name, signal in device.all_signals.items():
source = self.canonical_source(signal_prefix + to_snake_case(attr_name))
signal.set_source(source)
@classmethod
def instance(cls) -> Optional[SimProvider]:
provider = SignalCollector.get_provider("sim")
assert provider is None or isinstance(provider, SimProvider)
return provider
|
the-stack_106_26345 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""\
Serialize a btrfs subvolume built by an `image_layer` target into a
portable format (either a file, or a directory with a few files).
At the moment, this only outputs "full" packages -- that is, we do not
support emitting an incremental package relative to a prior `image_layer`.
## How to add support for incremental packages
There is a specific setting, where it is possible to support safe
incremental packaging. First, read on to understand why the general case of
incremental packaging is intrinsically unsafe.
### The incremental package consistency problem
It is technically simple to create incremental outputs:
- `btrfs send -p`
- `tar --create --listed-incremental`
The problem is that it is hard to guarantee consistency between parts of the
incremental stack.
It is reasonable for an end-user to expect this to work correctly, so long
as they build both parts from exactly the same source control version:
- first, they build package A;
- later (perhaps on a different host or repo checkout), they build an
incremental package B that stacks on top of A.
Indeed, this generally works for programming artifacts, because programming
languages define a clear interface for their build artifacts, and the same
source code + build toolchain is GUARANTEED to always produce artifacts that
are interface-compatible with other outputs from the same inputs.
In contrast, a filesystem output of an image build does NOT define such an
interface, which makes it impossible to guarantee consistency. Let's make
this concrete with an example.
Imagine these Buck targets:
- `:parent_subvol`
- `:child_subvol`, with `parent_layer = ":parent_subvol"`
Let's say that `:parent_subvol` contains, among other things, a multi-file
relational DB which stores a table per file, and uses RANDOM keys
internally. The first time we build it, we might get this:
```
$ jq . table_names
{
"randKeyA3": {"name": "cat"},
"randKeyA1": {"name": "dog"},
"randKeyA8": {"name": "gibbon"}
}
$ jq . table_friends
{
"randKeyA3": ["randKeyA1"]
}
```
This database just says that we have 3 animals, and 1 directed friendship
among them (cat -> dog).
You can imagine a second build of `:parent_subvol` which has the same
semantic content:
```
$ jq . table_names
{
"randKeyA6": {"name": "cat"},
"randKeyA5": {"name": "dog"},
"randKeyA1": {"name": "gibbon"}
}
$ jq . table_friends
{
"randKeyA6": ["randKeyA5"]
}
```
Since the random keys are internal to the DB, and not part of its public
API, this is permissible build entropy -- just like "build info" sections in
binary objects, and just like build timestamps.
So from the point of view of Buck, everything is fine.
Now, let's say that in `:child_subvol` we add another friendship to the DB
(gibbon -> dog). Depending on the version of `:parent_subvol` you start
with, building `:child_subvol` will cause you to produce an incremental
package replacing JUST the file `table_friends` with one of these versions:
```
# `:child_subvol` from the first `:parent_subvol` build
$ jq . table_friends
{
"randKeyA3": ["randKeyA1"],
"randKeyA8": ["randKeyA1"]
}
# `:child_subvol` from the second `:parent_subvol` build
$ jq . table_friends
{
"randKeyA6": ["randKeyA5"],
"randKeyA1": ["randKeyA5"],
}
```
Omitting `table_names` from the incremental update is completely fine from
the point of view of the filesystem -- that file hasn't changed in either
build. However, we now have two INCOMPATIBLE build artifacts from the same
source version.
Now, we may end up combining the first version of `:parent_subvol` with the
second version of `:child_subvol`. The incremental update would apply fine,
but the resulting DB would be corrupted.
Worst of all, this could occur quite naturally, e.g.
- An innocent (but not stupid!) user may assume that since builds are
hermetic, build artifacts from the same version are compatible.
- Target-level distributed caching in Buck may cache artifacts from two
different build runs. On the Buck side, T35569915 documents the
intention to make ALL cache retrievals be based only on input keys,
which could actually guarantee the consistency we need, but this is
probably not happening before late 2019, early 2020.
To sum up:
- In practice, builds are almost never bitwise-reproducible. The resulting
filesystem contents of two builds of the same repo state may differ.
When we say a build environment is hermetic we just mean that at runtime,
all of its artifacts work the same way, so long as they were built from
the same repo state.
- Filesystems lack a standard semantic interface, which could guarantee
interoperability between filesystem artifacts from two different builds of
the same "hermetic" environment. Therefore, any kind of "incremental"
package has to be applied against EXACTLY the same filesystem contents,
against which it was built, or the result may be incorrect.
- In a distributed build setting, it's hard to guarantee that incremental
build artifacts will NOT get composed incorrectly.
- So, we choose NOT to support incremental packaging in the general case.
We may revise this decision once Buck's cache handling changes
(T35569915), or if the need for incremental packaging is strong enough to
justify shipping solutions with razor-sharp edges.
### When can we safely build incremental packages?
Before getting to the practically useful solution, let me mention a
less-useful one in passing. It is simple to define a rule type that outputs
a STACK of known-compatible incremental packages. The current code has
commented-out breadcrumbs (see `get_subvolume_on_disk_stack`), while
P60233442 adds ~20 lines of code to materializing an incremental send-stream
stack. This solves the consistency problem, but it's unclear what value
this type of rule provides over a "full" package.
The main use-case for incremental builds is this:
- pieces of widely-used infrastructure are packaged up into a few
common base images,
- custom container images are distributed as incremental add-ons to these
common bases.
In this case, we can side-step the above correctness issues by requiring
that any base `image_layer` for an incremental package must have a "release"
property. This is an assertion that can be verified at build-time, stating
that a content hash of the base layer has been checked into the source
control repo. While the production version of this might look a little
different, this demonstrates the right semantics:
```
$ cat TARGETS
buck_genrule(
name='parent.sendstream',
out='parent.sendstream',
bash='... fetch the sendstream from some blob store ...',
)
image_sendstream_layer(
name='parent',
source=':parent.sendstream',
# The presence of this hash assures us that the filesystem contents are
# fixed, which makes it safe to build incremental snapshots against it.
sendstream_hash={
'sha256':
'4449df7d6848198f310aaffa7f7860b6022017e1913b94b6af86bb618e999480',
},
)
image_layer(
name='child',
parent_layer=':parent',
...
)
image_package(
name='child_from_parent.sendstream',
layer=':child',
# If `:parent` lacked `sendstream_hash`, we would not know it is a
# "release" image, and this `image_package` would fail to build.
incremental_to=':parent',
)
```
Besides tweaks to naming, the main difference I would expect in a production
system is a more automatable way of specifying content hashes for previously
released base images.
Requiring base images to be released adds some conceptual complexity. However,
it is quite reasonable to have post-CI release processes for commonly used
base images. Specific advantages to this include:
- more rigorous testing than is feasible in an at-code-review-time CI/CD system
- the ability to pre-warm caches, thus ensuring nearly instant availability
of the base images.
"""
import argparse
import os
import pwd
import stat
import subprocess
from typing import Mapping, NamedTuple, Optional, Callable
from antlir.nspawn_in_subvol.args import PopenArgs, new_nspawn_opts
from antlir.nspawn_in_subvol.nspawn import popen_nspawn, run_nspawn
from .common import check_popen_returncode, init_logging
from .find_built_subvol import find_built_subvol
from .fs_utils import Path, create_ro, generate_work_dir
from .subvol_utils import Subvol, SubvolOpts, KiB, MiB
class _Opts(NamedTuple):
subvol_opts: SubvolOpts
build_appliance: Optional[Subvol]
size_mb: Optional[int]
volume_label: Optional[str]
class Format:
"A base class that registers its subclasses in NAME_TO_CLASS."
NAME_TO_CLASS: Mapping[str, "Format"] = {}
def __init_subclass__(cls, format_name: str, **kwargs):
super().__init_subclass__(**kwargs)
prev_cls = cls.NAME_TO_CLASS.get(format_name)
if prev_cls:
raise AssertionError(f"{cls} and {prev_cls} share format_name")
cls.NAME_TO_CLASS[format_name] = cls
@classmethod
def make(cls, format_name) -> "Format":
return cls.NAME_TO_CLASS[format_name]()
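# A minimal sketch of how the registry above behaves once the subclasses
# below are defined ("sendstream" is one of the real registered names):
#
#     Format.NAME_TO_CLASS["sendstream"]   # -> the Sendstream class below
#     Format.make("sendstream")            # -> a fresh Sendstream() instance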
class Sendstream(Format, format_name="sendstream"):
"""
Packages the subvolume as a stand-alone (non-incremental) send-stream.
See the script-level docs for details on supporting incremental ones.
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
with create_ro(
output_path, "wb"
) as outfile, subvol.mark_readonly_and_write_sendstream_to_file(
outfile
):
pass
class SendstreamZst(Format, format_name="sendstream.zst"):
"""
Packages the subvolume as a stand-alone (non-incremental) zstd-compressed
send-stream. See the script-level docs for details on supporting incremental
ones.
Future: add general compression support instead of adding `TarballGz`,
`TarballZst`, `SendstreamGz`, etc.
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
with create_ro(output_path, "wb") as outfile, subprocess.Popen(
["zstd", "--stdout"], stdin=subprocess.PIPE, stdout=outfile
) as zst, subvol.mark_readonly_and_write_sendstream_to_file(zst.stdin):
pass
check_popen_returncode(zst)
class SquashfsImage(Format, format_name="squashfs"):
"""
Packages the subvolume as a squashfs-formatted disk image, usage:
mount -t squashfs image.squashfs dest/ -o loop
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
create_ro(output_path, "wb").close() # Ensure non-root ownership
subvol.run_as_root(
[
"mksquashfs",
subvol.path(),
output_path,
"-comp",
"zstd",
"-noappend",
]
)
class BtrfsImage(Format, format_name="btrfs"):
"""
Packages the subvolume as a btrfs-formatted disk image, usage:
mount -t btrfs image.btrfs dest/ -o loop
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
subvol.mark_readonly_and_send_to_new_loopback(
output_path, subvol_opts=opts.subvol_opts
)
class TarballGzipImage(Format, format_name="tar.gz"):
"""
Packages the subvolume as a gzip-compressed tarball, usage:
tar xzf image.tar.gz -C dest/
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
with create_ro(output_path, "wb") as outfile, subprocess.Popen(
["gzip", "--stdout"], stdin=subprocess.PIPE, stdout=outfile
) as gz, subvol.write_tarball_to_file(gz.stdin):
pass
check_popen_returncode(gz)
class CPIOGzipImage(Format, format_name="cpio.gz"):
"""
Packages the subvol as a gzip-compressed cpio.
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
work_dir = generate_work_dir()
# This command is partly based on the recommendations of
# reproducible-builds.org:
# https://reproducible-builds.org/docs/archives/
# Note that this does *not* create a reproducible archive yet.
# For that we need 2 more things:
# - Clearing of the timestamps
# - Setting uid/gid to 0
# Those 2 operations mutate the filesystem. Packaging
# should be transparent and not cause mutations, as such
# those operations should be added as genrule layers (or
# something similar) that mutates the filesystem being
# packaged *before* reaching this point.
create_archive_cmd = [
"/bin/bash",
"-c",
"set -ue -o pipefail;" f"pushd {work_dir} >/dev/null;"
# List all the files except sockets since cpio doesn't
# support them and they don't really mean much outside
# the context of the process that is using it.
"(set -ue -o pipefail; /bin/find . -mindepth 1 ! -type s | "
# Use LANG=C to avoid any surprises that locale might cause
"LANG=C /bin/sort | "
# Create the archive with cpio
"LANG=C /bin/cpio -o -H newc |"
# And finally compress it
"/bin/gzip --stdout)",
]
opts = new_nspawn_opts(
cmd=create_archive_cmd,
layer=opts.build_appliance,
bindmount_rw=[(subvol.path(), work_dir)],
user=pwd.getpwnam("root"),
)
with create_ro(output_path, "wb") as outfile, popen_nspawn(
opts, PopenArgs(stdout=outfile)
):
pass
def _bash_cmd_in_build_appliance(
output_path: str,
opts: _Opts,
subvol: Subvol,
get_bash: Callable[[str, str], str],
):
"""
Spin up a new nspawn build appliance with bind mounts
and run cmd provided by get_bash.
"""
work_dir = generate_work_dir()
output_dir = "/output"
o_basepath, o_file = os.path.split(output_path)
image_path = os.path.join(output_dir, o_file)
cmd = [
"/bin/bash",
"-eux",
"-o",
"pipefail",
"-c",
get_bash(image_path=image_path, work_dir=work_dir),
]
run_nspawn(
new_nspawn_opts(
cmd=cmd,
layer=opts.build_appliance,
bindmount_rw=[
(subvol.path(), work_dir),
(o_basepath, output_dir),
],
user=pwd.getpwuid(os.getuid()),
),
PopenArgs(),
)
class VfatImage(Format, format_name="vfat"):
"""
Packages the subvolume as a vfat-formatted disk image, usage:
mount -t vfat image.vfat dest/ -o loop
NB: vfat is very limited in the file types it supports, thus we only support
packaging regular files/dirs into a vfat image.
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
if opts.size_mb is None:
raise ValueError("size_mb is required when packaging a vfat image")
_bash_cmd_in_build_appliance(
output_path,
opts,
subvol,
lambda *, image_path, work_dir: (
"/usr/sbin/mkfs.vfat {maybe_label} "
"-C {image_path} {image_size}; "
"/usr/bin/mcopy -v -i {image_path} -sp {work_dir}/* ::"
).format(
maybe_label=f"-n {opts.volume_label}"
if opts.volume_label
else "",
image_path=image_path,
# mkfs.vfat takes the size as BLOCK_COUNT (1K Bytes)
# thus passing in "size_mb * KiB" results in "size_mb" MiB
image_size=opts.size_mb * KiB,
work_dir=work_dir,
),
)
class Ext3Image(Format, format_name="ext3"):
"""
Packages the subvolume as an ext3-formatted disk image, usage:
mount -t ext3 image.ext3 dest/ -o loop
"""
def package_full(self, subvol: Subvol, output_path: str, opts: _Opts):
if opts.size_mb is None:
raise ValueError("size_mb is required when packaging an ext3 image")
_bash_cmd_in_build_appliance(
output_path,
opts,
subvol,
lambda *, image_path, work_dir: (
"/usr/bin/truncate -s {image_size}M {image_path}; "
"/usr/sbin/mkfs.ext3 {maybe_label} {image_path}"
" -d {work_dir}"
).format(
maybe_label=f"-L {opts.volume_label}"
if opts.volume_label
else "",
image_path=image_path,
image_size=opts.size_mb,
work_dir=work_dir,
),
)
def parse_args(argv):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--subvolumes-dir",
required=True,
type=Path.from_argparse,
help="A directory on a btrfs volume, where all the subvolume wrapper "
"directories reside.",
)
parser.add_argument(
"--layer-path",
required=True,
help="A directory output from the `image_layer` we need to package",
)
parser.add_argument(
"--format",
choices=Format.NAME_TO_CLASS.keys(),
required=True,
help=f"""
Brief format descriptions -- see the code docblocks for more detail:
{'; '.join(
'"' + k + '" -- ' + v.__doc__
for k, v in Format.NAME_TO_CLASS.items()
)}
""",
)
parser.add_argument(
"--output-path",
required=True,
help="Write the image package file(s) to this path -- must not exist",
)
parser.add_argument(
"--writable-subvolume",
action="store_true",
default=False,
help="By default, the subvolume inside a loopback is marked read-only."
" Pass this flag to mark it writable.",
)
parser.add_argument(
"--seed-device",
action="store_true",
default=False,
help="Pass this flag to make the resulting image a btrfs seed device",
)
parser.add_argument(
"--size-mb",
type=int,
help="Size of the target filesystem image",
)
parser.add_argument(
"--volume-label",
help="Label for the target filesystem image",
)
parser.add_argument(
"--multi-pass-size-minimization",
action="store_true",
default=False,
help="By default, we do not apply time-costly efforts to minimize the"
" size of loopback image",
)
parser.add_argument(
"--set-default-subvol",
action="store_true",
default=False,
help="Set the default subvol of the loopback image to be the volume:"
" subvol",
)
parser.add_argument(
"--build-appliance", help="Build appliance layer to use when packaging"
)
# Future: To add support for incremental send-streams, we'd want to
# use this (see `--ancestor-jsons` in `image_package.bzl`)
#
# parser.add_argument(
# '--ancestor-jsons',
# nargs=argparse.REMAINDER, metavar=['PATH'], required=True,
# help='Consumes the remaining arguments on the command-line. '
# 'A list of image_layer JSON output files.',
# )
return Path.parse_args(parser, argv)
# Future: For incremental snapshots, an important sanity check is to verify
# that base subvolume is actually an ancestor of the subvolume being
# packaged, since `btrfs send` does not check this. The function below
# enables us to do this, and more.
#
# def get_subvolume_on_disk_stack(
# layer_json_paths: Iterable[str], subvolumes_dir: str,
# ) -> List[SubvolumeOnDisk]:
# # Map the given layer JSONs to btrfs subvolumes in the per-repo volume
# uuid_to_svod = {}
# parent_uuids = set()
# for json_path in layer_json_paths:
# with open(json_path) as infile:
# svod = SubvolumeOnDisk.from_json_file(infile, subvolumes_dir)
# uuid_to_svod[svod.btrfs_uuid] = svod
# if svod.btrfs_parent_uuid:
# parent_uuids.add(svod.btrfs_parent_uuid)
#
# # Traverse `SubvolumeOnDisk`s from the leaf child to the last ancestor
# svod, = (s for u, s in uuid_to_svod.items() if u not in parent_uuids)
# subvol_stack = []
# while True:
# subvol_stack.append(svod)
# if not svod.btrfs_parent_uuid:
# break
# svod = uuid_to_svod[svod.btrfs_parent_uuid]
# subvol_stack.reverse() # Now from last ancestor to newest child
# assert len(subvol_stack) == len(uuid_to_svod), uuid_to_svod
# assert len(set(subvol_stack)) == len(uuid_to_svod), uuid_to_svod
#
# return subvol_stack
def package_image(argv):
args = parse_args(argv)
assert not os.path.exists(args.output_path)
Format.make(args.format).package_full(
find_built_subvol(args.layer_path, subvolumes_dir=args.subvolumes_dir),
output_path=args.output_path,
opts=_Opts(
subvol_opts=SubvolOpts(
readonly=not args.writable_subvolume,
seed_device=args.seed_device,
set_default_subvol=args.set_default_subvol,
multi_pass_size_minimization=args.multi_pass_size_minimization,
size_bytes=args.size_mb * MiB if args.size_mb else None,
),
build_appliance=find_built_subvol(args.build_appliance)
if args.build_appliance
else None,
size_mb=args.size_mb,
volume_label=args.volume_label,
),
)
# Paranoia: images are read-only after being built
os.chmod(
args.output_path,
stat.S_IMODE(os.stat(args.output_path).st_mode)
& ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
)
if __name__ == "__main__": # pragma: no cover
import sys
init_logging()
package_image(sys.argv[1:])
|
the-stack_106_26346 | #
# Module providing the `SyncManager` class for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import array
import queue
from time import time as _time
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
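# A short summary of the wire protocol, derived from dispatch() and
# convert_to_error() above and Server.serve_client() below: every request is
# a 4-tuple (ident, methodname, args, kwds) and every response is a 2-tuple
# (kind, result), where kind is one of:
#
#   '#RETURN'         -- result is the value handed back to the caller
#   '#PROXY'          -- result is (exposed, token) describing a new proxy
#   '#ERROR'          -- result is the exception instance to re-raise
#   '#TRACEBACK'      -- result is a formatted traceback string (RemoteError)
#   '#UNSERIALIZABLE' -- the real result could not be pickled (RemoteError)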
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError as second_ke:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
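# A minimal sketch of what MakeProxyType() generates; the 'CounterProxy' name
# and its exposed methods are purely illustrative:
#
#     CounterProxy = MakeProxyType('CounterProxy', ('increment', 'value'))
#     # CounterProxy.increment(self, *args, **kwds) simply forwards to
#     # self._callmethod('increment', args, kwds), and likewise for value().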
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'has_key', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
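# A minimal usage sketch (assumes this module is consumed through the public
# multiprocessing API, where Manager() returns a started SyncManager):
#
#     from multiprocessing import Manager
#
#     with Manager() as manager:
#         shared = manager.dict()         # served via DictProxy
#         shared['answer'] = 42
#         numbers = manager.list([1, 2])  # served via ListProxy
#         lock = manager.Lock()           # served via AcquirerProxy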
|
the-stack_106_26348 | #!/usr/bin/python
# coding: UTF-8
#
# Author: Dawid Laszuk
# Contact: [email protected]
#
# Feel free to contact for any information.
"""
.. currentmodule:: CEEMDAN
"""
from __future__ import print_function
import logging
import numpy as np
from multiprocessing import Pool
# Python3 handles multiprocessing much better.
# For Python2 we need to pickle instance differently.
import sys
if sys.version_info[0] < 3:
import copy_reg as copy_reg
import types
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
class CEEMDAN:
"""
**"Complete Ensemble Empirical Mode Decomposition with Adaptive Noise"**
"Complete ensemble empirical mode decomposition with adaptive
noise" (CEEMDAN) [Torres2011_] is noise-assisted EMD technique.
Word "complete" presumably refers to decomposing completly
everything, even added perturbation (noise).
The provided implementation contains the proposed "improvements" from
paper [Colominas2014_].
Parameters
----------
trials : int (default: 100)
Number of trials or EMD performance with added noise.
epsilon : float (default: 0.005)
Scale for added noise (:math:`\epsilon`) which multiplies the std :math:`\sigma`:
:math:`\\beta = \epsilon \cdot \sigma (noise)`
ext_EMD : EMD (default: None)
One can pass EMD object defined outside, which will be
used to compute IMF decompositions in each trial. If none
is passed then EMD with default options is used.
References
----------
.. [Torres2011] M.E. Torres, M.A. Colominas, G. Schlotthauer, P. Flandrin
A complete ensemble empirical mode decomposition with adaptive noise.
Acoustics, Speech and Signal Processing (ICASSP), 2011, pp. 4144--4147
.. [Colominas2014] M.A. Colominas, G. Schlotthauer, M.E. Torres,
Improved complete ensemble EMD: A suitable tool for biomedical signal
processing, In Biomed. Sig. Proc. and Control, V. 14, 2014, pp. 19--29
"""
logger = logging.getLogger(__name__)
noise_kinds_all = ["normal", "uniform"]
def __init__(self, trials=100, epsilon=0.005, ext_EMD=None, **kwargs):
# Ensemble constants
self.trials = trials
self.epsilon = epsilon
self.all_noise_std = np.zeros(self.trials)
self.beta_progress = True # Scale noise by std
self.random = np.random.RandomState()
self.noise_kind = "normal"
self.all_noise_EMD = []
if ext_EMD is None:
from PyEMD import EMD
self.EMD = EMD()
else:
self.EMD = ext_EMD
self.range_thr = 0.01
self.total_power_thr = 0.05
# By default (None) Pool spawns #processes = #CPU
processes = None if "processes" not in kwargs else kwargs["processes"]
self.pool = Pool(processes=processes)
# Update based on options
for key in kwargs.keys():
if key in self.__dict__.keys():
self.__dict__[key] = kwargs[key]
elif key in self.EMD.__dict__.keys():
self.EMD.__dict__[key] = kwargs[key]
def __call__(self, S, T=None, max_imf=-1):
return self.ceemdan(S, T=T, max_imf=max_imf)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['pool']
return self_dict
def generate_noise(self, scale, size):
"""
Generate noise with specified parameters.
Currently supported distributions are:
* *normal* with std equal scale.
* *uniform* with range [-scale/2, scale/2].
Parameters
----------
scale : float
Width for the distribution.
size : int
Number of generated samples.
Returns
-------
noise : numpy array
Noise sampled from selected distribution.
"""
if self.noise_kind=="normal":
noise = self.random.normal(loc=0, scale=scale, size=size)
elif self.noise_kind=="uniform":
noise = self.random.uniform(low=-scale/2, high=scale/2, size=size)
else:
raise ValueError("Unsupported noise kind. Please assigned `noise_kind`"
+ " to be one of these: " + str(self.noise_kinds_all))
return noise
def noise_seed(self, seed):
"""Set seed for noise generation."""
self.random.seed(seed)
def ceemdan(self, S, T=None, max_imf=-1):
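        # Note: the input signal is normalised in place (S is modified here);
        # the extracted components are rescaled by `scale_s` before returning.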
scale_s = np.std(S)
S[:] = S/scale_s
# Define all noise
self.all_noises = self.generate_noise(1, (self.trials,S.size))
# Decompose all noise and remember 1st's std
self.logger.debug("Decomposing all noises")
for trial, noise in enumerate(self.all_noises):
_imfs = self.emd(noise, T, max_imf=-1)
# If beta_progress then scale all IMFs with 1st std
if self.beta_progress:
_imfs = _imfs/np.std(_imfs[0])
self.all_noise_EMD.append(_imfs)
# Create first IMF
last_imf = self._eemd(S, T, 1)[0]
res = np.empty(S.size)
all_cimfs = last_imf.reshape((-1, last_imf.size))
prev_res = S - last_imf
self.logger.debug("Starting CEEMDAN")
while(True):
            # Check end condition at the beginning because we already have 1 IMF
if self.end_condition(S, all_cimfs, max_imf):
self.logger.debug("End Condition - Pass")
break
imfNo = all_cimfs.shape[0]
beta = self.epsilon*np.std(prev_res)
local_mean = np.zeros(S.size)
for trial in range(self.trials):
# Skip if noise[trial] didn't have k'th mode
noise_imf = self.all_noise_EMD[trial]
res = prev_res.copy()
if len(noise_imf) > imfNo:
res += beta*noise_imf[imfNo]
# Extract local mean, which is at 2nd position
imfs = self.emd(res, T, 1)
local_mean += imfs[-1]/self.trials
last_imf = prev_res - local_mean
all_cimfs = np.vstack((all_cimfs, last_imf))
prev_res = local_mean.copy()
# END of while
res = S - np.sum(all_cimfs, axis=0)
all_cimfs = np.vstack((all_cimfs,res))
all_cimfs = all_cimfs*scale_s
return all_cimfs
def end_condition(self, S, cIMFs, max_imf):
"""Test for end condition of CEEMDAN.
Procedure stops if:
        * the number of components reaches the provided `max_imf`, or
        * the last component is close to being pure noise (range or power), or
        * the set of provided components sufficiently reconstructs the input.
Parameters
----------
S : numpy array
Original signal on which CEEMDAN was performed.
        cIMFs : numpy 2D array
            Set of cIMFs where each row is a cIMF.
        max_imf : int
            Maximum number of cIMFs to extract; a non-positive value means no limit.
Returns
-------
end : bool
Whether to stop CEEMDAN.
"""
imfNo = cIMFs.shape[0]
# Check if hit maximum number of cIMFs
if max_imf > 0 and imfNo >= max_imf:
return True
# Compute EMD on residue
R = S - np.sum(cIMFs, axis=0)
_test_imf = self.emd(R, None, max_imf=1)
# Check if residue is IMF or no extrema
if _test_imf.shape[0] == 1:
self.logger.debug("Not enough extrema")
return True
# Check for range threshold
if np.max(R) - np.min(R) < self.range_thr:
self.logger.debug("FINISHED -- RANGE")
return True
# Check for power threshold
if np.sum(np.abs(R)) < self.total_power_thr:
self.logger.debug("FINISHED -- SUM POWER")
return True
return False
def _eemd(self, S, T=None, max_imf=-1):
if T is None: T = np.arange(len(S), dtype=S.dtype)
self._S = S
self._T = T
self._N = N = len(S)
self.max_imf = max_imf
# For trial number of iterations perform EMD on a signal
# with added white noise
all_IMFs = self.pool.map(self._trial_update, range(self.trials))
max_imfNo = max([IMFs.shape[0] for IMFs in all_IMFs])
self.E_IMF = np.zeros((max_imfNo, N))
for IMFs in all_IMFs:
self.E_IMF[:IMFs.shape[0]] += IMFs
return self.E_IMF/self.trials
def _trial_update(self, trial):
# Generate noise
noise = self.epsilon*self.all_noise_EMD[trial][0]
return self.emd(self._S+noise, self._T, self.max_imf)
def emd(self, S, T, max_imf=-1):
"""Vanilla EMD method.
Provides emd evaluation from provided EMD class.
For reference please see :class:`PyEMD.EMD`.
"""
return self.EMD.emd(S, T, max_imf)
###################################################
## Beginning of program
if __name__ == "__main__":
import pylab as plt
# Logging options
logging.basicConfig(level=logging.INFO)
max_imf = -1
# Signal options
N = 500
tMin, tMax = 0, 2*np.pi
T = np.linspace(tMin, tMax, N)
S = 3*np.sin(4*T) + 4*np.cos(9*T) + np.sin(8.11*T+1.2)
# Prepare and run EEMD
trials = 20
ceemdan = CEEMDAN(trials=trials)
C_IMFs = ceemdan(S, T, max_imf)
imfNo = C_IMFs.shape[0]
# Plot results in a grid
c = np.floor(np.sqrt(imfNo+2))
r = np.ceil((imfNo+2)/c)
plt.ioff()
plt.subplot(r,c,1)
plt.plot(T, S, 'r')
plt.xlim((tMin, tMax))
plt.title("Original signal")
plt.subplot(r,c,2)
plt.plot(T, S-np.sum(C_IMFs, axis=0), 'r')
plt.xlim((tMin, tMax))
plt.title("Residuum")
for num in range(imfNo):
plt.subplot(r,c,num+3)
plt.plot(T, C_IMFs[num],'g')
plt.xlim((tMin, tMax))
plt.title("Imf "+str(num+1))
plt.show()
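    # A minimal reproducibility sketch (an assumption, not part of the original
    # demo): `noise_seed` re-seeds the internal RandomState that
    # `generate_noise` draws from, so re-running with the same seed should give
    # identical noise realisations.
    ceemdan.noise_seed(12345)
    noise_a = ceemdan.generate_noise(1.0, 5)
    ceemdan.noise_seed(12345)
    noise_b = ceemdan.generate_noise(1.0, 5)
    print("Seeded noise reproducible:", np.allclose(noise_a, noise_b))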
|
the-stack_106_26352 | '''OpenGL extension APPLE.element_array
Overview (from the spec)
This extension provides facilities to improve DrawElements style vertex
indices submission performance by allowing index arrays. Using this
extension these arrays can be contained inside a vertex array range and
thus pulled directly by the graphics processor, avoiding the CPU overhead
of touching the index data.
This extension is most useful when used in conjunction with the
APPLE_vertex_array_range extension. APPLE_vertex_array_range provides an
interface for storing vertex array data. In cases where large amounts of
vertex data are in use, the index data used to construct primitives
(typically as passed to the GL through DrawElements) can impose a
significant bandwidth burden. APPLE_element_array allows the application to
specify independent arrays of elements, which can then be cached using
APPLE_vertex_array_range. In effect this creates a more orthogonal
interface for both vertex indices and data.
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/APPLE/element_array.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_APPLE_element_array'
GL_ELEMENT_ARRAY_APPLE = constant.Constant( 'GL_ELEMENT_ARRAY_APPLE', 0x8768 )
GL_ELEMENT_ARRAY_TYPE_APPLE = constant.Constant( 'GL_ELEMENT_ARRAY_TYPE_APPLE', 0x8769 )
glget.addGLGetConstant( GL_ELEMENT_ARRAY_TYPE_APPLE, (1,) )
GL_ELEMENT_ARRAY_POINTER_APPLE = constant.Constant( 'GL_ELEMENT_ARRAY_POINTER_APPLE', 0x876A )
glElementPointerAPPLE = platform.createExtensionFunction(
'glElementPointerAPPLE', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, ctypes.c_void_p,),
doc = 'glElementPointerAPPLE( GLenum(type), c_void_p(pointer) ) -> None',
argNames = ('type', 'pointer',),
)
glDrawElementArrayAPPLE = platform.createExtensionFunction(
'glDrawElementArrayAPPLE', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLint, constants.GLsizei,),
doc = 'glDrawElementArrayAPPLE( GLenum(mode), GLint(first), GLsizei(count) ) -> None',
argNames = ('mode', 'first', 'count',),
)
glDrawRangeElementArrayAPPLE = platform.createExtensionFunction(
'glDrawRangeElementArrayAPPLE', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLuint, constants.GLint, constants.GLsizei,),
doc = 'glDrawRangeElementArrayAPPLE( GLenum(mode), GLuint(start), GLuint(end), GLint(first), GLsizei(count) ) -> None',
argNames = ('mode', 'start', 'end', 'first', 'count',),
)
glMultiDrawElementArrayAPPLE = platform.createExtensionFunction(
'glMultiDrawElementArrayAPPLE', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, arrays.GLintArray, arrays.GLsizeiArray, constants.GLsizei,),
doc = 'glMultiDrawElementArrayAPPLE( GLenum(mode), GLintArray(first), GLsizeiArray(count), GLsizei(primcount) ) -> None',
argNames = ('mode', 'first', 'count', 'primcount',),
)
glMultiDrawRangeElementArrayAPPLE = platform.createExtensionFunction(
'glMultiDrawRangeElementArrayAPPLE', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLuint, constants.GLuint, arrays.GLintArray, arrays.GLsizeiArray, constants.GLsizei,),
doc = 'glMultiDrawRangeElementArrayAPPLE( GLenum(mode), GLuint(start), GLuint(end), GLintArray(first), GLsizeiArray(count), GLsizei(primcount) ) -> None',
argNames = ('mode', 'start', 'end', 'first', 'count', 'primcount',),
)
def glInitElementArrayAPPLE():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
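

if __name__ == '__main__':
    # Hedged usage sketch (not part of the generated wrapper): extension
    # queries generally require a current GL context, which is assumed to be
    # created elsewhere (e.g. with GLUT or pygame); none is created here.
    try:
        print('GL_APPLE_element_array available:', glInitElementArrayAPPLE())
    except Exception as exc:
        print('Could not query extension (no GL context?):', exc)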
|
the-stack_106_26354 | import torch
import torch.nn as nn
import torch.nn.functional as func
class DistanceDiscriminator(nn.Module):
def __init__(self, batch_size, out_size):
super().__init__()
self.batch_size = batch_size
self.batch_combine = nn.Linear(batch_size, out_size)
def forward(self, data):
data = data.view(data.size(0), -1)
        # pairwise distances between flattened samples; reduce over the feature
        # axis so the result is a (batch, batch) matrix matching batch_combine
        dist = ((data[None, :] - data[:, None]).norm(dim=-1) + 1e-6).log()
return self.batch_combine(dist)
class DynamicAugmentation(nn.Module):
def __init__(self, transforms, target=0.6,
p=0.0, step=0.01, every=4):
super().__init__()
self.transforms = transforms
self.target = target
self.p = p
self.step = step
self.every = every
self.tick = 0
def update(self, result):
if self.tick % self.every == 0:
with torch.no_grad():
sign = (result.sign() + 1).mean() / 2
if sign < self.target:
self.p -= self.step
else:
self.p += self.step
self.p = max(0, min(1, self.p))
self.tick = 0
self.tick += 1
def forward(self, data):
for transform in self.transforms:
aug = transform(data)
if isinstance(data, (list, tuple)):
tmp = []
mask = (torch.rand(aug[0].size(0)) < self.p).to(aug[0].device)
for item, aug_item in zip(data, aug):
mm = mask.view(mask.size(0), *([1] * (item.dim() - 1)))
item = ((~mm).float() * item + mm.float() * aug_item)
tmp.append(item)
data = tuple(tmp)
else:
mask = (torch.rand(aug.size(0)) < self.p).to(data.device)
mask = mask.view(mask.size(0), *([1] * (data.dim() - 1)))
data = ((~mask).float() * data + mask.float() * aug)
return data
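

if __name__ == "__main__":
    # Minimal smoke test (an assumption, not part of the original module):
    # feed random images through DynamicAugmentation with a horizontal-flip
    # transform and fake discriminator scores, then through the distance head.
    torch.manual_seed(0)
    images = torch.randn(8, 3, 16, 16)
    augment = DynamicAugmentation(
        [lambda x: torch.flip(x, dims=(-1,))], target=0.6, p=0.0, step=0.01, every=1
    )
    for _ in range(5):
        fake_scores = torch.randn(8)      # stand-in for discriminator outputs
        augment.update(fake_scores)
        images = augment(images)
    print("augmentation probability:", augment.p)
    head = DistanceDiscriminator(batch_size=8, out_size=1)
    print("distance head output shape:", tuple(head(images).shape))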
|
the-stack_106_26355 | import json
import logging
import os
import ssl
import webbrowser
from http.server import HTTPServer, BaseHTTPRequestHandler
from queue import Queue
from threading import Thread
from typing import Any, Optional
from peek.connection import EsClient, RefreshingEsClient
_logger = logging.getLogger(__name__)
class _OidcExchange:
callback_path = None
class CallbackHTTPRequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
_logger.debug(f'Path is: {self.path}')
_OidcExchange.callback_path.put(self.path)
self.send_response(200)
self.end_headers()
self.wfile.write(b'Callback received, you can now close the browser tab.')
def do_POST(self):
pass
def log_message(self, fmt: str, *args: Any) -> None:
_logger.info("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
fmt % args))
def oidc_authenticate(es_client: EsClient, realm: str, callback_port: str, callback_ssl: bool, name: Optional[str]):
_logger.info(f'OIDC authenticate for realm {realm!r} and callback port {callback_port!r}')
prepare_response = _oidc_prepare(es_client, realm)
httpd = _oidc_start_http_server(callback_port, callback_ssl)
print('Please use browser to complete authentication against the idP')
webbrowser.open(prepare_response['redirect'])
callback_path = _OidcExchange.callback_path.get()
if isinstance(callback_path, bytes):
callback_path = callback_path.decode('utf-8')
try:
httpd.shutdown()
auth_response = _oidc_do_authenticate(es_client, realm, prepare_response['state'], prepare_response['nonce'],
callback_path)
return RefreshingEsClient(
es_client,
auth_response['username'],
auth_response['access_token'],
auth_response['refresh_token'],
auth_response['expires_in'],
name=name)
finally:
_OidcExchange.callback_path = None
def _oidc_prepare(es_client, realm: str):
return es_client.perform_request(
'POST',
'/_security/oidc/prepare',
json.dumps({
'realm': realm,
}),
deserialize_it=True
)
def _oidc_do_authenticate(es_client, realm: str, state: str, nonce: str, redirect_uri: str):
response = es_client.perform_request(
'POST',
'/_security/oidc/authenticate',
json.dumps({
'realm': realm,
'state': state,
'nonce': nonce,
'redirect_uri': redirect_uri,
}),
deserialize_it=True
)
return response
def _oidc_start_http_server(callback_port, callback_ssl):
from peek import __file__ as package_root
package_root = os.path.dirname(package_root)
_OidcExchange.callback_path = Queue(maxsize=1)
httpd = HTTPServer(('localhost', int(callback_port)), CallbackHTTPRequestHandler)
if callback_ssl:
keyfile = os.path.join(package_root, 'certs', 'key.pem')
certfile = os.path.join(package_root, 'certs', 'cert.pem')
httpd.socket = ssl.wrap_socket(
httpd.socket,
keyfile=keyfile,
certfile=certfile,
server_side=True)
t = Thread(target=httpd.serve_forever, daemon=True)
t.start()
return httpd
class OidcAuthenticateFunc:
def __call__(self, app, **options):
realm = options.get('realm', 'oidc1')
conn = options.get('conn', None)
oidc_es_client = oidc_authenticate(
app.es_client_manager.current if conn is None else app.es_client_manager.get_client(conn),
realm,
options.get('callback_port', '5601'),
options.get('callback_ssl', True),
name=options.get('name', None),
)
app.es_client_manager.add(oidc_es_client)
return json.dumps({'username': oidc_es_client.username, 'realm': 'realm'})
@property
def options(self):
return {'realm': 'oidc1', 'callback_port': '5601', 'callback_ssl': True, 'name': None, 'conn': None}
@property
def description(self):
return 'Start OIDC authentication flow'
|
the-stack_106_26357 | #!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5
from .basics import *
import pickle
import os
import codecs
from .analysis import Analysis_net
from .analysis_prior import Analysis_prior_net
from .synthesis import Synthesis_net
class Synthesis_prior_net(nn.Module):
'''
Decode synthesis prior
'''
def __init__(self):
super(Synthesis_prior_net, self).__init__()
self.deconv1 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5, stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv1.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv1.bias.data, 0.01)
self.relu1 = nn.ReLU()
self.deconv2 = nn.ConvTranspose2d(out_channel_N, out_channel_N, 5, stride=2, padding=2, output_padding=1)
torch.nn.init.xavier_normal_(self.deconv2.weight.data, math.sqrt(2 * 1))
torch.nn.init.constant_(self.deconv2.bias.data, 0.01)
self.relu2 = nn.ReLU()
self.deconv3 = nn.ConvTranspose2d(out_channel_N, out_channel_M, 3, stride=1, padding=1)
torch.nn.init.xavier_normal_(self.deconv3.weight.data, (math.sqrt(2 * 1 * (out_channel_M + out_channel_N) / (out_channel_N + out_channel_N))))
torch.nn.init.constant_(self.deconv3.bias.data, 0.01)
# self.priordecoder = nn.Sequential(
# nn.ConvTranspose2d(out_channel_N, out_channel_N, 5, stride=2, padding=2, output_padding=1),
# nn.ReLU(),
# nn.ConvTranspose2d(out_channel_N, out_channel_N, 5, stride=2, padding=2, output_padding=1),
# nn.ReLU(),
# nn.ConvTranspose2d(out_channel_N, out_channel_M, 3, stride=1, padding=1)
# )
def forward(self, x):
x = self.relu1(self.deconv1(x))
x = self.relu2(self.deconv2(x))
return torch.exp(self.deconv3(x))
def build_model():
input_image = torch.zeros([7,3,256,256])
analysis_net = Analysis_net()
analysis_prior_net = Analysis_prior_net()
synthesis_net = Synthesis_net()
synthesis_prior_net = Synthesis_prior_net()
feature = analysis_net(input_image)
z = analysis_prior_net(feature)
compressed_z = torch.round(z)
recon_sigma = synthesis_prior_net(compressed_z)
compressed_feature_renorm = feature / recon_sigma
compressed_feature_renorm = torch.round(compressed_feature_renorm)
compressed_feature_denorm = compressed_feature_renorm * recon_sigma
recon_image = synthesis_net(compressed_feature_denorm)
print("input_image : ", input_image.size())
print("feature : ", feature.size())
print("z : ", z.size())
print("recon_sigma : ", recon_sigma.size())
print("recon_image : ", recon_image.size())
if __name__ == '__main__':
build_model()
|
the-stack_106_26359 | # -*- coding: utf-8 -*-
"""
Estimate the PSF FWHM for each of the reduced science images, and append the
'reducedFileIndex.csv' with a column containing that information. The PSF FWHM
values will be used to cull data to only include good seeing conditions.
"""
#Import whatever modules will be used
import os
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
import astropy.units as u
from scipy import stats
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Define the location of the PPOL reduced data to be read and worked on
PPOL_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611\\'
S3_dir = os.path.join(PPOL_data, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data='C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611'
# Set the filename for the reduced data indexFile and read it in
reducedFileIndexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
reducedFileIndex = Table.read(reducedFileIndexFile)
# Set the filename for the PSF backup index file. This file will be saved
# separately, in addition to the modified reduced file index. This way, if the
# user invokes 01_buildIndex.py again, it will not OVERWRITE the PSF data.
PSFindexFile = os.path.join(pyPol_data, 'PSFindex.csv')
# Finally, loop through EACH image and compute the PSF
PSFwidths = []
sigm2FWHM = 2*np.sqrt(2*np.log(2))
numberOfFiles = len(reducedFileIndex)
for iFile, filename in enumerate(reducedFileIndex['FILENAME'].data):
    if reducedFileIndex['AB'][iFile] == 'B':
        # Background frames get a placeholder value so that the list stays
        # aligned with the file index (one entry per row).
        PSFwidths.append(-1)
        continue
# Construct the PPOL file name
PPOL_file = os.path.join(S3_dir, filename)
# Read in the image
thisImg = ai.reduced.ReducedScience.read(PPOL_file)
# Construct a Photometry analyzer object
thisPhotAnalyzer = ai.utilitywrappers.PhotometryAnalyzer(thisImg)
# Estimate the PSF for this image
PSFstamp, PSFparams = thisPhotAnalyzer.get_psf()
# Check if non-null values were returned from the get_psf method
if (PSFparams['sminor'] is None) or (PSFparams['smajor'] is None):
PSFwidths.append(0)
continue
# Estimate a binning-normalized "width" parameter for the PSF
thisBinning = np.sqrt(np.array(thisImg.binning).prod())
thisWidth = np.sqrt(PSFparams['sminor']*PSFparams['smajor'])*thisBinning
PSFwidths.append(sigm2FWHM*thisWidth)
# Compute the percentage done and show it to the user
percentage = np.round(100*iFile/numberOfFiles, 2)
print('File : {0} ... completed {1:3.2f}%'.format(os.path.basename(filename), percentage), end="\r")
# Add a FWHM column to the PSF index (for safe(r) keeping) file index.
PSFindex = Table()
FWHMcolumn = Column(name='FWHM', data=PSFwidths)
PSFindex.add_column(FWHMcolumn)
reducedFileIndex.add_column(FWHMcolumn)
# Save the index to disk.
PSFindex.write(PSFindexFile, format='ascii.csv', overwrite=True)
reducedFileIndex.write(reducedFileIndexFile, format='ascii.csv', overwrite=True)
|
the-stack_106_26361 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.operators.wasb_delete_blob import WasbDeleteBlobOperator
class TestWasbDeleteBlobOperator(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
}
def setUp(self):
args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
operator = WasbDeleteBlobOperator(task_id='wasb_operator_1', dag=self.dag, **self._config)
self.assertEqual(operator.container_name, self._config['container_name'])
self.assertEqual(operator.blob_name, self._config['blob_name'])
self.assertEqual(operator.is_prefix, False)
self.assertEqual(operator.ignore_if_missing, False)
operator = WasbDeleteBlobOperator(
task_id='wasb_operator_2', dag=self.dag, is_prefix=True, ignore_if_missing=True, **self._config
)
self.assertEqual(operator.is_prefix, True)
self.assertEqual(operator.ignore_if_missing, True)
@mock.patch('airflow.providers.microsoft.azure.operators.wasb_delete_blob.WasbHook', autospec=True)
def test_execute(self, mock_hook):
mock_instance = mock_hook.return_value
operator = WasbDeleteBlobOperator(
task_id='wasb_operator', dag=self.dag, is_prefix=True, ignore_if_missing=True, **self._config
)
operator.execute(None)
mock_instance.delete_file.assert_called_once_with('container', 'blob', True, True)
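
    @mock.patch('airflow.providers.microsoft.azure.operators.wasb_delete_blob.WasbHook', autospec=True)
    def test_execute_with_defaults(self, mock_hook):
        # Hedged extra check (not in the original suite): with the default
        # flags, the hook should be asked for a plain, non-prefix delete that
        # does not ignore missing blobs.
        mock_instance = mock_hook.return_value
        operator = WasbDeleteBlobOperator(task_id='wasb_operator_defaults', dag=self.dag, **self._config)
        operator.execute(None)
        mock_instance.delete_file.assert_called_once_with('container', 'blob', False, False)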
|
the-stack_106_26362 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from django.db import IntegrityError, DataError
from django.db import transaction
from signbank.dictionary.models import (Gloss, Dataset, SignLanguage, Language, Keyword, Translation,
Dialect, RelationToForeignSign, FieldChoice, MorphologyDefinition,
GlossTranslations)
from signbank.dictionary.models import build_choice_list
class GlossTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="test", email=None, password=None)
# Migrations have id=1 already
self.language = Language.objects.create(name="glang", language_code_2char="gl", language_code_3char="gla")
self.signlanguage = SignLanguage.objects.create(pk=2, name="testsignlanguage", language_code_3char="tst")
self.dataset = Dataset.objects.create(name="testdataset", signlanguage=self.signlanguage)
self.gloss = Gloss.objects.create(idgloss="testgloss", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
def test_str(self):
self.assertEqual(str(self.gloss), self.gloss.idgloss)
def test_publish(self):
"""Test that publishing Gloss works."""
gloss = Gloss.objects.get(idgloss="testgloss")
# Should be not be published when first created
self.assertFalse(gloss.published)
# Set locked True, and check that the Gloss is locked.
gloss.published = True
gloss.save()
self.assertTrue(gloss.published)
def test_unique_together(self):
"""Make sure that there can't be two of the same gloss+dataset combinations"""
gloss = Gloss.objects.get(idgloss="testgloss")
# Create another Gloss
new_gloss = Gloss.objects.create(idgloss="testgloss2", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
new_dataset = Dataset.objects.create(name="testdataset2", signlanguage=self.signlanguage)
self.assertEqual(new_gloss.idgloss, "testgloss2")
# Make sure you cannot violate unique_together by changing the Gloss.idgloss
with self.assertRaises(IntegrityError): # Should return IntegrityError
with transaction.atomic():
new_gloss.idgloss = "testgloss"
new_gloss.save()
# Change to a new dataset
new_gloss.dataset = new_dataset
new_gloss.save()
self.assertTrue(new_gloss.dataset == new_dataset)
# Change new_gloss to the same as gloss
new_gloss.idgloss = "testgloss"
new_gloss.save()
self.assertTrue(new_gloss.idgloss == gloss.idgloss)
# Make sure that you cannot violate unique_together by changing Dataset
with self.assertRaises(IntegrityError): # Should return IntegrityError
with transaction.atomic():
gloss.dataset = new_dataset
gloss.save()
def test_idgloss(self):
"""Tests idgloss"""
gloss = Gloss.objects.get(idgloss="testgloss")
# Check for some weird characters
weird_chars = ("äöåÄÖŨ^~'* ´`üÜÿŸëêËÊ€$#", "ЁЂЃЄЅІЇЌЍЎЏАБВДЖИКОПРСТФХЦЧЩЫ", "؟ الستارود أي بعد, معاملة بيو",)
for my_str in weird_chars:
gloss.idgloss = my_str
gloss.save()
self.assertEqual(gloss.idgloss, str(gloss.idgloss))
self.assertEqual(gloss.idgloss, my_str)
# Test that the length of idgloss can't be too long
with self.assertRaises(DataError):
gloss.idgloss = "afasdkfjsdalkfjdsaljfl^¨'*´`} sajfljadsklfjasdklfjsadkjflÄÖÅlöjsadkfjasdkljflaksdjfkljds"
"fljasdlkfjakdslkafjsdlkafjölasdjfkldsajlaköfjsdakljfklasdjfkldsjaflkajdsflökjdsalkfjadslköfjdsalökjfklsd"
"ajflkdsjlkfajöldskjflkadsjflkdsajfladslkfjdlksa"
gloss.save()
def test_idgloss_dataset(self):
"""Test that a Gloss cannot be created without a relation to Dataset."""
with self.assertRaises(IntegrityError):
Gloss.objects.create(idgloss="testgloss7", created_by=self.user, updated_by=self.user)
def test_idgloss_en(self):
"""Tests the field idgloss_en."""
# Check that the max_length can't be exceeded.
with self.assertRaises(DataError):
en = Gloss.objects.create(idgloss="testgloss_en", idgloss_en="äöå1@r" * 10 + "1", dataset=self.dataset,
created_by=self.user, updated_by=self.user)
def test_created_by(self):
"""Tests that the created_by field functions when a gloss is created."""
gl = Gloss.objects.create(idgloss="testgloss_createdby", dataset=self.dataset,
created_by=self.user, updated_by=self.user)
self.assertEqual(gl.created_by, self.user)
def test_get_translation_languages(self):
"""Tests function get_translation_languages()"""
self.dataset.translation_languages = (self.language,)
self.dataset.save()
self.assertIn(self.language, Gloss.get_translation_languages(self.gloss))
def test_get_translations_for_translation_languages(self):
"""Test function get_translations_for_translation_languages()"""
keyword = Keyword.objects.create(text="akeyword")
keyword2 = Keyword.objects.create(text="another")
translation = Translation.objects.create(gloss=self.gloss, language=self.language, keyword=keyword,
order=2)
translation2 = Translation.objects.create(gloss=self.gloss, language=self.language, keyword=keyword2, order=3)
self.dataset.translation_languages = (self.language,)
self.dataset.save()
unzipped = zip(*Gloss.get_translations_for_translation_languages(self.gloss))
languages, translations = next(unzipped), next(unzipped)
self.assertIn(self.language, languages)
keywords = [str(translation.keyword), str(translation2.keyword)]
# Check that all the keywords are in the 'translations' string.
self.assertTrue(all(x in str(*translations) for x in keywords))
def test_field_labels(self):
"""Test that function returns proper field labels."""
meta_fields = self.gloss._meta.fields
field_names = dict()
for field in meta_fields:
field_names[field.name] = field.verbose_name
self.assertDictEqual(Gloss.field_labels(self.gloss), field_names)
def test_get_fields(self):
"""Test function."""
field_list = []
for field in Gloss._meta.fields:
field_list.append((field.name, field.value_to_string(self.gloss)))
self.assertListEqual(Gloss.get_fields(self.gloss), field_list)
class DatasetTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="testdata", email=None, password=None)
# Migrations have id=1 already
self.signlanguage = SignLanguage.objects.create(pk=3, name="slang", language_code_3char="tst")
self.dataset = Dataset.objects.create(name="dataset", signlanguage=self.signlanguage)
def test_str(self):
"""Test unicode string representation."""
self.assertEqual(str(self.dataset), self.dataset.name)
class GlossTranslationsTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="testglosstrans", email=None, password=None)
(self.language1, created) = Language.objects.get_or_create(name="mylang", language_code_2char="er",
language_code_3char="eor")
(self.language2, created) = Language.objects.get_or_create(name="mylang2", language_code_2char="ir",
language_code_3char="ior")
(self.language3, created) = Language.objects.get_or_create(name="mylang3", language_code_2char="ar",
language_code_3char="aor")
(self.language4, created) = Language.objects.get_or_create(name="mylang4", language_code_2char="er",
language_code_3char="eor")
# Migrations have id=1 already for a SignLanguage
(self.signlanguage, created) = SignLanguage.objects.get_or_create(pk=14, name="mysignlang",
language_code_3char="mys")
(self.dataset, created) = Dataset.objects.get_or_create(name="dataset52", signlanguage=self.signlanguage)
(self.gloss, created) = Gloss.objects.get_or_create(idgloss="transgloss", dataset=self.dataset,
created_by=self.user, updated_by=self.user)
(self.keyword, created) = Keyword.objects.get_or_create(text="tiger")
# Create a Translation
(self.translation, created) = Translation.objects.get_or_create(gloss=self.gloss, language=self.language1,
keyword=self.keyword, order=1)
# Create GlossTranslation objects
self.translations = "squirrel, elephant, ant, magpie"
self.translations_keywords = ["squirrel", "elephant", "ant", "magpie"]
(self.glosstranslations, created) = \
GlossTranslations.objects.get_or_create(gloss=self.gloss, language=self.language1,
translations=self.translations)
self.translations_duplicates = "kangaroo, snake, kangaroo, horse, horse"
self.translations_duplicates_keywords = ["kangaroo", "snake", "horse"]
(self.glosstranslations_duplicates, created) = \
GlossTranslations.objects.get_or_create(gloss=self.gloss, language=self.language2,
translations=self.translations_duplicates)
self.translations_grouped = "1. cat, dog; 2. lion, crocodile; 3. mouse"
self.translations_grouped_keywords = ["cat", "dog", "lion", "crocodile", "mouse"]
(self.glosstranslations_grouped, created) = \
GlossTranslations.objects.get_or_create(gloss=self.gloss, language=self.language3,
translations=self.translations_grouped)
self.translations_grouped_duplicates = "1. kitten, pony; 2. monkey, pony; donkey, monkey"
self.translations_grouped_duplicates_keywords = ["kitten", "pony", "monkey", "donkey"]
(self.glosstranslations_grouped_duplicates, created) = \
GlossTranslations.objects.get_or_create(gloss=self.gloss, language=self.language4,
translations=self.translations_grouped_duplicates)
def test_get_keywords(self):
"""Test get_keywords()"""
# Simple format
self.assertEqual(self.glosstranslations.get_keywords(), self.translations_keywords)
# Grouped format
self.assertEqual(self.glosstranslations_grouped.get_keywords(), self.translations_grouped_keywords)
def test_get_keywords_duplicates(self):
"""Make sure get_keywords() doesn't return duplicates."""
# Simple format duplicates (keywords should not contain duplicates, translations field can)
self.assertEqual(self.glosstranslations_duplicates.get_keywords_unique(), self.translations_duplicates_keywords)
# Grouped format duplicates (keywords should not contain duplicates, translations field can)
self.assertEqual(self.glosstranslations_grouped_duplicates.get_keywords_unique(),
self.translations_grouped_duplicates_keywords)
def test_has_duplicates(self):
"""Tests has_duplicates() and verifies that it works correctly."""
self.assertFalse(self.glosstranslations.has_duplicates())
self.assertTrue(self.glosstranslations_duplicates.has_duplicates())
self.assertFalse(self.glosstranslations_grouped.has_duplicates())
self.assertTrue(self.glosstranslations_grouped_duplicates.has_duplicates())
def test_save(self):
"""Test GlossTranslations.save()"""
glosstrans = self.glosstranslations
glosstrans.translations = "squirrel, magpie" # Remove "elephant, ant"
glosstrans.save()
trans = Translation.objects.filter(gloss=self.gloss, language=self.language1)
# Verify that these Translation objects have been deleted.
self.assertFalse("elephant" and "ant" in [str(x.keyword) for x in trans])
# Verify that these Translation objects still exist.
self.assertTrue("squirrel" and "magpie" in [str(x.keyword) for x in trans])
def test_save_duplicates(self):
"""Test saving duplicates."""
# This object has duplicates, saving it should not raise exceptions.
self.glosstranslations_duplicates.save()
class TranslationTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="testtrans", email=None, password=None)
self.language = Language.objects.create(name="mylang", language_code_2char="ml", language_code_3char="myl")
# Migrations have id=1 already for a SignLanguage
self.signlanguage = SignLanguage.objects.create(pk=5, name="signlang", language_code_3char="sla")
self.dataset = Dataset.objects.create(name="dataset", signlanguage=self.signlanguage)
self.gloss = Gloss.objects.create(idgloss="transgloss", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
self.keyword = Keyword.objects.create(text="myword")
# Create a Translation
self.translation = Translation.objects.create(gloss=self.gloss, language=self.language, keyword=self.keyword,
order=1)
def test_str(self):
"""Test unicode string representation."""
self.assertEqual(str(self.translation), self.keyword.text)
class KeywordTestCase(TestCase):
def setUp(self):
self.keyword = Keyword.objects.create(text="mykeyworD")
def test_str(self):
self.assertEqual(str(self.keyword), self.keyword.text)
class LanguageTestCase(TestCase):
def setUp(self):
self.language = Language.objects.create(name=u"New ÖÄ Language", language_code_2char="nl",
language_code_3char="nla", description="New language we just created")
def test_str(self):
self.assertEqual(str(self.language), self.language.name)
class DialectTestCase(TestCase):
def setUp(self):
self.signlanguage = SignLanguage.objects.create(pk=5, name=u"sÄÄö", language_code_3char="ÄÄö")
self.dialect = Dialect.objects.create(language=self.signlanguage, name=u"Northern sÄÄö",
description=u"Northern sÄÄö has traditionally been used in the North "
u"Pole, But to this day it has also spread to Greenland.")
def test_str(self):
self.assertEqual(str(self.dialect), self.signlanguage.name + "/" + self.dialect.name)
class RelationToForeignSignTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="testrel", email=None, password=None)
self.signlanguage = SignLanguage.objects.create(pk=6, name="lala", language_code_3char="lal")
self.dataset = Dataset.objects.create(name="relset", signlanguage=self.signlanguage)
self.gloss = Gloss.objects.create(idgloss="related-GLOSS", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
self.relation = RelationToForeignSign.objects.create(gloss=self.gloss, loan=True, other_lang=u"sÄÄö",
other_lang_gloss="Samp-GLOSS")
def test_str(self):
self.assertEqual(str(self.relation), self.gloss.idgloss + "/" + self.relation.other_lang + "," +
self.relation.other_lang_gloss)
class FieldChoiceTestCase(TestCase):
def setUp(self):
self.fieldchoice = FieldChoice.objects.create(field="field", english_name="mychoice", machine_value=1)
def test_str(self):
self.assertEqual(str(self.fieldchoice), self.fieldchoice.english_name)
class MorphologyDefinitionTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="usermorph", email=None, password=None)
self.signlanguage = SignLanguage.objects.create(pk=11, name="definitive", language_code_3char="def")
self.dataset = Dataset.objects.create(name="morphdata", signlanguage=self.signlanguage)
self.gloss = Gloss.objects.create(idgloss="morhp-gloss", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
self.gloss2 = Gloss.objects.create(idgloss="morhp-gloss2", dataset=self.dataset, created_by=self.user,
updated_by=self.user)
self.fieldchoice = FieldChoice.objects.create(field="newfield", english_name="nice name", machine_value=2)
self.morphdef = MorphologyDefinition.objects.create(parent_gloss=self.gloss, morpheme=self.gloss2,
role=self.fieldchoice)
def test_str(self):
self.assertEqual(str(self.morphdef), self.morphdef.morpheme.idgloss + " is " +
self.morphdef.role.english_name + " of " + self.morphdef.parent_gloss.idgloss)
class FunctionsTestCase(TestCase):
def setUp(self):
self.field = "testField"
f1 = FieldChoice.objects.create(field=self.field, english_name="choice1", machine_value=1)
f2 = FieldChoice.objects.create(field=self.field, english_name="choice_another", machine_value=2)
f3 = FieldChoice.objects.create(field=self.field, english_name="full-of-choices", machine_value=3)
self.choices = []
self.choices.append((str(f1.machine_value), str(f1)))
self.choices.append((str(f2.machine_value), str(f2)))
self.choices.append((str(f3.machine_value), str(f3)))
def test_build_choice_list(self):
"""Test that function returns proper values."""
# TODO: Simulate OperationalError?
self.assertListEqual(build_choice_list(self.field), self.choices)
|
the-stack_106_26363 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to test homemade neural network
Args:
- (OPTIONAL) -e number of epochs (default is 200)
- (OPTIONAL) -b mini-batch size (default is 10)
- (OPTIONAL) -s hidden size, number of neurons per layers (default is 50)
- (OPTIONAL) -l learning rate (default is 0.003)
- (OPTIONAL) -n number of samples for training (default is 1000)
- (OPTIONAL) -t number of samples for testing (default is 1000)
Examples:
Use default parameters:
python3 test_training.py
Use batch size of 100, 5000 training samples and 100 epochs:
python3 test_training.py -b 100 -n 5000 -e 100
"""
import argparse
from framework import *
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-e",
"--num_epochs",
required=False,
default=200,
type=int,
help="Number of epochs (default is 200).",
)
parser.add_argument(
"-b",
"--batch_size",
required=False,
default=10,
type=int,
help="Mini-batch size (default is 10).",
)
parser.add_argument(
"-s",
"--hidden_size",
required=False,
default=50,
type=int,
help="Hidden size: number of neurons for the layers (default is 50).",
)
parser.add_argument(
"-l", "--learning_rate", required=False, default=0.003, type=float, help="Learning rate."
)
parser.add_argument(
"-n",
"--num_samples_train",
required=False,
default=1000,
type=int,
help="Number of samples to generate for training (default is 1000).",
)
parser.add_argument(
"-t",
"--num_samples_test",
required=False,
default=1000,
type=int,
help="Number of samples to generate for testing (default is 1000).",
)
args = parser.parse_args()
# generate train and set set
train_features, train_target = generate_disc_set(args.num_samples_train)
test_features, test_target = generate_disc_set(args.num_samples_test)
# Build the model
Model = Sequential(
[
Linear(2, args.hidden_size),
LeakyReLU(),
Linear(args.hidden_size, args.hidden_size),
LeakyReLU(),
Linear(args.hidden_size, 2),
Softmax(),
],
LossMSE(),
)
# Set the learning rate
Model.set_Lr(args.learning_rate)
# Print model's parameters
Model.print(print_color=False)
# start training
train(
Model,
args.num_epochs,
train_features,
train_target,
test_features,
test_target,
args.batch_size,
)
if __name__ == '__main__':
main()
|
the-stack_106_26370 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' This script provides utils for python scripts in crosswalk.
'''
import os
import sys
import subprocess
def TryAddDepotToolsToPythonPath():
depot_tools = FindDepotToolsInPath()
if depot_tools:
sys.path.append(depot_tools)
python_path = os.environ.get('PYTHONPATH')
if python_path:
os.environ['PYTHONPATH'] = os.path.pathsep.join(
python_path.split(os.path.pathsep)+[depot_tools])
else:
os.environ['PYTHONPATH'] = depot_tools
def FindDepotToolsInPath():
paths = os.getenv('PATH').split(os.path.pathsep)
for path in paths:
if os.path.basename(path) == '':
# path is end with os.path.pathsep
path = os.path.dirname(path)
if os.path.basename(path) == 'depot_tools':
return path
return None
def IsWindows():
return sys.platform == 'cygwin' or sys.platform.startswith('win')
def IsLinux():
return sys.platform.startswith('linux')
def IsMac():
return sys.platform.startswith('darwin')
def GitExe():
if IsWindows():
return 'git.bat'
else:
return 'git'
def GetCommandOutput(command, cwd=None):
proc = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, bufsize=1,
cwd=cwd)
output = proc.communicate()[0]
result = proc.returncode
if result:
raise Exception('%s: %s' % (subprocess.list2cmdline(command), output))
return output
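

if __name__ == '__main__':
    # Small self-check (an assumption, not part of the original module): print
    # what the platform helpers detect and where depot_tools was found, if at
    # all. The last line requires git to be available on PATH.
    print('IsWindows/IsLinux/IsMac:', IsWindows(), IsLinux(), IsMac())
    print('depot_tools in PATH:', FindDepotToolsInPath())
    print('git version:', GetCommandOutput([GitExe(), '--version']).strip())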
|
the-stack_106_26372 | '''patching scipy to fit distributions and expect method
This adds new methods to estimate continuous distribution parameters with some
fixed/frozen parameters. It also contains functions that calculate the expected
value of a function for any continuous or discrete distribution
It temporarily also contains Bootstrap and Monte Carlo functions for testing the
distribution fit, but these are neither general nor verified.
Author: josef-pktd
License: Simplified BSD
'''
from __future__ import print_function
from statsmodels.compat.python import range, lmap
import numpy as np
from scipy import stats, optimize, integrate
########## patching scipy
#vonmises doesn't define finite bounds, because it is intended for circular
#support which does not define a proper pdf on the real line
stats.distributions.vonmises.a = -np.pi
stats.distributions.vonmises.b = np.pi
#the next 3 functions are for fit with some fixed parameters
#As they are written, they do not work as functions, only as methods
def _fitstart(self, x):
'''example method, method of moment estimator as starting values
Parameters
----------
x : array
data for which the parameters are estimated
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
    This needs to be written and attached to each individual distribution.
    This example was written for the gamma distribution, but has not been
    verified against the literature.
'''
loc = np.min([x.min(),0])
a = 4/stats.skew(x)**2
scale = np.std(x) / np.sqrt(a)
return (a, loc, scale)
def _fitstart_beta(self, x, fixed=None):
'''method of moment estimator as starting values for beta distribution
Parameters
----------
x : array
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
for method of moment estimator for known loc and scale
http://en.wikipedia.org/wiki/Beta_distribution#Parameter_estimation
http://www.itl.nist.gov/div898/handbook/eda/section3/eda366h.htm
NIST reference also includes reference to MLE in
Johnson, Kotz, and Balakrishan, Volume II, pages 221-235
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
    a, b = x.min(), x.max()
    # pad the observed support slightly so the data fall strictly inside
    # [loc, loc + scale]
    eps = (b - a) * 0.01
    if fixed is None:
        #this part not checked with books
        loc = a - eps
        scale = (b - a) + 2*eps
else:
if np.isnan(fixed[-2]):
#estimate loc
loc = a - eps
else:
loc = fixed[-2]
if np.isnan(fixed[-1]):
#estimate scale
scale = (b + eps) - loc
else:
scale = fixed[-1]
#method of moment for known loc scale:
scale = float(scale)
xtrans = (x - loc)/scale
xm = xtrans.mean()
xv = xtrans.var()
tmp = (xm*(1-xm)/xv - 1)
p = xm * tmp
q = (1 - xm) * tmp
return (p, q, loc, scale) #check return type and should fixed be returned ?
def _fitstart_poisson(self, x, fixed=None):
'''maximum likelihood estimator as starting values for Poisson distribution
Parameters
----------
x : array
data for which the parameters are estimated
fixed : None or array_like
sequence of numbers and np.nan to indicate fixed parameters and parameters
to estimate
Returns
-------
est : tuple
preliminary estimates used as starting value for fitting, not
necessarily a consistent estimator
Notes
-----
This needs to be written and attached to each individual distribution
References
----------
MLE :
http://en.wikipedia.org/wiki/Poisson_distribution#Maximum_likelihood
'''
#todo: separate out this part to be used for other compact support distributions
# e.g. rdist, vonmises, and truncnorm
# but this might not work because it might still be distribution specific
a = x.min()
eps = 0 # is this robust ?
if fixed is None:
#this part not checked with books
loc = a - eps
else:
if np.isnan(fixed[-1]):
#estimate loc
loc = a - eps
else:
loc = fixed[-1]
#MLE for standard (unshifted, if loc=0) Poisson distribution
xtrans = (x - loc)
lambd = xtrans.mean()
#second derivative d loglike/ dlambd Not used
#dlldlambd = 1/lambd # check
return (lambd, loc) #check return type and should fixed be returned ?
def nnlf_fr(self, thetash, x, frmask):
# new frozen version
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
        if frmask is not None:
theta = frmask.copy()
theta[np.isnan(frmask)] = thetash
else:
theta = thetash
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return np.inf
x = np.array((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (np.any(cond0)):
return np.inf
else:
N = len(x)
#raise ValueError
return self._nnlf(x, *args) + N*np.log(scale)
def fit_fr(self, data, *args, **kwds):
'''estimate distribution parameters by MLE taking some parameters as fixed
Parameters
----------
data : array, 1d
data for which the distribution parameters are estimated,
args : list ? check
starting values for optimization
kwds :
- 'frozen' : array_like
values for frozen distribution parameters and, for elements with
np.nan, the corresponding parameter will be estimated
Returns
-------
argest : array
estimated parameters
Examples
--------
generate random sample
>>> np.random.seed(12345)
>>> x = stats.gamma.rvs(2.5, loc=0, scale=1.2, size=200)
estimate all parameters
>>> stats.gamma.fit(x)
array([ 2.0243194 , 0.20395655, 1.44411371])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, np.nan, np.nan])
array([ 2.0243194 , 0.20395655, 1.44411371])
keep loc fixed, estimate shape and scale parameters
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, np.nan])
array([ 2.45603985, 1.27333105])
keep loc and scale fixed, estimate shape parameter
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
array([ 3.00048828])
>>> stats.gamma.fit_fr(x, frozen=[np.nan, 0.0, 1.2])
array([ 2.57792969])
estimate only scale parameter for fixed shape and loc
>>> stats.gamma.fit_fr(x, frozen=[2.5, 0.0, np.nan])
array([ 1.25087891])
Notes
-----
self is an instance of a distribution class. This can be attached to
scipy.stats.distributions.rv_continuous
*Todo*
* check if docstring is correct
* more input checking, args is list ? might also apply to current fit method
'''
loc0, scale0 = lmap(kwds.get, ['loc', 'scale'],[0.0, 1.0])
Narg = len(args)
if Narg == 0 and hasattr(self, '_fitstart'):
x0 = self._fitstart(data)
elif Narg > self.numargs:
raise ValueError("Too many input arguments.")
else:
args += (1.0,)*(self.numargs-Narg)
# location and scale are at the end
x0 = args + (loc0, scale0)
if 'frozen' in kwds:
frmask = np.array(kwds['frozen'])
if len(frmask) != self.numargs+2:
raise ValueError("Incorrect number of frozen arguments.")
else:
# keep starting values for not frozen parameters
x0 = np.array(x0)[np.isnan(frmask)]
else:
frmask = None
    #print(x0)
    #print(frmask)
return optimize.fmin(self.nnlf_fr, x0,
args=(np.ravel(data), frmask), disp=0)
#The next two functions/methods calculate expected value of an arbitrary
#function, however for the continuous functions intquad is use, which might
#require continuouity or smoothness in the function.
#TODO: add option for Monte Carlo integration
def expect(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
    not finite. The integration behavior is inherited from scipy.integrate.quad.
'''
if fn is None:
def fun(x, *args):
return x*self.pdf(x, loc=loc, scale=scale, *args)
else:
def fun(x, *args):
return fn(x)*self.pdf(x, loc=loc, scale=scale, *args)
if lb is None:
lb = loc + self.a * scale #(self.a - loc)/(1.0*scale)
if ub is None:
ub = loc + self.b * scale #(self.b - loc)/(1.0*scale)
if conditional:
invfac = (self.sf(lb, loc=loc, scale=scale, *args)
- self.sf(ub, loc=loc, scale=scale, *args))
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args)[0]/invfac
def expect_v2(self, fn=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False):
'''calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set using
quantiles of the distribution, see Notes
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Returns
-------
expected value : float
Notes
-----
    This function has not been checked for its behavior when the integral is
    not finite. The integration behavior is inherited from scipy.integrate.quad.
The default limits are lb = self.ppf(1e-9, *args), ub = self.ppf(1-1e-9, *args)
For some heavy tailed distributions, 'alpha', 'cauchy', 'halfcauchy',
'levy', 'levy_l', and for 'ncf', the default limits are not set correctly
even when the expectation of the function is finite. In this case, the
integration limits, lb and ub, should be chosen by the user. For example,
for the ncf distribution, ub=1000 works in the examples.
There are also problems with numerical integration in some other cases,
for example if the distribution is very concentrated and the default limits
are too large.
'''
#changes: 20100809
#correction and refactoring how loc and scale are handled
#uses now _pdf
#needs more testing for distribution with bound support, e.g. genpareto
if fn is None:
def fun(x, *args):
return (loc + x*scale)*self._pdf(x, *args)
else:
def fun(x, *args):
return fn(loc + x*scale)*self._pdf(x, *args)
if lb is None:
#lb = self.a
try:
lb = self.ppf(1e-9, *args) #1e-14 quad fails for pareto
except ValueError:
lb = self.a
else:
lb = max(self.a, (lb - loc)/(1.0*scale)) #transform to standardized
if ub is None:
#ub = self.b
try:
ub = self.ppf(1-1e-9, *args)
except ValueError:
ub = self.b
else:
ub = min(self.b, (ub - loc)/(1.0*scale))
if conditional:
invfac = self._sf(lb,*args) - self._sf(ub,*args)
else:
invfac = 1.0
return integrate.quad(fun, lb, ub,
args=args, limit=500)[0]/invfac
### for discrete distributions
#TODO: check that for a distribution with finite support the calculations are
# done with one array summation (np.dot)
#based on _drv2_moment(self, n, *args), but streamlined
def expect_discrete(self, fn=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
'''calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
(self : distribution instance as defined in scipy stats)
fn : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution, lb and ub are inclusive (ul<=k<=ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that ul<=k<=ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
    * accuracy: uses self.moment_tol as stopping criterion
        for heavy-tailed distributions, e.g. zipf(4), the accuracy for the
        mean and variance in the example is only 1e-5;
        increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
'''
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if fn is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return fn(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc
if ub is None:
ub = (self.b)
else:
ub = ub - loc
if conditional:
invfac = self.sf(lb,*args) - self.sf(ub+1,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
    #print('low, upp', low, upp)
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# replace with proper warning
print('sum did not converge')
return tot/invfac
stats.distributions.rv_continuous.fit_fr = fit_fr
stats.distributions.rv_continuous.nnlf_fr = nnlf_fr
stats.distributions.rv_continuous.expect = expect
stats.distributions.rv_discrete.expect = expect_discrete
stats.distributions.beta_gen._fitstart = _fitstart_beta #not tried out yet
stats.distributions.poisson_gen._fitstart = _fitstart_poisson #not tried out yet
########## end patching scipy
def distfitbootstrap(sample, distr, nrepl=100):
'''run bootstrap for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : array
original sample data for bootstrap
distr : distribution instance with fit_fr method
nrepl : integer
number of bootstrap replications
Returns
-------
res : array (nrepl,)
parameter estimates for all bootstrap replications
'''
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
rvsind = np.random.randint(nobs, size=nobs)
x = sample[rvsind]
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res
def distfitmc(sample, distr, nrepl=100, distkwds={}):
'''run Monte Carlo for estimation of distribution parameters
hard coded: only one shape parameter is allowed and estimated,
loc=0 and scale=1 are fixed in the estimation
Parameters
----------
sample : array
original sample data, in Monte Carlo only used to get nobs,
distr : distribution instance with fit_fr method
nrepl : integer
number of Monte Carlo replications
Returns
-------
res : array (nrepl,)
parameter estimates for all Monte Carlo replications
'''
arg = distkwds.pop('arg')
nobs = len(sample)
res = np.zeros(nrepl)
for ii in range(nrepl):
x = distr.rvs(arg, size=nobs, **distkwds)
res[ii] = distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0])
return res
def printresults(sample, arg, bres, kind='bootstrap'):
'''calculate and print Bootstrap or Monte Carlo results
Parameters
----------
sample : array
original sample data
arg : float (for general case will be array)
bres : array
parameter estimates from Bootstrap or Monte Carlo run
kind : {'bootstrap', 'montecarlo'}
output is printed for Bootstrap (default) or Monte Carlo
Returns
-------
None, currently only printing
Notes
-----
still a bit of a mess because it is used for both Bootstrap and Monte Carlo
relies on the module-level globals nobs, distr and nrepl
made correction:
reference point for bootstrap is the estimated parameter
not clear:
I'm not doing any ddof adjustment in the estimation of the variance, do we
need ddof>0 ?
todo: return results and a string instead of printing
'''
print('true parameter value')
print(arg)
print('MLE estimate of parameters using sample (nobs=%d)'% (nobs))
argest = distr.fit_fr(sample, frozen=[np.nan, 0.0, 1.0])
print(argest)
if kind == 'bootstrap':
#bootstrap compares to estimate from sample
argorig = arg
arg = argest
print('%s distribution of parameter estimate (nrepl=%d)'% (kind, nrepl))
print('mean = %f, bias=%f' % (bres.mean(0), bres.mean(0)-arg))
print('median', np.median(bres, axis=0))
print('var and std', bres.var(0), np.sqrt(bres.var(0)))
bmse = ((bres - arg)**2).mean(0)
print('mse, rmse', bmse, np.sqrt(bmse))
bressorted = np.sort(bres)
print('%s confidence interval (90%% coverage)' % kind)
print(bressorted[int(nrepl*0.05)], bressorted[int(nrepl*0.95)]) # integer indices required
print('%s confidence interval (90%% coverage) normal approximation' % kind)
print(stats.norm.ppf(0.05, loc=bres.mean(), scale=bres.std()),)
print(stats.norm.isf(0.05, loc=bres.mean(), scale=bres.std()))
print('Kolmogorov-Smirnov test for normality of %s distribution' % kind)
print(' - estimated parameters, p-values not really correct')
print(stats.kstest(bres, 'norm', (bres.mean(), bres.std())))
if __name__ == '__main__':
examplecases = ['largenumber', 'bootstrap', 'montecarlo'][:]
if 'largenumber' in examplecases:
print('\nDistribution: vonmises')
for nobs in [200]:#[20000, 1000, 100]:
x = stats.vonmises.rvs(1.23, loc=0, scale=1, size=nobs)
print('\nnobs:', nobs)
print('true parameter')
print('1.23, loc=0, scale=1')
print('unconstrained')
print(stats.vonmises.fit(x))
print(stats.vonmises.fit_fr(x, frozen=[np.nan, np.nan, np.nan]))
print('with fixed loc and scale')
print(stats.vonmises.fit_fr(x, frozen=[np.nan, 0.0, 1.0]))
print('\nDistribution: gamma')
distr = stats.gamma
arg, loc, scale = 2.5, 0., 20.
for nobs in [200]:#[20000, 1000, 100]:
x = distr.rvs(arg, loc=loc, scale=scale, size=nobs)
print('\nnobs:', nobs)
print('true parameter')
print('%f, loc=%f, scale=%f' % (arg, loc, scale))
print('unconstrained')
print(distr.fit(x))
print(distr.fit_fr(x, frozen=[np.nan, np.nan, np.nan]))
print('with fixed loc and scale')
print(distr.fit_fr(x, frozen=[np.nan, 0.0, 1.0]))
print('with fixed loc')
print(distr.fit_fr(x, frozen=[np.nan, 0.0, np.nan]))
ex = ['gamma', 'vonmises'][0]
if ex == 'gamma':
distr = stats.gamma
arg, loc, scale = 2.5, 0., 1
elif ex == 'vonmises':
distr = stats.vonmises
arg, loc, scale = 1.5, 0., 1
else:
raise ValueError('wrong example')
nobs = 100
nrepl = 1000
sample = distr.rvs(arg, loc=loc, scale=scale, size=nobs)
print('\nDistribution:', distr)
if 'bootstrap' in examplecases:
print('\nBootstrap')
bres = distfitbootstrap(sample, distr, nrepl=nrepl )
printresults(sample, arg, bres)
if 'montecarlo' in examplecases:
print('\nMonteCarlo')
mcres = distfitmc(sample, distr, nrepl=nrepl,
distkwds=dict(arg=arg, loc=loc, scale=scale))
printresults(sample, arg, mcres, kind='montecarlo')
|
the-stack_106_26373 | #!/usr/bin/python
from smfishHmrf.HMRFInstance import HMRFInstance
from smfishHmrf.DatasetMatrix import DatasetMatrix, DatasetMatrixSingleField, DatasetMatrixMultiField
from smfishHmrf.spatial import rank_transform_matrix, calc_silhouette_per_gene
import sys
import os
import math
import subprocess
import numpy as np
import scipy
import scipy.stats
from scipy.stats import zscore
from scipy.spatial.distance import euclidean, squareform, pdist
import smfishHmrf.reader as reader
import pandas as pd
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#import seaborn as sns
from smfishHmrf.bias_correction import calc_bias_moving, do_pca, plot_pca
from scipy.cluster.vq import kmeans2
import argparse
def read_centroid(n, cells):
map_cell = {}
for ind,val in enumerate(cells):
map_cell[val] = ind
f = open(n)
num_cell = 0
for l in f:
l = l.rstrip("\n")
num_cell+=1
f.close()
Xcen = np.empty((num_cell, 2), dtype="float32")
field = np.empty((num_cell), dtype="int32")
f = open(n)
for l in f:
l = l.rstrip("\n")
ll = l.split()
x1, x2 = float(ll[0]), float(ll[1])
t_id = map_cell[ll[-1]]
#t_id = int(ll[-1].split("_")[1]) - 1
Xcen[t_id, :] = [x1, x2]
field[t_id] = 100
f.close()
return Xcen, field
def read_graph(n, cells):
map_cell = {}
for ind,val in enumerate(cells):
map_cell[val] = ind
f = open(n)
edges = set([])
for l in f:
l = l.rstrip("\n")
ll = l.split("\t")
e1, e2 = ll
e1_id = map_cell[e1]
e2_id = map_cell[e2]
#e1_id = int(e1.split("_")[1])-1
#e2_id = int(e2.split("_")[1])-1
edges.add(tuple(sorted([e1_id, e2_id])))
f.close()
return edges
def read_expression_classic(n):
f = open(n)
#h = f.readline().rstrip("\n").split()
h = f.readline().rstrip("\n")
#header begins with space
if h.startswith(" "):
h = h.split()
else: #header startswith a gene name
h = h.split()[1:]
num_cell = len(h)
num_gene = 0
for l in f:
l = l.rstrip("\n")
ll = l.split()
#gene = ll[0]
num_gene+=1
f.close()
mat = np.empty((num_gene, num_cell), dtype="float32")
genes = []
cells = h
f = open(n)
f.readline()
gid = 0
for l in f:
l = l.rstrip("\n")
ll = l.split()
genes.append(ll[0])
mat[gid, :] = [float(v) for v in ll[1:]]
gid+=1
f.close()
return mat, genes, cells
def connected_components(edges, adjacent, points):
visited = {}
chains = []
for p in sorted(list(points)):
visited[p] = False
for p in sorted(list(points)):
if visited[p]==False:
new_chain = []
visited, new_chain = DFS(p, adjacent, visited, new_chain)
chains.append(new_chain)
return chains
def DFS(p, adjacent, visited, new_chain):
visited[p] = True
new_chain.append(p)
for nei in sorted(list(adjacent[p])):
if visited[nei]==False:
visited, new_chain = DFS(nei, adjacent, visited, new_chain)
return visited, new_chain
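# Small worked example (hypothetical graph) for the two helpers above:
#   edges = {(0, 1), (2, 3)}
#   adjacent = {0: {1}, 1: {0}, 2: {3}, 3: {2}}
#   connected_components(edges, adjacent, {0, 1, 2, 3})  # -> [[0, 1], [2, 3]]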
if __name__=="__main__":
parser = argparse.ArgumentParser(description="HMRF.", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-l", "--location", dest="location", type=str, required=True)
parser.add_argument("-g", "--genes", dest="genes", type=str, required=True)
parser.add_argument("-n", "--network", dest="network", type=str, required=True)
parser.add_argument("-e", "--expression", dest="expression", type=str, required=True)
parser.add_argument("-o", "--outdir", dest="outdir", type=str, required=True)
parser.add_argument("-a", "--name", dest="name", type=str, required=True)
parser.add_argument("-k", "--k", dest="k", type=int, required=True)
parser.add_argument("-b", "--betas", help="three numbers: start_beta, beta_increment, num_beta (e.g. 0 2.0 50)", nargs=3, dest="betas", type=float, required=True)
parser.add_argument("-t", "--tolerance", dest="tolerance", type=float, help="tolerance value", default=1e-10)
parser.add_argument("-s", "--seed", dest="seed", type=float, help="seed for random initialization of HMRF. -1 will not fix it.", default=-1)
parser.add_argument("-z", "--zscore", type=str, dest="zscore", choices=["rowcol", "colrow", "none"], default="none", help="zscore the matrix after subsetting to spatial genes. Rowcol: row(gene) first, column(cell) next.")
parser.add_argument("-i", "--numinit", type=int, dest="num_init", default=100, help="number of initializations")
args = parser.parse_args()
sys.setrecursionlimit(50000)
#print args
#sys.exit(0)
mat, genes, cells = read_expression_classic(args.expression)
print("Done reading expression")
Xcen, field = read_centroid(args.location, cells)
print("Done reading location")
#mat = pd.read_table(args.expression, sep=" ", header=0, index_col=0)
#print mat.index
'''
genes = []
for g in range(mat.index.shape[0]):
genes.append(str(mat.index[g]))
#print genes
expr = np.copy(mat.values)
'''
genes_good = reader.read_genes(args.genes)
expr = mat
new_dset = DatasetMatrixSingleField(expr, genes, None, Xcen)
edges = read_graph(args.network, cells)
print("Done reading graph")
points = set([])
adjacent = {}
for e1,e2 in edges:
points.add(e1)
points.add(e2)
ncell = expr.shape[1]
ngene = expr.shape[0]
#print ncell, ngene
'''
dist = pdist(Xcen, metric="euclidean")
dist = squareform(dist)
for i in range(ncell):
if i in points: continue
dist_i = sorted([(dist[i,j],j) for j in range(ncell) if i!=j])
edges.add(tuple(sorted([i, dist_i[0][1]])))
'''
for e1,e2 in edges:
adjacent.setdefault(e1, set([]))
adjacent.setdefault(e2, set([]))
adjacent[e1].add(e2)
adjacent[e2].add(e1)
new_dset.edges = edges
new_dset.adjacent = adjacent
print("Start calculating independent regions")
conn = connected_components(edges, adjacent, points)
blocks = {}
for ind_con,con in enumerate(conn):
all_vert = con
set_all_vert = set(all_vert)
map_vert = {}
for ind,val in enumerate(all_vert):
map_vert[val] = ind
print("Edges for component", ind_con)
outdir = args.outdir
if not os.path.isdir(outdir):
os.mkdir(outdir)
edge_file = os.path.join(outdir, "edges.txt")
block_file = os.path.join(outdir, "blocks.txt")
#edge_file = "/tmp/edges.txt"
#block_file = "/tmp/blocks.txt"
fw = open(edge_file, "w")
for e1, e2 in edges:
if e1 in set_all_vert and e2 in set_all_vert:
fw.write("%d %d\n" % (map_vert[e1]+1, map_vert[e2]+1))
fw.close()
import smfishHmrf
this_path = os.path.dirname(smfishHmrf.__file__) + "/graphColoring"
subprocess.call("java -cp '%s' -Xmx32g -Xms32g GraphColoring '%s' '%s' '%d'" % (this_path, edge_file, block_file, args.seed), shell=True)
f = open(block_file)
b_ind = 0
for l in f:
l = l.rstrip("\n")
ll = l.split()
#self.blocks.append(int(ll[1]))
blocks[all_vert[b_ind]] = int(ll[1])
b_ind+=1
f.close()
#self.blocks = np.array(self.blocks)
new_blocks = []
for b in range(0, len(blocks.keys())):
new_blocks.append(blocks[b])
new_dset.blocks = np.array(new_blocks)
print("Finished calculating independent regions")
'''
print("Start calculating independent region")
new_dset.calc_independent_region()
print("Finished calculating independent region")
'''
t_dset = new_dset.subset_genes(genes_good)
if args.zscore=="colrow":
t_dset.expr = zscore(t_dset.expr, axis=0) #per column (cell)
t_dset.expr = zscore(t_dset.expr, axis=1) #per row (gene)
elif args.zscore=="rowcol":
t_dset.expr = zscore(t_dset.expr, axis=1) #per row (gene)
t_dset.expr = zscore(t_dset.expr, axis=0) #per col (cell)
outdir = args.outdir
st_beta, incr_beta, num_beta = args.betas
st_beta = float(st_beta)
incr_beta = float(incr_beta)
num_beta = int(num_beta)
if not os.path.isdir(outdir):
os.mkdir(outdir)
this_hmrf = HMRFInstance(args.name, outdir, t_dset, args.k, st_beta, incr_beta, num_beta, tolerance=args.tolerance)
this_hmrf.init(nstart=args.num_init, seed=args.seed)
this_hmrf.run()
|
the-stack_106_26374 | from Population import Population
import matplotlib.pyplot as plt
import copy
def text2array_unicode(string: str) -> list:
"""
Return an array of char ascii codes for each character in string
"""
array_unicode = []
for letter in string:
array_unicode.append(ord(letter))
return array_unicode
def array2text_unicode(array: list) -> str:
string = ""
for code in array:
string += chr(code)
return string
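# Quick round-trip example for the two helpers above:
#   text2array_unicode("ab")      # -> [97, 98]
#   array2text_unicode([97, 98])  # -> "ab"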
generations = 200 # just a stopping criterion in case the program cannot find a solution
max_population = 50 # the higher it is, the better the chance of finding a good combination quickly, but the more processing is needed
num_parents_to_select = 3 # it is better to select very few, no more than 10% of the max population
# all posible gens
gens_set = text2array_unicode("abcdefghijklmnopqrstuvwxyz.-, ")
mutation_rate = 0.9
target = "to be or not to be"
target_unicode_set = text2array_unicode(target)
num_gens = len(target_unicode_set) # length of text
population = Population(gens_set, max_population, mutation_rate)
population.generate_initial_population(num_gens)
# PLOT VARIABLES
history_fitness_mean = []
history_change = []
for generation in range(1, generations+1):
# fitness is a list with scores of each individual
fitness = population.fitness_function(target_unicode_set)
# being elitist means that the parents are the best 2 (maybe a little more)
best_individuals_index = Population.select_best_individuals_by_elitist(fitness, num_parents_to_select)
parents_selected = []
parents_selected_fitness = []
for best_index in best_individuals_index:
parents_selected.append(copy.deepcopy(population.chromosomes[best_index]))
parents_selected_fitness.append(fitness[best_index])
################### RESULTS ACTUAL GENERATION ###################
print(f"\n*\nBest result for generation {generation}\n")
print("Individuals (chromosomes)", [str(parent) for parent in parents_selected])
print("fitness for individuals", parents_selected_fitness)
history_fitness_mean.append(sum(fitness)/len(fitness))
###################
offspring_crossover = Population.crossover(copy.deepcopy(parents_selected), max_population-num_parents_to_select)
# Creating the new population based on the parents and offspring.
offspring_mutated = population.mutate(offspring_crossover)
#print(f"Selected{len(parents_selected)}, mutated{len(offspring_mutated)}, chrom{len(population.chromosomes)}")
# print("PARENTS SELECTED", [str(parent) for parent in parents_selected])
# print("SELECTED MUTATED", [str(parent) for parent in offspring_mutated])
population.chromosomes[:num_parents_to_select] = parents_selected[:]
population.chromosomes[num_parents_to_select:] = offspring_mutated[:]
#print([str(parent) for parent in population.chromosomes])
if Population.has_reached_the_top(parents_selected_fitness, target_unicode_set):
print("FUNCTION HAS CONVERGED")
break
final_generations = len(history_fitness_mean)
best_chromosome_index = Population.select_best_individuals_by_elitist(fitness, 1)
best_chromosome = population.chromosomes[best_chromosome_index[0]]
print("Answer", array2text_unicode(best_chromosome.gens))
print("Target", target)
"""
plt.plot(range(1, final_generations+1), history_fitness_mean)
plt.show()
"""
|
the-stack_106_26375 | import unittest
import codecs
from os import path
from . import xml_response_parsers
testdata_dir = path.join(path.dirname(__file__), 'testdata')
def load_data(filename):
full_path = path.join(testdata_dir, filename)
f = codecs.open(full_path, encoding="utf8")
return f.read()
class TestQuestionResponseParser(unittest.TestCase):
def test_parse_response(self):
response_xml = load_data('question_responses.xml')
parsed = xml_response_parsers.subscriber_responses_as_list_of_dicts(response_xml)
r1, r2, r3 = parsed
assert('question_id' in r1)
assert(r1['question_id'] == 'MTAwNDk=')
|
the-stack_106_26378 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
from dictquery import __version__
# Package meta-data.
NAME = 'dictquery'
DESCRIPTION = 'Library to query python dicts'
URL = 'https://github.com/cyberlis/dictquery'
EMAIL = '[email protected]'
AUTHOR = 'Denis Lisovik'
REQUIRED = []
here = os.path.abspath(os.path.dirname(__file__))
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=__version__,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=['dictquery'],
entry_points={},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
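# With the cmdclass registered above, a release can be published via:
#   python setup.py upload
# which (per UploadCommand.run) removes dist/, rebuilds the sdist/wheel and uploads via Twine.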
|
the-stack_106_26379 | import torch
import torch.nn as nn
from torch.autograd import Variable
class PoetryNet(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, num_layers=2):
super(PoetryNet, self).__init__()
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.embeddings = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers)
self.linear1 = nn.Linear(self.hidden_dim, vocab_size)
def forward(self, input, hidden_state=None):
seq_len, batch_size = input.size()
if hidden_state is None:
h_0 = torch.zeros(self.num_layers, batch_size, self.hidden_dim)
c_0 = torch.zeros(self.num_layers, batch_size, self.hidden_dim)
h_0, c_0 = Variable(h_0), Variable(c_0)
else:
h_0, c_0 = hidden_state
embeds = self.embeddings(input)
output, hidden_state = self.lstm(embeds, (h_0, c_0))
output = self.linear1(output.view(seq_len * batch_size, -1))
return output, hidden_state
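# Minimal usage sketch (hypothetical sizes): the network expects a (seq_len, batch)
# LongTensor of token ids and returns logits of shape (seq_len * batch, vocab_size):
#
#   net = PoetryNet(vocab_size=8000, embedding_dim=128, hidden_dim=256)
#   tokens = torch.randint(0, 8000, (48, 16))   # seq_len=48, batch=16
#   logits, hidden = net(tokens)                # logits: (48 * 16, 8000)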
|
the-stack_106_26380 | import unittest
from fds.analyticsapi.engines.api.benchmarks_api import BenchmarksApi
from fds.analyticsapi.engines.model.spar_benchmark_root import SPARBenchmarkRoot
import common_parameters
from common_functions import CommonFunctions
class TestSparBenchmarkApi(unittest.TestCase):
def setUp(self):
self.spar_benchmark_api = BenchmarksApi(
CommonFunctions.build_api_client())
def test_get_spar_benchmark_by_id(self):
response = self.spar_benchmark_api.get_spar_benchmark_by_id(common_parameters.spar_benchmark_r1000,
_return_http_data_only=False
)
self.assertEqual(response[1], 200, "Response should be 200 - Success")
self.assertEqual(
type(response[0]), SPARBenchmarkRoot, "Response should be of SPARBenchmark type")
if __name__ == '__main__':
unittest.main()
|
the-stack_106_26382 | # coding:utf8
"""
# pylint: disable=line-too-long
url = "http://web.ifzq.gtimg.cn/appstock/app/hkfqkline/get?_var=kline_dayqfq¶m=hk00001,day,,,660,qfq&r=0.7773272375526847"
Adjustable URL parameters:
stock code: hk00001
number of daily K-line days: 660
Change these to the stock code and number of days you need, for example:
# pylint: disable=line-too-long
url = "http://web.ifzq.gtimg.cn/appstock/app/hkfqkline/get?_var=kline_dayqfq¶m=hk00700,day,,,350,qfq&r=0.7773272375526847"
"""
import json
import re
from . import basequotation
class DayKline(basequotation.BaseQuotation):
"""腾讯免费行情获取"""
max_num = 1
@property
def stock_api(self) -> str:
# pylint: disable=line-too-long
return "http://web.ifzq.gtimg.cn/appstock/app/hkfqkline/get?_var=kline_dayqfq¶m="
def _gen_stock_prefix(self, stock_codes, day=1500):
return ["hk{},day,,,{},qfq".format(code, day) for code in stock_codes]
def format_response_data(self, rep_data, **kwargs):
stock_dict = {}
for raw_quotation in rep_data:
raw_stocks_detail = re.search(r"=(.*)", raw_quotation).group(1)
stock_details = json.loads(raw_stocks_detail)
for stock, value in stock_details["data"].items():
stock_code = stock[2:]
if "qfqday" in value:
stock_detail = value["qfqday"]
else:
stock_detail = value.get("day")
if stock_detail is None:
print("stock code data not find %s"%stock_code)
continue
stock_dict[stock_code] = stock_detail
break
return stock_dict
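# Note on the request format derived from `_gen_stock_prefix` above: each code is
# expanded to "hk<code>,day,,,<day>,qfq" (default day=1500), e.g. "00700" with day=350
# becomes "hk00700,day,,,350,qfq", which the base class presumably appends to
# `stock_api` to build the full URL shown in the module docstring.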
if __name__ == "__main__":
pass
|
the-stack_106_26384 | import pickle
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import matplotlib.pyplot as plt
import layers as ly
training_file = "train.p"
with open(training_file, mode="rb") as f:
train = pickle.load(f)
X_train, y_train = train["features"], train["labels"]
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
x_gray = tf.placeholder(tf.float32, (None, 32, 32, 1))
grayscale = tf.image.rgb_to_grayscale(x)
normalize = ly.normalize_grayscale(x_gray)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
x_grayscale = sess.run(grayscale, feed_dict={x: X_train})
x_normalize = sess.run(normalize, feed_dict={x_gray: x_grayscale})
f, axes = plt.subplots(1, 2)
axes[0].imshow(X_train[0])
axes[1].imshow(x_grayscale[0], cmap="gray")
plt.show()
f, axes = plt.subplots(1, 2)
axes[0].hist(x_grayscale[0].flatten())
axes[0].set_title("Grayscale")
axes[1].hist(x_normalize[0].flatten())
axes[1].set_title("Normalized")
plt.show()
|
the-stack_106_26387 | from collections.abc import Sequence
from itertools import chain
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.python.framework.smart_cond import smart_cond
from .set_utils import (
build_dense_dropout_model, PaddedToSegments, SegmentAggregation,
cumulative_softmax_weighting, cumulative_segment_mean)
from .utils import segment_softmax
class PositionalEncoding(tf.keras.layers.Layer):
def __init__(self, max_time=20000, n_dim=10, **kwargs):
self.max_time = max_time
self.n_dim = n_dim
self._num_timescales = self.n_dim // 2
super().__init__(**kwargs)
def get_timescales(self):
# This is a bit hacky, but works
timescales = self.max_time ** np.linspace(0, 1, self._num_timescales)
return timescales
def build(self, input_shape):
assert len(input_shape) == 3
self.timescales = self.add_weight(
'timescales',
(self._num_timescales, ),
trainable=False,
initializer=tf.compat.v1.keras.initializers.Constant(self.get_timescales())
)
def __call__(self, times):
scaled_time = times / self.timescales[None, None, :]
signal = tf.concat(
[
tf.sin(scaled_time),
tf.cos(scaled_time)
],
axis=-1)
return signal
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1], self.n_dim)
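# Worked example with the default arguments: max_time=20000 and n_dim=10 give
# n_dim // 2 = 5 timescales, 20000 ** [0, 0.25, 0.5, 0.75, 1] ~= [1, 11.9, 141.4,
# 1681.8, 20000]; each time is divided by every timescale and passed through sin
# and cos to produce the n_dim-dimensional encoding.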
class CumulativeSetAttentionLayer(tf.keras.layers.Layer):
dense_options = {
'activation': 'relu',
'kernel_initializer': 'he_uniform'
}
def __init__(self, n_layers=2, width=128, latent_width=128,
aggregation_function='mean',
dot_prod_dim=64, n_heads=4, attn_dropout=0.3):
super().__init__()
assert aggregation_function == 'mean'
self.width = width
self.dot_prod_dim = dot_prod_dim
self.attn_dropout = attn_dropout
self.n_heads = n_heads
self.psi = build_dense_dropout_model(
n_layers, width, 0., self.dense_options)
self.psi.add(Dense(latent_width, **self.dense_options))
self.rho = Dense(latent_width, **self.dense_options)
def build(self, input_shape):
self.psi.build(input_shape)
encoded_shape = self.psi.compute_output_shape(input_shape)
self.rho.build(encoded_shape)
self.W_k = self.add_weight(
'W_k',
(encoded_shape[-1] + input_shape[-1], self.dot_prod_dim*self.n_heads),
initializer='he_uniform'
)
self.W_q = self.add_weight(
'W_q', (self.n_heads, self.dot_prod_dim),
initializer=tf.compat.v1.keras.initializers.Zeros()
)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.n_heads)
def call(self, inputs, segment_ids, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
encoded = self.psi(inputs)
# cumulative mean aggregation
agg = cumulative_segment_mean(encoded, segment_ids)
agg = self.rho(agg)
combined = tf.concat([inputs, agg], axis=-1)
keys = tf.matmul(combined, self.W_k)
keys = tf.stack(tf.split(keys, self.n_heads, -1), 1)
keys = tf.expand_dims(keys, axis=2)
# should have shape (el, heads, 1, dot_prod_dim)
queries = tf.expand_dims(tf.expand_dims(self.W_q, -1), 0)
# should have shape (1, heads, dot_prod_dim, 1)
preattn = tf.matmul(keys, queries) / tf.sqrt(float(self.dot_prod_dim))
preattn = tf.squeeze(tf.squeeze(preattn, -1), -1)
return preattn
class SetAttentionLayer(tf.keras.layers.Layer):
dense_options = {
'activation': 'relu',
'kernel_initializer': 'he_uniform'
}
def __init__(self, n_layers=2, width=128, latent_width=128,
aggregation_function='mean',
dot_prod_dim=64, n_heads=4, attn_dropout=0.3):
super().__init__()
self.width = width
self.dot_prod_dim = dot_prod_dim
self.attn_dropout = attn_dropout
self.n_heads = n_heads
self.psi = build_dense_dropout_model(
n_layers, width, 0., self.dense_options)
self.psi.add(Dense(latent_width, **self.dense_options))
self.psi_aggregation = SegmentAggregation(aggregation_function)
self.rho = Dense(latent_width, **self.dense_options)
def build(self, input_shape):
self.psi.build(input_shape)
encoded_shape = self.psi.compute_output_shape(input_shape)
agg_shape = self.psi_aggregation.compute_output_shape(encoded_shape)
self.rho.build(agg_shape)
self.W_k = self.add_weight(
'W_k',
(encoded_shape[-1] + input_shape[-1], self.dot_prod_dim*self.n_heads),
initializer='he_uniform'
)
self.W_q = self.add_weight(
'W_q', (self.n_heads, self.dot_prod_dim),
initializer=tf.compat.v1.keras.initializers.Zeros()
)
def call(self, inputs, segment_ids, lengths, training=None):
if training is None:
training = tf.keras.backend.learning_phase()
def dropout_attn(input_tensor):
if self.attn_dropout > 0:
mask = (
tf.random.uniform(
tf.shape(input=input_tensor)[:-1]
) < self.attn_dropout)
return (
input_tensor
+ tf.expand_dims(tf.cast(mask, tf.float32), -1) * -1e9
)
else:
return tf.identity(input_tensor)
encoded = self.psi(inputs)
agg = self.psi_aggregation(encoded, segment_ids)
agg = self.rho(agg)
agg_scattered = tf.gather_nd(agg, tf.expand_dims(segment_ids, -1))
combined = tf.concat([inputs, agg_scattered], axis=-1)
keys = tf.matmul(combined, self.W_k)
keys = tf.stack(tf.split(keys, self.n_heads, -1), 1)
keys = tf.expand_dims(keys, axis=2)
# should have shape (el, heads, 1, dot_prod_dim)
queries = tf.expand_dims(tf.expand_dims(self.W_q, -1), 0)
# should have shape (1, heads, dot_prod_dim, 1)
preattn = tf.matmul(keys, queries) / tf.sqrt(float(self.dot_prod_dim))
preattn = tf.squeeze(preattn, -1)
preattn = smart_cond(
training,
lambda: dropout_attn(preattn),
lambda: tf.identity(preattn)
)
per_head_preattn = tf.unstack(preattn, axis=1)
attentions = []
for pre_attn in per_head_preattn:
attentions.append(segment_softmax(pre_attn, segment_ids))
return attentions
def compute_output_shape(self, input_shape):
return list(chain(input_shape[:-1], (self.n_heads, )))
class IdentityLayer(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def compute_output_shape(self, input_shapes):
return input_shapes
def call(self, inputs, **kwargs):
return inputs
class DeepSetAttentionModel(tf.keras.Model):
dense_options = {
'activation': 'relu',
'kernel_initializer': 'he_uniform'
}
def __init__(self, output_activation, output_dims, n_phi_layers, phi_width,
n_psi_layers, psi_width, psi_latent_width, dot_prod_dim,
n_heads, attn_dropout, latent_width, phi_dropout,
n_rho_layers, rho_width, rho_dropout, max_timescale,
n_positional_dims):
self._config = {
name: val for name, val in locals().items()
if name not in ['self', '__class__']
}
super().__init__()
self.phi_width = phi_width
self.to_segments = PaddedToSegments()
# If we set n_positional_dims to 0, skip the positional encoding
self.positional_encoding = (
PositionalEncoding(max_timescale, n_positional_dims)
if n_positional_dims != 0
else IdentityLayer()
)
# We need the input dimensionality in order to determine the size of
# the embedding for the demographics.
self.demo_encoder = None
if isinstance(output_dims, Sequence):
# We have an online prediction scenario
assert output_dims[0] is None
self.return_sequences = True
output_dims = output_dims[1]
else:
self.return_sequences = False
# Build phi architecture
self.phi = build_dense_dropout_model(
n_phi_layers, phi_width, phi_dropout, self.dense_options)
self.phi.add(Dense(latent_width, **self.dense_options))
self.latent_width = latent_width
self.n_heads = n_heads
if self.return_sequences:
self.attention = CumulativeSetAttentionLayer(
n_psi_layers, psi_width, psi_latent_width,
dot_prod_dim=dot_prod_dim, n_heads=n_heads,
attn_dropout=attn_dropout
)
else:
self.attention = SetAttentionLayer(
n_psi_layers, psi_width, psi_latent_width,
dot_prod_dim=dot_prod_dim, n_heads=n_heads,
attn_dropout=attn_dropout
)
self.aggregation = SegmentAggregation(
aggregation_fn='sum',
cumulative=self.return_sequences
)
# Build rho architecture
self.rho = build_dense_dropout_model(
n_rho_layers, rho_width, rho_dropout, self.dense_options)
self.rho.add(Dense(output_dims, activation=output_activation))
self._n_modalities = None
def build(self, input_shapes):
if self.return_sequences:
demo, times, values, measurements, lengths, inverse_timepoints, pred_lengths = input_shapes
else:
demo, times, values, measurements, lengths = input_shapes
self.positional_encoding.build(times)
transformed_times = (
self.positional_encoding.compute_output_shape(times))
mod_shape = self._n_modalities
phi_input_dim = transformed_times[-1] + values[-1] + mod_shape
self.demo_encoder = tf.keras.Sequential(
[
tf.keras.layers.Dense(self.phi_width, activation='relu'),
tf.keras.layers.Dense(phi_input_dim)
],
name='demo_encoder'
)
self.demo_encoder.build(demo)
if self.return_sequences:
phi_input = (None, phi_input_dim)
self.phi.build(phi_input)
phi_output = self.phi.compute_output_shape(phi_input)
self.attention.build(phi_input)
attention_output = self.attention.compute_output_shape(phi_input)
aggregated_output = [
phi_output[0], phi_output[1] * attention_output[1]]
self.rho.build(aggregated_output)
else:
phi_input = (None, phi_input_dim)
self.phi.build(phi_input)
phi_output = self.phi.compute_output_shape(phi_input)
self.attention.build(phi_input)
attention_output = self.attention.compute_output_shape(phi_input)
aggregated_output = self.aggregation.compute_output_shape(
[phi_output[0], phi_output[1] * attention_output[1]])
self.rho.build(aggregated_output)
def call(self, inputs):
if self.return_sequences:
demo, times, values, measurements, lengths, elem_per_tp, pred_lengths = inputs
if len(pred_lengths.get_shape()) == 2:
pred_lengths = tf.squeeze(pred_lengths, -1)
else:
demo, times, values, measurements, lengths = inputs
transformed_times = self.positional_encoding(times)
# Transform modalities
transformed_measurements = tf.one_hot(
measurements, self._n_modalities, dtype=tf.float32)
combined_values = tf.concat(
(
transformed_times,
values,
transformed_measurements
),
axis=-1
)
demo_encoded = self.demo_encoder(demo)
combined_with_demo = tf.concat(
[tf.expand_dims(demo_encoded, 1), combined_values], axis=1)
# Somehow eager execution and graph mode behave differently.
# In graph mode lengths has an additional dimension
if len(lengths.get_shape()) == 2:
lengths = tf.squeeze(lengths, -1)
if self.return_sequences:
# We additionally have the encoded demographics as a set element
mask = tf.sequence_mask(lengths+1, name='mask')
collected_values, segment_ids = self.to_segments(
combined_with_demo, mask)
preattentions = self.attention(collected_values, segment_ids)
encoded = self.phi(collected_values)
agg = cumulative_softmax_weighting(
encoded, preattentions, segment_ids)
# Remove heads dimension
agg = tf.reshape(
agg,
tf.stack([tf.shape(input=agg)[0], tf.constant(-1)], axis=0)
)
predictions_mask = tf.sequence_mask(pred_lengths)
gathered_time_indices, batch_indices = self.to_segments(
elem_per_tp, predictions_mask)
# Compute index of the last observation associated with the
# provided time.
prediction_indices = tf.math.cumsum(gathered_time_indices)
# Add an offset for each instance to account for demographics. This
# offset decreases for each later index in the batch. Thus we can
# use the batch indices.
prediction_indices += batch_indices
gathered_embeddings = tf.gather_nd(
agg, prediction_indices[:, None])
# Lost shape information
gathered_embeddings.set_shape([None, None])
output = self.rho(gathered_embeddings)
valid_predictions = tf.cast(tf.compat.v1.where(predictions_mask), tf.int32)
output = tf.scatter_nd(
valid_predictions,
output,
tf.concat(
[tf.shape(input=predictions_mask), tf.shape(input=output)[-1:]],
axis=0
)
)
# tf.print(tf.shape(output), tf.shape(mask))
output._keras_mask = predictions_mask
return output
else:
# We additionally have the encoded demographics as a set element
mask = tf.sequence_mask(lengths+1, name='mask')
collected_values, segment_ids = self.to_segments(
combined_with_demo, mask)
encoded = self.phi(collected_values)
attentions = self.attention(collected_values, segment_ids, lengths)
weighted_values = []
for attention in attentions:
weighted_values.append(encoded * attention)
aggregated_values = self.aggregation(
tf.concat(weighted_values, axis=-1), segment_ids)
return self.rho(aggregated_values)
def get_attentions(self, inputs):
demo, times, values, measurements, lengths = inputs
transformed_times = self.positional_encoding(times)
# Transform modalities
if self._n_modalities > 100:
# Use an embedding instead of one hot encoding when we have a very
# high number of modalities
transformed_measurements = self.modality_embedding(measurements)
else:
transformed_measurements = tf.one_hot(
measurements, self._n_modalities, dtype=tf.float32)
combined_values = tf.concat(
(
transformed_times,
values,
transformed_measurements
),
axis=-1
)
demo_encoded = self.demo_encoder(demo)
combined_with_demo = tf.concat(
[tf.expand_dims(demo_encoded, 1), combined_values], axis=1)
# Somehow eager execution and graph mode behave differently.
# In graph mode legths has an additional dimension
if len(lengths.get_shape()) == 2:
lengths = tf.squeeze(lengths, -1)
# We additionally have the encoded demographics as a set element
mask = tf.sequence_mask(lengths+1, name='mask')
valid_observations = tf.cast(tf.compat.v1.where(mask), tf.int32)
out_shape = tf.concat(
[
tf.shape(input=combined_with_demo)[:-1],
tf.constant([1])
],
axis=0,
)
collected_values, segment_ids = self.to_segments(combined_with_demo, mask)
attentions = self.attention(collected_values, segment_ids, lengths)
demo_attentions = []
ts_attentions = []
for attention in attentions:
dist_attention = tf.scatter_nd(
valid_observations, attention, out_shape)
demo_attentions.append(dist_attention[:, 0])
ts_attentions.append(dist_attention[:, 1:])
return demo_attentions, ts_attentions
def _evtl_create_embedding_layer(self):
if self._n_modalities > 100 and not hasattr(self, 'modality_embedding'):
self.modality_embedding = tf.keras.layers.Embedding(
self._n_modalities, 64)
@classmethod
def get_hyperparameters(cls):
import tensorboard.plugins.hparams.api as hp
from ..training_utils import HParamWithDefault
return [
HParamWithDefault(
'n_phi_layers', hp.Discrete([1, 2, 3, 4, 5]), default=3),
HParamWithDefault(
'phi_width',
hp.Discrete([16, 32, 64, 128, 256, 512]),
default=32
),
HParamWithDefault(
'phi_dropout',
hp.Discrete([0.0, 0.1, 0.2, 0.3]),
default=0.
),
HParamWithDefault(
'n_psi_layers',
hp.Discrete([2]),
default=2
),
HParamWithDefault(
'psi_width',
hp.Discrete([64]),
default=64
),
HParamWithDefault(
'psi_latent_width',
hp.Discrete([128]),
default=128
),
HParamWithDefault(
'dot_prod_dim',
hp.Discrete([128]),
default=128
),
HParamWithDefault(
'n_heads',
hp.Discrete([4]),
default=4
),
HParamWithDefault(
'attn_dropout',
hp.Discrete([0.0, 0.1, 0.25, 0.5]),
default=0.1
),
HParamWithDefault(
'latent_width',
hp.Discrete([32, 64, 128, 256, 512, 1024, 2048]),
default=128
),
HParamWithDefault(
'n_rho_layers', hp.Discrete([1, 2, 3, 4, 5]), default=3),
HParamWithDefault(
'rho_width',
hp.Discrete([16, 32, 64, 128, 256, 512]),
default=32
),
HParamWithDefault(
'rho_dropout',
hp.Discrete([0.0, 0.1, 0.2, 0.3]),
default=0.
),
HParamWithDefault(
'max_timescale',
hp.Discrete([10., 100., 1000.]),
default=100.
),
HParamWithDefault(
'n_positional_dims',
hp.Discrete([4, 8, 16]),
default=4
)
]
@classmethod
def from_hyperparameter_dict(cls, task, hparams):
return cls(
output_activation=task.output_activation,
output_dims=task.n_outputs,
n_phi_layers=hparams['n_phi_layers'],
phi_width=hparams['phi_width'],
n_psi_layers=hparams['n_psi_layers'],
psi_width=hparams['psi_width'],
psi_latent_width=hparams['psi_latent_width'],
dot_prod_dim=hparams['dot_prod_dim'],
n_heads=hparams['n_heads'],
attn_dropout=hparams['attn_dropout'],
latent_width=hparams['latent_width'],
phi_dropout=hparams['phi_dropout'],
n_rho_layers=hparams['n_rho_layers'],
rho_width=hparams['rho_width'],
rho_dropout=hparams['rho_dropout'],
max_timescale=hparams['max_timescale'],
n_positional_dims=hparams['n_positional_dims']
)
@classmethod
def from_config(cls, config):
return cls(**config)
def get_config(self):
return self._config
def data_preprocessing_fn(self):
def flatten_unaligned_measurements(ts, labels):
# Ignore demographics for now
demo, X, Y, measurements, lengths = ts
if self._n_modalities is None:
self._n_modalities = int(measurements.get_shape()[-1])
X = tf.expand_dims(X, -1)
measurement_positions = tf.cast(tf.compat.v1.where(measurements), tf.int32)
X_indices = measurement_positions[:, 0]
Y_indices = measurement_positions[:, 1]
gathered_X = tf.gather(X, X_indices)
gathered_Y = tf.gather_nd(Y, measurement_positions)
gathered_Y = tf.expand_dims(gathered_Y, axis=-1)
length = tf.shape(input=X_indices)[0]
if self.return_sequences:
# We need to know now many prediction values each instance
# should have when doing online prediction
prediction_length = tf.shape(input=labels)[0]
counts = tf.reduce_sum(input_tensor=tf.cast(measurements, tf.int64), axis=1)
return (demo, gathered_X, gathered_Y, Y_indices, length, counts, prediction_length), labels
else:
return (demo, gathered_X, gathered_Y, Y_indices, length), labels
return flatten_unaligned_measurements
@classmethod
def get_default(cls, task):
hyperparams = cls.get_hyperparameters()
return cls.from_hyperparameter_dict(
task,
{
h.name: h._default for h in hyperparams
}
)
class DeepSetAttentionNoPosModel(DeepSetAttentionModel):
def __init__(self, output_activation, output_dims, **kwargs):
super().__init__(output_activation, output_dims, **kwargs,
max_timescale=0,
n_positional_dims=0)
@classmethod
def get_hyperparameters(cls):
parent_hyperparameters = super().get_hyperparameters()
return [
hp for hp in parent_hyperparameters
if hp.name not in ['max_timescale', 'n_positional_dims']
]
@classmethod
def from_hyperparameter_dict(cls, task, hparams):
return cls(
output_activation=task.output_activation,
output_dims=task.n_outputs,
n_phi_layers=hparams['n_phi_layers'],
phi_width=hparams['phi_width'],
n_psi_layers=hparams['n_psi_layers'],
psi_width=hparams['psi_width'],
psi_latent_width=hparams['psi_latent_width'],
dot_prod_dim=hparams['dot_prod_dim'],
n_heads=hparams['n_heads'],
attn_dropout=hparams['attn_dropout'],
latent_width=hparams['latent_width'],
phi_dropout=hparams['phi_dropout'],
n_rho_layers=hparams['n_rho_layers'],
rho_width=hparams['rho_width'],
rho_dropout=hparams['rho_dropout'],
)
|
the-stack_106_26389 | #!/usr/bin/env python3
'''
Example for the first lecture on TkInter.
Closing the window in post-interactive mode.
'''
from tkinter import *
def dump(*args):
print("DUMP:",args)
TKroot = Tk()
TKroot.title("Hello")
root = Frame(TKroot)
root.place(relx=0, rely=0, relheight=1, relwidth=1)
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=2)
root.rowconfigure(0, weight=10)
root.rowconfigure(1, weight=1)
Butt = Button(root, text="Butt ON")
Butt.bind('<Button-1>', dump)
Butt.grid(row=0, column=0, sticky=E+W+S+N)
Exit = Button(root, text="Quit!", command=root.quit)
Exit.grid(row=0, column=1, sticky=E+W+S+N)
Txt = Label(root, text="This is a label", bg="PeachPuff")
Txt.grid(row=1, column=0, columnspan=2, sticky=E+W+N)
TKroot.mainloop()
print("Done")
#root.destroy()
|
the-stack_106_26390 | import re
import tweepy
import pandas as pd
from datetime import date
from textblob import TextBlob
def clean_text(text):
text = re.sub(r'@[A-Za-z0-9]+', '', text) #Remove mentions
text = re.sub(r'#', '', text) #Remove #
text = re.sub(r'RT[\s]+', '', text) #Remove RT
text = re.sub(r'https:\/\/\S+', '', text) #Remove hyperlinks
text = re.sub('\n'," ",text)
text = re.sub('-\n([a-z])', '', text)
text = re.sub('\r'," ",text)
text = re.sub('-\r([a-z])', '', text)
text = deEmojify(text)
return text
def deEmojify(text):
regrex_pattern = re.compile(pattern="["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
"]+", flags=re.UNICODE)
return regrex_pattern.sub(r'', text)
def getSubjectivity(text):
return TextBlob(text).sentiment.subjectivity
def getPolarity(text):
return TextBlob(text).sentiment.polarity
def x_range(x):
if x > 0:
return 1
elif x == 0:
return 0
else:
return -1
def x_range_sentiment(x):
if x > 0:
return 'Positive'
elif x == 0:
return 'Neutral'
else:
return 'Negative'
def get_tweets(stock: str, update: str):
if update == "upyes":
consumer_key = "89sGhwiTspic1SS4NBnnv6yHk"
consumer_secret = "wPMainc6F8aw1KPYjWpBUz17wTUQJdDUu4a5vyjWsSYPPbMNvH"
access_token = "3243736436-RkiWCNFrQ5Gl82OFSCtENHqnRdRrnjeTB5pOpk6"
access_token_secret = "FzxpAu9WCmJYJIeTyqqT8p1mf0kum4s8z2sArU3MAqgDW"
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
query = '$' + stock
tweets = api.search(q=query,
# 200 is the maximum allowed count
count=100,
include_rts=False,
# Necessary to keep full_text
# otherwise only the first 140 words are extracted
tweet_mode='extended'
)
all_tweets = []
all_tweets.extend(tweets)
oldest_id = tweets[-90].id
for i in range(160):
i += 1
tweets = api.search(q=query,
# 200 is the maximum allowed count
count=100,
include_rts=False,
lang='en',
max_id=oldest_id - 1,
# Necessary to keep full_text
# otherwise only the first 140 words are extracted
tweet_mode='extended'
)
if len(tweets) == 0:
break
oldest_id = tweets[-1].id
all_tweets.extend(tweets)
print('N of tweets downloaded till now {}'.format(len(all_tweets)), 'ciclo = ', i)
outtweets = [[tweet.id_str,
tweet.user,
tweet.created_at,
tweet.favorite_count,
tweet.retweet_count,
tweet.full_text.encode("utf-8").decode("utf-8")]
for idx, tweet in enumerate(all_tweets)]
df = pd.DataFrame(outtweets, columns=["id", 'user_name', "created_at", "favorite_count", "retweet_count", "text"])
df['Date'] = df['created_at'].map(lambda x: x.date())
df['text'] = df['text'].apply(clean_text)
df['Subjectivity'] = df['text'].apply(getSubjectivity)
df['Polarity'] = df['text'].apply(getPolarity)
df['result'] = df['Polarity'].apply(x_range)
df['Sentiment'] = df['Polarity'].apply(x_range_sentiment)
df.to_csv("$SPY_tweets.csv", index=False)
return df
else:
print("No update")
|
the-stack_106_26391 | import pandas as pd
import pytest
from powersimdata.data_access.context import Context
from powersimdata.input.change_table import ChangeTable
from powersimdata.input.grid import Grid
from powersimdata.tests.mock_context import MockContext
grid = Grid(["USA"])
@pytest.fixture
def ct():
return ChangeTable(grid)
def test_resource_exist(ct):
with pytest.raises(ValueError):
ct.scale_plant_capacity("unknown", zone_name={"Idaho": 2})
assert ct.ct == {}
def test_add_dcline_argument_type(ct):
new_dcline = {"capacity": 500, "from_bus_id": 1, "to_bus_id": 2}
with pytest.raises(TypeError) as excinfo:
ct.add_dcline(new_dcline)
assert "Argument enclosing new HVDC line(s) must be a list" in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_argument_number_of_keys(ct):
new_dcline = [{"from_bus_id": 1, "to_bus_id": 2}]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
expected = "For new_dcline, must specify one of ('capacity', 'Pmax') but not both."
assert expected in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_argument_wrong_keys(ct):
new_dcline = [{"capacity": 1000, "from_bus": 1, "to_bus": 2}]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
expected_msg = "Each entry of new_dcline requires keys of: from_bus_id, to_bus_id"
assert expected_msg in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_argument_wrong_bus(ct):
new_dcline = [
{"capacity": 2000, "from_bus_id": 300, "to_bus_id": 1000},
{"capacity": 1000, "from_bus_id": 1, "to_bus_id": 30010010},
]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
assert "No bus with the following id for line #2: 30010010" in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_argument_same_buses(ct):
new_dcline = [{"capacity": 1000, "from_bus_id": 1, "to_bus_id": 1}]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
assert "buses of line #1 must be different" in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_argument_negative_capacity(ct):
new_dcline = [{"capacity": -1000, "from_bus_id": 300, "to_bus_id": 1000}]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
assert "capacity of line #1 must be positive" in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_output(ct):
new_dcline = [
{"capacity": 2000, "from_bus_id": 200, "to_bus_id": 2000},
{"capacity": 1000, "from_bus_id": 9, "to_bus_id": 70042},
{"capacity": 8000, "from_bus_id": 2008, "to_bus_id": 5997},
]
ct.add_dcline(new_dcline)
expected = {
"new_dcline": [
{"Pmax": 2000, "Pmin": -2000, "from_bus_id": 200, "to_bus_id": 2000},
{"Pmax": 1000, "Pmin": -1000, "from_bus_id": 9, "to_bus_id": 70042},
{"Pmax": 8000, "Pmin": -8000, "from_bus_id": 2008, "to_bus_id": 5997},
]
}
assert ct.ct == expected
def test_add_dcline_in_different_interconnect(ct):
new_dcline = [
{"capacity": 2000, "from_bus_id": 200, "to_bus_id": 2000},
{"capacity": 8000, "from_bus_id": 2008, "to_bus_id": 3001001},
]
ct.add_dcline(new_dcline)
expected = {
"new_dcline": [
{"Pmax": 2000, "Pmin": -2000, "from_bus_id": 200, "to_bus_id": 2000},
{"Pmax": 8000, "Pmin": -8000, "from_bus_id": 2008, "to_bus_id": 3001001},
]
}
assert ct.ct == expected
def test_add_dcline_Pmin_and_Pmax_success(ct): # noqa: N802
new_dcline = [{"Pmax": 2000, "Pmin": 0, "from_bus_id": 200, "to_bus_id": 2000}]
ct.add_dcline(new_dcline)
assert ct.ct == {"new_dcline": new_dcline}
def test_add_dcline_Pmin_gt_Pmax(ct): # noqa: N802
new_dcline = [{"Pmax": 2000, "Pmin": 3000, "from_bus_id": 200, "to_bus_id": 2000}]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
assert "Pmin cannot be greater than Pmax" in str(excinfo.value)
assert ct.ct == {}
def test_add_dcline_Pmin_and_Pmax_and_capacity(ct): # noqa: N802
new_dcline = [
{"Pmax": 200, "Pmin": -200, "capacity": 10, "from_bus_id": 1, "to_bus_id": 2}
]
with pytest.raises(ValueError) as excinfo:
ct.add_dcline(new_dcline)
expected = "For new_dcline, must specify one of ('capacity', 'Pmax') but not both"
assert expected in str(excinfo.value)
assert ct.ct == {}
def test_add_branch_argument_buses_in_different_interconnect(ct):
new_branch = [
{"capacity": 2000, "from_bus_id": 300, "to_bus_id": 1000},
{"capacity": 1000, "from_bus_id": 1, "to_bus_id": 3001001},
]
with pytest.raises(ValueError) as excinfo:
ct.add_branch(new_branch)
assert "Buses of line #2 must be in same interconnect" in str(excinfo.value)
assert ct.ct == {}
def test_add_branch_zero_distance_between_buses(ct):
new_branch = [{"capacity": 75, "from_bus_id": 1, "to_bus_id": 3}]
with pytest.raises(ValueError) as excinfo:
ct.add_branch(new_branch)
assert "Distance between buses of line #1 is 0" in str(excinfo.value)
assert ct.ct == {}
def test_add_branch_Pmin_and_Pmax(ct): # noqa: N802
new_dcline = [{"Pmax": 2000, "Pmin": 0, "from_bus_id": 200, "to_bus_id": 2000}]
with pytest.raises(ValueError) as excinfo:
ct.add_branch(new_dcline)
assert "Can't independently set Pmin & Pmax for AC branches" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_argument_type(ct):
new_plant = {"type": "solar", "bus_id": 1, "Pmax": 100}
with pytest.raises(TypeError) as excinfo:
ct.add_plant(new_plant)
assert "Argument enclosing new plant(s) must be a list" in str(excinfo.value)
assert ct.ct == {}
def test_add_renewable_plant_missing_key_type(ct):
new_plant = [{"bus_id": 350, "Pmax": 35}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
expected = (
"Each entry of plant requires keys of: Pmax, bus_id, type. Missing ['type']"
)
assert expected in str(excinfo.value)
assert ct.ct == {}
def test_add_renewable_plant_missing_key_bus_id(ct):
new_plant = [{"type": "solar", "Pmax": 35}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
expected = (
"Each entry of plant requires keys of: Pmax, bus_id, type. Missing ['bus_id']"
)
assert expected in str(excinfo.value)
assert ct.ct == {}
def test_add_renewable_plant_missing_key_pmax(ct):
new_plant = [{"type": "hydro", "bus_id": 350}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
expected = (
"Each entry of plant requires keys of: Pmax, bus_id, type. Missing ['Pmax']"
)
assert expected in str(excinfo.value)
assert ct.ct == {}
def test_add_thermal_plant_missing_key_c0(ct):
new_plant = [{"type": "ng", "bus_id": 100, "Pmax": 75, "c1": 9, "c2": 0.25}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "Missing key c0 for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_thermal_plant_missing_key_c1(ct):
new_plant = [{"type": "ng", "bus_id": 100, "Pmax": 75, "c0": 1500, "c2": 1}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "Missing key c1 for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_thermal_plant_missing_key_c2(ct):
new_plant = [{"type": "ng", "bus_id": 100, "Pmax": 75, "c0": 1500, "c1": 500}]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "Missing key c2 for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_wrong_resource(ct):
with pytest.raises(ValueError) as excinfo:
ct.add_plant([{"type": "unknown", "bus_id": 50000, "Pmax": 1}])
assert "Invalid resource: unknown" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_wrong_bus(ct):
new_plant = [
{
"type": "nuclear",
"bus_id": 300,
"Pmin": 500,
"Pmax": 5000,
"c0": 1,
"c1": 2,
"c2": 3,
},
{"type": "coal", "bus_id": 5000000, "Pmax": 200, "c0": 1, "c1": 2, "c2": 3},
]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "No bus id 5000000 available for plant #2" in str(excinfo.value)
assert ct.ct == {}
def test_add_thermal_plant_wrong_coefficients(ct):
new_plant = [
{
"type": "ng",
"bus_id": 300,
"Pmin": 0,
"Pmax": 500,
"c0": -800,
"c1": 30,
"c2": 0.0025,
}
]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "c0 >= 0 must be satisfied for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_negative_pmax(ct):
new_plant = [
{"type": "dfo", "bus_id": 300, "Pmax": -10, "c0": 1, "c1": 2, "c2": 0.3}
]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "Pmax >= 0 must be satisfied for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_negative_pmin(ct):
new_plant = [
{"type": "dfo", "bus_id": 300, "Pmax": 10, "c0": 100, "c1": 2, "c2": 0.1},
{
"type": "geothermal",
"bus_id": 3001001,
"Pmin": -1,
"Pmax": 20,
"c0": 10,
"c1": 5,
"c2": 1,
},
]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "0 <= Pmin <= Pmax must be satisfied for plant #2" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_pmin_pmax_relationship(ct):
new_plant = [
{
"type": "biomass",
"bus_id": 13802,
"Pmin": 30,
"Pmax": 20,
"c0": 30,
"c1": 15,
"c2": 0.1,
}
]
with pytest.raises(ValueError) as excinfo:
ct.add_plant(new_plant)
assert "0 <= Pmin <= Pmax must be satisfied for plant #1" in str(excinfo.value)
assert ct.ct == {}
def test_add_plant_check_pmin_is_added(ct):
new_plant = [
{"type": "solar", "bus_id": 3001001, "Pmax": 85},
{"type": "wind", "bus_id": 9, "Pmin": 5, "Pmax": 60},
{"type": "wind_offshore", "bus_id": 13802, "Pmax": 175},
]
ct.add_plant(new_plant)
assert ct.ct["new_plant"][0]["Pmin"] == 0
assert ct.ct["new_plant"][1]["Pmin"] == 5
assert ct.ct["new_plant"][2]["Pmin"] == 0
def test_add_renewable_plant_check_neighbor_is_added(ct):
new_plant = [
{"type": "hydro", "bus_id": 3001001, "Pmin": 60, "Pmax": 85},
{
"type": "coal",
"bus_id": 9,
"Pmax": 120,
"c0": 1000,
"c1": 500,
"c2": 0.3,
},
{"type": "wind_offshore", "bus_id": 13802, "Pmax": 175},
]
ct.add_plant(new_plant)
assert "plant_id_neighbor" in ct.ct["new_plant"][0]
assert "plant_id_neighbor" not in ct.ct["new_plant"][1]
assert "plant_id_neighbor" in ct.ct["new_plant"][2]
def test_add_plant_neighbor_can_be_on_same_bus(ct):
wind_farm = grid.plant.groupby(["type"]).get_group("wind")
hydro_plant = grid.plant.groupby(["type"]).get_group("hydro")
bus_id_wind = wind_farm.iloc[100].bus_id
bus_id_hydro = hydro_plant.iloc[2000].bus_id
new_plant = [
{"type": "wind", "bus_id": bus_id_wind, "Pmin": 60, "Pmax": 85},
{"type": "hydro", "bus_id": bus_id_hydro, "Pmax": 175},
]
ct.add_plant(new_plant)
wind_neighbor_id = ct.ct["new_plant"][0]["plant_id_neighbor"]
assert wind_neighbor_id == wind_farm.iloc[100].name
hydro_neighbor_id = ct.ct["new_plant"][1]["plant_id_neighbor"]
assert hydro_neighbor_id == hydro_plant.iloc[2000].name
def test_scale_pmin_by_plant_too_high(ct):
ct.scale_plant_pmin("ng", plant_id={0: 100})
assert ct.ct["ng_pmin"]["plant_id"][0] * grid.plant.loc[0, "Pmin"] == pytest.approx(
grid.plant.loc[0, "Pmax"]
)
def test_scale_pmin_by_zone_too_high(ct):
ct.scale_plant_pmin("ng", zone_name={"Maine": 100})
assert (
ct.ct["ng_pmin"]["plant_id"][0]
* ct.ct["ng_pmin"]["zone_id"][1] # plant_id 0 is in Maine (zone_id 1)
* grid.plant.loc[0, "Pmin"]
) == pytest.approx(grid.plant.loc[0, "Pmax"])
def test_scale_pmin_by_plant_and_zone_too_high(ct):
ct.scale_plant_pmin("ng", plant_id={0: 10}, zone_name={"Maine": 10})
assert (
ct.ct["ng_pmin"]["plant_id"][0]
* ct.ct["ng_pmin"]["zone_id"][1] # plant_id 0 is in Maine (zone_id 1)
* grid.plant.loc[0, "Pmin"]
) == pytest.approx(grid.plant.loc[0, "Pmax"])
def test_add_bus_success(ct):
new_buses = [
{"lat": 40, "lon": 50.5, "zone_id": 2, "baseKV": 69},
{"lat": -40.5, "lon": -50, "zone_name": "Massachusetts", "Pd": 10},
]
ct.add_bus(new_buses)
expected_new_buses = [
{"lat": 40, "lon": 50.5, "zone_id": 2, "Pd": 0, "baseKV": 69},
{"lat": -40.5, "lon": -50, "zone_id": 4, "Pd": 10, "baseKV": 230},
]
assert ct.ct["new_bus"] == expected_new_buses
def test_add_bus_bad_list_entries(ct):
bad_dicts = [
{"lat": 40, "lon": 50}, # missing zone_id/zone_name
{"lat": 40, "zone_id": 2}, # missing lon
{"lon": 50, "zone_id": 2}, # missing lat
{"lat": 40, "lon": 250, "zone_id": 2}, # bad lat value
{"lat": -100, "lon": 120, "zone_id": 2}, # bad lon value
{"lat": "40", "lon": "50", "zone_id": 2}, # strings for lat/lon
{"lat": 4, "lon": 5, "zone_id": 2, "zone_name": "Ohio"}, # zone_id & zone_name
{"lat": 40, "lon": 50, "zone_id": 1000}, # bad zone_id
{"lat": 40, "lon": 50, "zone_name": "France"}, # bad zone_name
{"lat": 40, "lon": 50, "Pd": "100 MW"}, # bad Pd
{"lat": 40, "lon": 50.5, "zone_id": 2, "baseKV": "69"}, # bad baseKV type
{"lat": 40, "lon": 50.5, "zone_id": 2, "baseKV": -230}, # bad baseKV value
]
for d in bad_dicts:
with pytest.raises(ValueError):
ct.add_bus([d])
def test_add_bus_bad_type(ct):
with pytest.raises(TypeError):
ct.add_bus({"bus1": {"lat": 40, "lon": 50.5, "zone_id": 2}})
def test_add_new_elements_at_new_buses(ct):
max_existing_index = int(grid.bus.index.max())
new_buses = [
{"lat": 40, "lon": 50.5, "zone_id": 2, "baseKV": 69},
{"lat": -40.5, "lon": -50, "zone_name": "Massachusetts", "Pd": 10},
]
ct.add_bus(new_buses)
new_bus1 = max_existing_index + 1
new_bus2 = max_existing_index + 2
ct.add_storage_capacity([{"bus_id": new_bus1, "capacity": 100}])
ct.add_dcline([{"from_bus_id": new_bus1, "to_bus_id": new_bus2, "capacity": 200}])
ct.add_branch([{"from_bus_id": new_bus1, "to_bus_id": new_bus2, "capacity": 300}])
ct.add_plant([{"type": "wind", "bus_id": new_bus2, "Pmax": 400}])
def test_change_table_clear_success(ct):
fake_scaling = {"demand", "branch", "solar", "ng_cost", "coal_pmin", "dcline"}
fake_additions = {
"storage",
"new_dcline",
"new_branch",
"new_plant",
"demand_flexibility",
}
all_fakes = fake_scaling | fake_additions
original_dict_object = ct.ct
for fake in all_fakes:
ct.ct[fake] = {}
# Test that each individual clear makes a change, and the ct ends up empty
clear_keys = {
"branch",
"dcline",
"demand",
"plant",
"storage",
"demand_flexibility",
}
for key in clear_keys:
old_keys = set(ct.ct.keys())
ct.clear(key)
assert set(ct.ct.keys()) < old_keys
assert ct.ct == {}
# Test that passing no args clears everything in one shot
all_fakes = fake_scaling | fake_additions
for fake in all_fakes:
ct.ct[fake] = {}
ct.clear()
assert ct.ct == {}
assert ct.ct is original_dict_object
def test_change_table_clear_bad_type(ct):
with pytest.raises(TypeError):
ct.clear(["plant"])
def test_change_table_clear_bad_key(ct):
with pytest.raises(ValueError):
ct.clear({"plantttt"})
def test_remove_branch(ct):
ct.remove_branch({0})
with pytest.raises(ValueError):
# Can't remove again, because it shouldn't exist
ct.remove_branch({0})
def test_remove_bus(ct):
with pytest.raises(ValueError):
# Can't remove, because there are branches attached to it
ct.remove_bus({1})
ct.remove_branch({0, 1, 2})
ct.remove_bus({1})
with pytest.raises(ValueError):
# Can't remove again, because it shouldn't exist
ct.remove_bus({1})
    # Even after we remove the branch connected to bus 845...
ct.remove_branch({1094})
with pytest.raises(ValueError):
# We can't remove this bus, since there's a generator with non-zero capacity
ct.remove_bus({845})
ct.scale_plant_capacity(resource="ng", plant_id={0: 0})
ct.remove_bus({845})
def test_add_demand_flexibility(ct, monkeypatch):
with pytest.raises(ValueError):
# Fails because "demand_flexibility_dn", a required key, is not included
ct.add_demand_flexibility(
{"demand_flexibility_up": "Test", "demand_flexibility_duration": 6}
)
with pytest.raises(ValueError):
# Fails because there is a key that should not be there
ct.add_demand_flexibility(
{
"demand_flexibility_up": "Test",
"demand_flexibility_dn": "Test",
"demand_flexibility_duration": 6,
"demand_flexibility_wrong_key": "Test",
}
)
with pytest.raises(ValueError):
# Fails because there are no profiles available that match the specified version
ct.add_demand_flexibility(
{
"demand_flexibility_up": "Test",
"demand_flexibility_dn": "Test",
"demand_flexibility_duration": 6,
}
)
monkeypatch.setattr(Context, "get_data_access", MockContext().get_data_access)
data_access = Context.get_data_access()
# Create fake files in the expected directory path
exp_path = f"raw/{grid.grid_model}"
for csv_file in (
"demand_flexibility_up_Test.csv",
"demand_flexibility_dn_Test.csv",
):
with data_access.write(exp_path + "/" + csv_file) as f:
pd.DataFrame().to_csv(f)
# Add a test instance of demand flexibility to the change table
ct.add_demand_flexibility(
{
"demand_flexibility_up": "Test",
"demand_flexibility_dn": "Test",
"demand_flexibility_duration": 6,
}
)
exp_dict = {
"demand_flexibility": {
"demand_flexibility_up": "Test",
"demand_flexibility_dn": "Test",
"demand_flexibility_duration": 6,
},
}
assert ct.ct == exp_dict
|
the-stack_106_26393 | import numpy as np
def calc_life(trajs, ub=5, lb=-5):
"""
Identifies transition paths and returns lifetimes of states.
Parameters
----------
trajs : list of lists
Set of trajectories.
ub, lb : float
Cutoff value for upper and lower states.
"""
    if ub <= lb:
        print("Upper bound must be greater than the lower bound")
        return
lifeA = []
lifeB = []
time = 0
for tr in trajs:
state = None
ntp = 0
time_prev = 0
        for t, q in enumerate(tr):
            # assign state when beyond boundaries
            if q > ub:  # state "B"
                if state == 'A':
                    ntp += 1
                    lifeA.append(time - time_prev)
                    time_prev = time
                state = 'B'
            elif q < lb:  # state "A"
                if state == 'B':
                    ntp += 1
                    lifeB.append(time - time_prev)
                    time_prev = time
                state = 'A'
            else:
                if state == 'A' and q < ub:
                    time = t
                elif state == 'B' and q > lb:
                    time = t
return lifeA, lifeB
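# Usage sketch (illustrative only, not part of the original analysis): calc_life
# expects trajectories of a 1-D reaction coordinate, with state A below `lb` and
# state B above `ub`. Transitions should pass through the intermediate region so
# that the internal time bookkeeping is meaningful, e.g.:
#
#     traj = np.concatenate([
#         np.full(100, -7.0),          # dwell in state A
#         np.linspace(-7.0, 7.0, 20),  # transition path through the barrier region
#         np.full(80, 7.0),            # dwell in state B
#         np.linspace(7.0, -7.0, 20),  # return transition
#         np.full(60, -7.0),
#     ])
#     life_A, life_B = calc_life([traj], ub=5, lb=-5)
#     # life_A holds dwell times in A that ended with a jump to B, and vice versa.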
def calc_life_multi(trajs, bounds=[[-3,-1], [1,3], [6,8]]):
"""
Identifies transition paths and returns lifetimes of states.
Parameters
----------
trajs : list of lists
Set of trajectories.
bounds : list
Limits for states.
"""
life = [[],[],[]]
tau = {}
time = 0
for tr in trajs:
state = None
ntp = 0
for t,q in enumerate(tr):
# assign state when beyond boundaries
for i,b in enumerate(bounds):
if b[0] < q < b[1]:
if state != i:
try:
life[state].append(time - time_prev)
tau[i, state].append(time - time_prev)
except TypeError:
pass
except KeyError:
tau[(i, state)] = [time - time_prev]
state = i
time_prev = t
break
state = i
time = t
return life, tau
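# Usage sketch (illustrative only): with the default bounds, values in the open
# intervals (-3, -1), (1, 3) and (6, 8) are assigned to states 0, 1 and 2.
# Under the reading that `tau` is keyed by (new_state, old_state):
#
#     life, tau = calc_life_multi([traj])
#     # tau[(1, 0)] lists lifetimes of state 0 that ended with a jump to state 1.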
def lifetimes(data, f_bound=-5, u_bound=5):
"""
Estimates lifetimes using a transition path analysis. Transitions are
only assigned from one state to the other when the core of the other
state is reached.
Parameters
----------
data : np.array
Time series data containing times and corrected extensions.
Returns
-------
tau_f : list
Waiting times in the unfolded state.
tau_u : list
Waiting times in the folded state.
data_f : list
Stretches of data corresponding to the folded segments.
data_u : list
Stretches of data corresponding to the unfolded segments.
tp_f : list
Transition paths for folding.
tp_u : list
Transition paths for unfolding.
"""
folded = False
unfolded = False
maybetp = []
recrossings = []
    times = data[:, 0]
    dist = data[:, 1]
data_f = []
data_u = []
tau_u = []
tau_f = []
tp_f = []
tp_u = []
time = 0
data_cum = []
    for t, d in zip(times, dist):
if d <= f_bound:
folded = True
if unfolded:
#print ' Refolding event: %g'%t,
tp_u.append(np.array(maybetp))
tau_f.append(t-time)
data_u.append(np.array(data_cum))
unfolded = False
time = t
data_cum = []
else:
recrossings.append(np.array(maybetp))
for tt,dd in maybetp:
data_cum.append([tt,dd])
data_cum.append([t,d])
maybetp = []
        elif d >= u_bound:
unfolded = True
if folded:
tp_f.append(np.array(maybetp))
#print ' Unfolding event: %g'%t,
tau_u.append(t-time)
data_f.append(np.array(data_cum))
folded = False
time = t
data_cum = []
else:
recrossings.append(np.array(maybetp))
for tt,dd in maybetp:
data_cum.append([tt,dd])
data_cum.append([t,d])
maybetp = []
else:
maybetp.append([t,d])
if unfolded:
#print ' Refolding event: %g'%t,
tau_f.append(t-time)
data_u.append(np.array(data_cum))
if folded:
# tp_f.append(np.array(maybetp))
#print ' Unfolding event: %g'%t,
tau_u.append(t-time)
data_f.append(np.array(data_cum))
return tau_f, tau_u, data_f, data_u, tp_f, tp_u, recrossings
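if __name__ == "__main__":
    # Minimal self-check (illustrative only, not part of the original analysis):
    # build a synthetic two-state time series with gradual crossings and run the
    # transition-path lifetime analysis defined above.
    block = np.concatenate([
        np.full(200, -7.0),          # folded segment
        np.linspace(-7.0, 7.0, 20),  # unfolding transition path
        np.full(150, 7.0),           # unfolded segment
        np.linspace(7.0, -7.0, 20),  # refolding transition path
    ])
    x = np.tile(block, 3)
    data = np.column_stack([np.arange(x.size), x])
    tau_f, tau_u, data_f, data_u, tp_f, tp_u, recrossings = lifetimes(data)
    print("waiting times before folding (unfolded state):", tau_f)
    print("waiting times before unfolding (folded state):", tau_u)
    print("number of recrossing segments:", len(recrossings))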
|
the-stack_106_26394 | # -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
"""This module is intended to get metadata from BCKG for barcoded reagents."""
import pyBCKG.domain as domain
import pandas as pd
from pathlib import Path
from typing import List, Dict, Any, Set
from pyBCKG.azurestorage.api import AzureConnection
def get_plate_data(filepath: Path, conn: AzureConnection) -> List[Dict[str, Any]]: # pragma: no cover
"""Returns a list of dictionaries contains details of the barcoded reagents.
`filepath`: The file path of the barcoded file.
`conn`: An instance of `pyBCKG.azurestorage.api.AzureConnection`"""
barcoded_df: pd.DataFrame = pd.DataFrame(pd.read_csv(filepath))
barcodes: Set[str] = {str(barcode) for barcode in list(pd.Series(barcoded_df["Barcode"])) if str(barcode) != "nan"}
reagents = conn.get_reagents_by_barcode(barcodes)
barcode_map = barcoded_df.to_dict("records")
reagent_barcodes = {r.barcode: r for r in reagents}
new_df_list = []
for entry in barcode_map:
full_entry = dict(entry)
if entry["Barcode"] in reagent_barcodes:
reagent: domain.Reagent = reagent_barcodes[entry["Barcode"]]
full_entry["Name"] = reagent.name
full_entry["SampleID"] = reagent.guid
if isinstance(reagent, domain.DNA) or isinstance(reagent, domain.Chemical):
full_entry["Type"] = reagent._type.value
new_df_list.append(full_entry)
return new_df_list
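# Usage sketch (hypothetical file name; constructing an authenticated
# AzureConnection depends on the caller's pyBCKG configuration and is not shown):
#
#     rows = get_plate_data(Path("barcoded_plate.csv"), conn)
#     plate_df = pd.DataFrame(rows)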
|